blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
37a94c8b4a0c2494709bafa11a77d80f535438ea
|
2b3f05bc191a15317aedc443850f432513aec737
|
/R/RcppExports.R
|
eec31ac519cba4c4e470917cfdb4e103849c6a7e
|
[] |
no_license
|
ThinkR-open/utf8splain
|
2f435e613b22727276dc3dffbe8644ba80dcda97
|
2fb7ed57c773ba5d462b7ab79a340eddb31b113e
|
refs/heads/master
| 2021-01-02T08:54:14.154004
| 2017-08-29T13:34:43
| 2017-08-29T13:34:43
| 99,092,294
| 9
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 225
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Auto-generated wrapper around the compiled routine
# '_utf8splain_parse_binary' in the utf8splain package; presumably parses
# the string `s` into its binary/UTF-8 representation — confirm against
# the C++ source. Changes belong in the C++ attributes, not here.
parse_binary <- function(s) {
.Call('_utf8splain_parse_binary', PACKAGE = 'utf8splain', s)
}
|
2947d56cc0414657e6565153bea29d3f76b43420
|
4d07eecae0429dc15066b34fbe512b8ff2ae53ea
|
/mds/ps/wt2-ps-test.R
|
7f42f49b1a116c1d723cefabf68308014add17e7
|
[] |
no_license
|
distanceModling/phd-smoothing
|
7ff8ba7bace1a7d1fa9e2fcbd4096b82a126c53c
|
80305f504865ce6afbc817fff83382678864b11d
|
refs/heads/master
| 2020-12-01T09:31:24.448615
| 2012-03-27T18:35:45
| 2012-03-27T18:35:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,689
|
r
|
wt2-ps-test.R
|
# function to run simulations on the wigglytop 2 domain
# Copyright David Lawrence Miller 2009.
source("mds.R")
# simulation settings: sample size and noise standard-deviation multiplier
samp.size=250
noise.level=0.05
## create a boundary...
bnd <- read.csv("wt2-verts.csv",header=FALSE)
names(bnd)<-c("x","y")
## Simulate some fitting data, inside boundary...
gendata<-read.csv("wt2truth.csv",header=TRUE)
# keep only the points flagged as inside the domain
gendata<-list(x=gendata$x[gendata$inside==1],
y=gendata$y[gendata$inside==1],
z=gendata$z[gendata$inside==1])
# NOTE(review): this keeps a row unless ALL of x, y, z are NA; if the
# intent was to drop rows with ANY NA, the `&`s should be `|`s — confirm.
na.ind<-!(is.na(gendata$x)&is.na(gendata$y)&is.na(gendata$z))
gendata<-list(x=gendata$x[na.ind],
y=gendata$y[na.ind],
z=gendata$z[na.ind])
# attempt to get around the inside bug
# (negate boundary and points before calling inSide — presumably mgcv's
# inSide(); verify the workaround is still needed with current mgcv)
bnd.neg<-list(x=-bnd$x,y=-bnd$y)
onoff<-inSide(bnd.neg,-gendata$x,-gendata$y)
gendata<-list(x=gendata$x[onoff],
y=gendata$y[onoff],
z=gendata$z[onoff])
# create the sample index
# (randomly pick samp.size points; remaining points become the
# prediction set below)
samp.ind<-sample(1:length(gendata$x),samp.size)
## create the sample
gendata.samp<- list(x=gendata$x[samp.ind],
y=gendata$y[samp.ind],
z=gendata$z[samp.ind])
# everything NOT sampled is kept for prediction
gendata<-list(x=gendata$x[-samp.ind],
y=gendata$y[-samp.ind],
z=gendata$z[-samp.ind])
# create the grid
source("wt2-create-grid.R")
my.grid<-wt2_create_grid()
## do the MDS on the grid
# create D (within-domain distance matrix; helper defined in mds.R)
D.grid<-create_distance_matrix(my.grid$x,my.grid$y,bnd)
# perform mds on D (classical MDS into 2 dimensions, keeping eigenvalues)
grid.mds<-cmdscale(D.grid,eig=TRUE,k=2)
# sample points insertion (project sample points into the MDS space)
samp.mds<-insert.mds(gendata.samp,my.grid,grid.mds,bnd)
# prediction points insertion
pred.mds<-insert.mds(gendata,my.grid,grid.mds,bnd)
# add noise (iid Gaussian, scaled by noise.level)
noise<-noise.level*rnorm(length(samp.ind))
#> summary(gendata$z)
# Min. 1st Qu. Median Mean 3rd Qu. Max.
#0.000000 0.000236 0.269300 0.276300 0.479600 0.850000
# mapped sample data (coordinates in MDS space, noisy response)
samp.data<-list(x=c(),y=c(),z=c())
samp.data$x<-samp.mds[,1]
samp.data$y<-samp.mds[,2]
samp.data$z<-gendata.samp$z+noise
# non-mapped sample data (original coordinates, same noisy response)
nsamp.data<-list(x=c(),y=c(),z=c())
nsamp.data$x<-gendata.samp$x
nsamp.data$y<-gendata.samp$y
nsamp.data$z<-gendata.samp$z+noise
### create prediction data
# non-mapped prediction data
# NOTE(review): the [samp.ind]/[-samp.ind] assignments below assume the
# sample/prediction split preserved the original point ordering — confirm.
npred.data<-list(x=rep(0,length(gendata$x)+length(samp.data$x)),
y=rep(0,length(gendata$x)+length(samp.data$y)))
npred.data$x[-samp.ind]<-gendata$x
npred.data$y[-samp.ind]<-gendata$y
npred.data$x[samp.ind]<-nsamp.data$x
npred.data$y[samp.ind]<-nsamp.data$y
# put this in the correct format (same interleaving, MDS coordinates)
pred.data<-list(x=rep(0,length(gendata$x)+length(samp.data$x)),
y=rep(0,length(gendata$x)+length(samp.data$y)))
pred.data$x[-samp.ind]<-pred.mds[,1]
pred.data$y[-samp.ind]<-pred.mds[,2]
pred.data$x[samp.ind]<-samp.mds[,1]
pred.data$y[samp.ind]<-samp.mds[,2]
### P-splines knot stuff here
source("ps/wt2-create-grid.R")
knots<-wt2_create_grid()
# project the knot grid into MDS space, then store as a list
knots.mds<-insert.mds(knots,my.grid,grid.mds,bnd)
knots.mds<-list(x=knots.mds[,1],y=knots.mds[,2])
# extra knots: four corners just outside the bounding box of the mapped knots
x.extra<-c(max(knots.mds$x)+0.25,max(knots.mds$x)+0.25,
min(knots.mds$x)-0.25,min(knots.mds$x)-0.25)
y.extra<-c(max(knots.mds$y)+0.25,min(knots.mds$y)-0.25,
max(knots.mds$y)+0.25,min(knots.mds$y)-0.25)
# BUG FIX: knots.mds is a list at this point, so the original matrix
# indexing knots.mds[,1] / knots.mds[,2] raised "incorrect number of
# dimensions"; use the list components instead.
knots.mds<-list(x=c(knots.mds$x,x.extra),y=c(knots.mds$y,y.extra))
### Now do some fitting and prediction
### mapping
# fit a thin-plate spline in the MDS coordinates (k=49 basis functions)
b.mapped<-gam(z~s(x,y,k=49),data=samp.data)
fv <- predict(b.mapped,newdata=pred.data)
# create the image
# rebuild the index of plotted cells by replaying the same filters
# (inside==1, NA removal, inSide workaround) applied to gendata above
gendata.ind <- read.csv("wt2truth.csv",header=TRUE)
ind<-c(1:length(gendata.ind$x))
pred.mat<-rep(NA,length(gendata.ind$x))
ind<-ind[gendata.ind$inside==1]
na.ind<-!(is.na(gendata.ind$x[gendata.ind$inside==1])&is.na(gendata.ind$y[gendata.ind$inside==1])&is.na(gendata.ind$z[gendata.ind$inside==1]))
ind<-ind[na.ind]
ind<-ind[onoff]
# plot for truth, mds, tprs and soap
par(mfrow=c(1,2))
# axis scales (the truth file is assumed to be a 50x50 grid — confirm)
xscale<-seq(min(gendata$x),max(gendata$x),length.out=50)
yscale<-seq(min(gendata$y),max(gendata$y),length.out=50)
# left panel: the true surface
pred.mat<-rep(NA,length(gendata.ind$x))
pred.mat[ind]<-gendata.ind$z[ind]
pred.mat<-matrix(pred.mat,50,50)
image(xscale,yscale,pred.mat,main="truth",asp=1,las=1,xlab="x",ylab="y",col=heat.colors(100))
contour(xscale,yscale,pred.mat,add=T)
# right panel: the MDS-based fit
pred.mat<-rep(NA,length(gendata.ind$x))
pred.mat[ind]<-fv
pred.mat<-matrix(pred.mat,50,50)
image(xscale,yscale,pred.mat,main="mds",asp=1,las=1,xlab="x",ylab="y",col=heat.colors(100))
contour(xscale,yscale,pred.mat,add=T)
### calculate MSEs
#mses<-list(mds=mean((fv-gendata.ind$z[ind])^2,na.rm=T),
# tprs=mean((fv.tprs-gendata.ind$z[ind])^2,na.rm=T),
# soap=mean((fv.soap-gendata.ind$z[ind])^2,na.rm=T))
# print them
#cat("mds MSE=" ,mses$mds,"\n")
#cat("tprs MSE=",mses$tprs,"\n")
#cat("soap MSE=",mses$soap,"\n")
|
5051a47c62490c87ce335541fe1d1d4f9c3dc03b
|
b8e977c2cadf840e3e75b3ec143a5a63407ae84f
|
/PS4_i.R
|
e5a6f5d3750555ebd5b8e89362f8c7725d090364
|
[] |
no_license
|
HaixaingZ/ESE5023_Assignments
|
fae2164cc940fd444002344fbc8ca5515429b023
|
d826eb82a1eac8ebe268634ddbbb0f1332bddf72
|
refs/heads/main
| 2023-02-14T21:52:01.412769
| 2021-01-08T11:38:51
| 2021-01-08T11:38:51
| 302,874,984
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,238
|
r
|
PS4_i.R
|
#1
library(tidyr)
library(dplyr)
library(ggplot2)
# load the GPS tracking data and work with it as a tibble
tracking_data <- read.csv("Tracking_data.csv", header = T)
td_tbl <- as_tibble(tracking_data)
names(td_tbl)
#boxplot: flying height average per tracker (UUID)
# NOTE(review): Longitude == 200 is presumably a sentinel for an invalid
# GPS fix — confirm against the data dictionary.
td_tbl %>%
filter(Longitude != 200) %>%
ggplot(aes(x = UUID, y = Geoid_height, color=UUID))+
geom_boxplot(na.rm = T)+
theme_classic()
# MingYANG noticed:
# it is not clear and aesthetic, maybe you can transform height(y-axis) as log data
# the end
#Time series of flying height for one tracker
install.packages("lubridate")
library(lubridate)
TS_data <- td_tbl %>%
group_by(UUID) %>%
mutate(date_td = as.Date(Collecting_time)) %>%
# BUG FIX: use the vectorised `&` inside filter(); the scalar `&&`
# only evaluated the first elements (and errors outright in R >= 4.3),
# so the filter did not actually subset row-by-row.
filter(Geoid_height >= 0 & UUID =="f8f86b8da0c4")
head(TS_data)
tail(TS_data)
# build a daily axis covering the observation window
# (2020-06-27 .. 2020-11-09 inclusive is 136 days, matching the
# hard-coded [1:136] below — confirm if the data window changes)
Date_start1 <- as.Date("2020-06-27")
Date_end1 <- as.Date("2020-11-09")
JD_start1 <- yday(Date_start1)
xaxis <- seq(Date_start1, Date_end1, by = "day")
High_ts <- ts(TS_data$Geoid_height[1:136], start=c(2020, JD_start1), frequency = 365)
str(High_ts)
plot(xaxis, High_ts, type = "l")
# MingYANG noticed:
# it is too simple, and should be classified into 1 point plot given by Prof.ZHU in the explaination of this assignment
# the end
#Histogram of speed, one colour per tracker
td_tbl %>%
group_by(UUID) %>%
ggplot(aes( x = Speed, color=UUID))+
geom_histogram()+
theme_classic()
# MingYANG noticed:
# it`s goof for combining three groups of data into one histogram, but it is not aesthetic
# the end
#Scatter plot of positions
td_tbl %>%
group_by(UUID) %>%
# BUG FIX: use the vectorised `&` inside filter(); the scalar `&&`
# does not subset row-by-row (and errors in R >= 4.3).
# NOTE(review): the sentinels look swapped relative to the boxplot
# section (there Longitude != 200 was filtered) — confirm which value
# marks an invalid fix for each coordinate.
filter(Longitude != 0 & Latitude != 200 ) %>%
ggplot(aes(x = Longitude, y = Latitude, color=UUID))+
geom_point(na.rm = T)+
theme_classic()+
ylim(0,180)+
xlim(0,180)
# MingYANG noticed:
# it is not clear and aesthetic, xlim and ylim should be well-matched
# the end
#Image plot
#2
# monthly mean temperature analysis from NOAA station file 2281305
baoan_data <- read.csv("2281305.csv",header = T)
library(tidyr)
library(dplyr)
library(ggplot2)
#2.1
banan_data_tbl <- as_tibble(baoan_data)
head(banan_data_tbl)
names(banan_data_tbl)
# TMP format is "+dddX"; "+9999" is the missing-value sentinel.
# Extract tenths-of-degree digits and average per year-month.
bdt <- banan_data_tbl %>%
filter(TMP != "+9999") %>%
mutate(date1 = substr(DATE,1,7), tmp = as.numeric(substr(TMP,3,5))/10) %>%
group_by(date1) %>%
summarise(Tmean = mean(tmp))
temperture_ts <- ts(bdt$Tmean, start=c(2010,1), frequency = 12)
plot(temperture_ts, type = "l")
#2.2 classical decomposition into trend/seasonal/random
temperture_components <- decompose(temperture_ts)
plot(temperture_components)
# MingYANG noticed:
# Check whether the error part follows a white noise distribution.
# the end
#2.3 identify ARIMA orders, then fit automatically
acf(temperture_ts)
pacf(temperture_ts)
install.packages("forecast")
library(forecast)
model_arima <- auto.arima(temperture_ts)
model_arima
# MingYANG noticed:
# Describe the fitting process in details in your report.
# the end
#2.4 refit on data before 2020-08 and forecast 5 months ahead
bdt2 <- bdt %>%
filter(date1 < "2020-08")
temperture_ts2 <- ts(bdt2$Tmean, start=c(2010,1), frequency = 12)
model_arima2 <- auto.arima(temperture_ts2)
meanT_forecast <- 5
month_in_plot <- 30
forecast_2meanT <- forecast(model_arima2, meanT_forecast)
plot(forecast(model_arima2, meanT_forecast), include = month_in_plot, xlab = "time", ylab = "mean_temperture")
#check
bdt %>%
filter(date1 > "2020-08") #check actual monthly T after 2020-08
forecast_2meanT$mean[1] #first forecast month T prediction (presumably 2020-08 — confirm)
forecast_2meanT$mean[2] #second forecast month T prediction
|
69ceae7a7dac7fcd019ec5904a7aca1cc8657712
|
fc71968aeab6975d4fedff2d2f9f9208d2ba2f5f
|
/gp_all/gp_hetsked/3_post.R
|
97cecb9c9c925985a1d7825508743f7f7bfcf9c3
|
[] |
no_license
|
tkmckenzie/tyche
|
855c3032fc3087e3fde3b02c264bfcbbabab3806
|
ac2bb9aafda1b195136a21ddeec6f41d3c540b15
|
refs/heads/master
| 2023-08-23T04:56:17.892335
| 2023-08-10T22:40:33
| 2023-08-10T22:40:33
| 151,305,485
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,182
|
r
|
3_post.R
|
library(abind)
library(ggplot2)
library(MASS)
library(rstan)
# NOTE(review): setwd() + rm(list = ls()) in a script are fragile —
# consider an RStudio project / here::here() instead.
setwd("~/git/fortuna/gp_hetsked")
rm(list = ls())
# data.RData presumably provides x and y; gp_hetsked_fit.RData provides
# stan.fit (and sample.iter, used below) — confirm against 1_/2_ scripts.
load("data.RData")
load("gp_hetsked_fit.RData")
stan.extract = extract(stan.fit)
#Prediction functions
# Squared-exponential (RBF) covariance between two sets of 1-D inputs.
# Returns a length(x.1) x length(x.2) matrix with entries
#   alpha^2 * exp(-(x.1[i] - x.2[j])^2 / (2 * rho^2))
# Vectorised with outer() — replaces the original O(N.1 * N.2)
# interpreted double loop with one matrix expression (same values).
cov.exp.quad = function(x.1, x.2, alpha, rho){
alpha.sq = alpha^2
rho.sq = rho^2
result = alpha.sq * exp(-outer(x.1, x.2, "-")^2 / (2 * rho.sq))
return(result)
}
# Heteroskedastic GP prediction for one posterior draw.
# Arguments:
#   X.pred             - prediction locations
#   y, x               - observed responses and locations
#   alpha, rho         - mean-process covariance hyperparameters
#   log.sigma          - latent log noise-sd values at the observed x
#   alpha.sigma, rho.sigma, log.sigma.error.sd - hyperparameters of the
#                        GP on log.sigma
# Returns a matrix: col 1 = predicted f mean, col 2 = predictive variance.
# BUG FIX: the body referred to the global `x.pred` instead of the
# parameter `X.pred` (mixed case), so the argument was silently ignored
# and the function only worked because the calling script happened to
# define x.pred globally. All uses now go through the parameter.
gp.pred = function(X.pred, y, x, alpha, rho, log.sigma, alpha.sigma, rho.sigma, log.sigma.error.sd){
#This returns predicted f mean (col 1) and variance (col 2)
N.pred = length(X.pred)
N = length(y)
#First generate realizations of sigma
Sigma = cov.exp.quad(x, x, alpha.sigma, rho.sigma) + log.sigma.error.sd^2 * diag(N)
L.Sigma = t(chol(Sigma))
K.div.y = solve(t(L.Sigma), solve(L.Sigma, log.sigma))
K.x.x.pred = cov.exp.quad(x, X.pred, alpha.sigma, rho.sigma)
log.sigma.pred.mu = t(K.x.x.pred) %*% K.div.y
v.pred = solve(L.Sigma, K.x.x.pred)
cov.f.pred = cov.exp.quad(X.pred, X.pred, alpha.sigma, rho.sigma) - t(v.pred) %*% v.pred
# log.sigma.rng = mvrnorm(1, log.sigma.pred.mu, cov.f.pred + log.sigma.error.sd^2 * diag(N.pred))
# sigma = exp(log.sigma.rng)
#Noting that sigma ~ MV ln N(log.sigma.pred.mu, cov.f.pred + log.sigma.error.sd^2 * diag(N.pred)),
#E[sigma]_i = exp(log.sigma.pred.mu_i + cov.f.pred_ii + log.sigma.error.sd_i^2)
sigma = exp(log.sigma.pred.mu + diag(cov.f.pred) + log.sigma.error.sd^2)
#Now fit mean of y
Sigma = cov.exp.quad(x, x, alpha, rho) + diag(exp(log.sigma)^2)
L.Sigma = t(chol(Sigma))
K.div.y = solve(t(L.Sigma), solve(L.Sigma, y))
K.x.x.pred = cov.exp.quad(x, X.pred, alpha, rho)
f.pred.mu = t(K.x.x.pred) %*% K.div.y
v.pred = solve(L.Sigma, K.x.x.pred)
cov.f.pred = cov.exp.quad(X.pred, X.pred, alpha, rho) - t(v.pred) %*% v.pred
result = cbind(f.pred.mu, diag(cov.f.pred) + sigma^2) #Unconditional variance of y
# result = cbind(f.pred.mu, sigma^2) #Variance of y|f
return(result)
}
# Evaluate the GP predictor for the i-th posterior draw.
# Pulls every hyperparameter from the global Stan extract and forwards
# them to gp.pred() at the global prediction grid.
# Relies on globals: stan.extract, x.pred, y, x.
gp.pred.i = function(i){
draws = stan.extract
return(gp.pred(x.pred, y, x,
draws$alpha[i], draws$rho[i], draws$log_sigma[i,],
draws$alpha_sigma[i], draws$rho_sigma[i],
draws$log_sigma_error_sd[i]))
}
#Prediction
# evaluate predictions on a 100-point grid spanning the observed x,
# once per posterior draw, then average over draws
N.pred = 100
x.pred = seq(min(x), max(x), length.out = N.pred)
# NOTE(review): prefer seq_len(sample.iter) over 1:sample.iter
f.pred = lapply(1:sample.iter, gp.pred.i)
f.pred = abind(f.pred, along = 3)
f.pred.mean = apply(f.pred, 1:2, mean)
#Plot
# 90% pointwise interval from the posterior-mean predictive variance
p = 0.9
point.df = data.frame(x = x, y = y)
fit.df = data.frame(x = x.pred, y = f.pred.mean[,1],
y.low = f.pred.mean[,1] - qnorm(p, sd = sqrt(f.pred.mean[,2])),
y.high = f.pred.mean[,1] + qnorm(p, sd = sqrt(f.pred.mean[,2])))
ggplot(point.df, aes(x, y)) +
geom_point() +
geom_line(data = fit.df, color = "red") +
geom_ribbon(data = fit.df, aes(ymin = y.low, ymax = y.high), alpha = 0.25) +
theme_bw()
|
f647b6285bbb0cb99b5e8cde235c3c55db7241d4
|
c5baacf45414864179c18c4878af5464e103ece8
|
/Lab18/CLI/calling_py_from_r.r
|
b128168597eb0dbf87d451a76a5df33d396202a3
|
[] |
no_license
|
VladimirShleyev/Method_R_doc
|
ed1cbbd9b59cc1cec445e87a9e5696f665f83065
|
85aa7c64e3816108f0e84a0ff1efa11cc8e37d3b
|
refs/heads/master
| 2023-07-16T00:29:57.871114
| 2021-09-03T12:13:19
| 2021-09-03T12:13:19
| 286,023,236
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 600
|
r
|
calling_py_from_r.r
|
# -*- coding: utf-8 -*-
# Call an external Python script from R via system2() and print its output.
command = 'python'
# note: quoting (single around double) is needed if the path contains spaces
path2script = "py_script.py"
# build the argument vector: the string to split and the delimiter pattern
string = '"3423423----234234----2342342----234234----234i"'
pattern = "----"
args = c(string, pattern)
# prepend the script path as the first argument to the interpreter
allArgs = c(path2script, args)
output = system2(command, args = allArgs, stdout=TRUE)
# print the pieces returned by the Python script ("Часть строки" = "part of the string")
print(paste("Часть строки:", output))
|
c27e963372ce8c2bf5f76d216636ec96c493ba7c
|
042d2ef2214c0d2a2c24dc9a4d89ffca871f000b
|
/tests/test.R
|
92ffc3075d32848ed5d342c8ea7ea7c1ce4ff0b6
|
[
"MIT"
] |
permissive
|
Afsharov/heartspot
|
f866c7fe559a186476c1db36811ec1ca9ad7db22
|
3c896ce47040e891aad8e972c7e30d1b85d6144b
|
refs/heads/master
| 2020-07-24T08:12:16.665421
| 2020-06-10T09:39:50
| 2020-06-10T09:39:50
| 207,860,297
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 153
|
r
|
test.R
|
# Smoke test: start the shiny app with shinytest's ShinyDriver and stop it.
library(shinytest)
library(testthat)
context("Test shiny app")
#open shiny app
# NOTE(review): 'path_to_shiny_app' is a placeholder — point it at the
# actual app directory before running this test.
app <- ShinyDriver$new('path_to_shiny_app')
#stop shiny app
app$stop()
|
41899649ff551b10200cb76811a2d0d5c3ac4983
|
afdd69685e8073c8259d4617a6e0bd49e8a98681
|
/TestScript.R
|
150ceee3c85b8412c1c9c423ac5f11c331899ccf
|
[] |
no_license
|
fcavaco/ProgrammingAssignment2
|
28a5802e279b0cd4db791d2ee4cbdfc34a45e314
|
c1f127ef86d39792e1d75aac26aea8df51127467
|
refs/heads/master
| 2021-01-12T20:15:44.643130
| 2015-01-25T03:25:05
| 2015-01-25T03:25:05
| 29,701,312
| 1
| 0
| null | 2015-01-22T21:31:01
| 2015-01-22T21:31:00
| null |
UTF-8
|
R
| false
| false
| 780
|
r
|
TestScript.R
|
# Timing demo for the makeCacheMatrix/cacheSolve pair defined in
# cachematrix.R: the second cacheSolve() call should be near-instant
# because the inverse is served from the cache.
#source('./cachematrix.R')
# Generate an invertible matrix
set.seed(123)
n=2000 # matrix dimension
M <- matrix(runif(n^2),n) # my randomnly generated matrix
#Mi <- solve(M) # my matrix inverse calculation (direct, for reference)
# create a cache (list of functions) matrix
cache <- makeCacheMatrix(M)
# 1. test how long it take to calculate the matrix inverse.
# keep in mind first time around the matrix inverse is not cached...
ti1 <- proc.time()
Mi1 <- cacheSolve(cache)
tf1 <- proc.time()
print((tf1-ti1)[1:3])
# 2. 2nd time around should retrieve it from cache. hence taking much less time...
ti2 <- proc.time()
Mi2 <- cacheSolve(cache)
tf2 <- proc.time()
print((tf2-ti2)[1:3])
# just to confirm these two Matrix inverse are the same...
# (prints 0 when every element is identical)
print(sum(Mi1 != Mi2))
|
9593d390b95f651b0d4689565e1da54c614bfbdb
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/texmex/R/hist.evm.R
|
bef3f735fadd79a42af04f632356d812dfba0876
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,349
|
r
|
hist.evm.R
|
# Prepare the data needed to plot a histogram of an evmOpt fit together
# with the fitted density: returns a "hist.evmOpt" list with the raw data,
# the density evaluated on a grid above the threshold, and histogram breaks.
hist.evmOpt <-
function(x, xlab, ylab, main, ...){
# Want parameters as a matrix with one row for passing
# through to family$rng etc.
a <- t(x$coefficients)
u <- x$threshold
# no finite threshold (e.g. GEV): start the density grid at the data minimum
if (!is.finite(u)){ u <- min(x$data$y) }
# FOLLOWING if BLOCK COMMENTED OUT TO ACCOUNT FOR DIFFERENCE
# BETWEEN GEV AND GPD. MIGHT HAVE TO DO SOMETHING MORE
# SENSIBLE LATER.
# if(a[2] < 0){
# UpperEndPoint <- u - a[1]/a[2]
# }
# else {
UpperEndPoint <- Inf
# }
dat <- x$data$y
dfun <- x$family$density
# use a throwaway hist() only to get the default break range
h <- hist(dat, plot = FALSE)
# density grid: from the threshold up to the data/support upper end
xx <- seq(u, min(UpperEndPoint, max(h$breaks)), length = 100)
y <- dfun(xx, a, x)
breaks <- seq(from=min(dat),to=max(dat),len=nclass.Sturges(dat)+1)
res <- list(dat=dat, dens=cbind(x=xx, y=y), breaks=breaks)
oldClass(res) <- "hist.evmOpt"
res
}
# Plot method for "hist.evmOpt" objects: draws the probability histogram,
# overlays the fitted density curve, and adds a rug of the raw data.
# Returns NULL invisibly.
plot.hist.evmOpt <- function(x, xlab=NULL, ylab=NULL, main=NULL, ...){
# Fill in default labels for any argument left as NULL.
if (is.null(xlab)) { xlab <- "Data" }
if (is.null(ylab)) { ylab <- "" }
if (is.null(main)) { main <- "Histogram and density" }
dens <- x$dens
hist(x$dat, prob = TRUE, ylim = c(0, max(dens[, 2])),
xlab = xlab, ylab = ylab, main = main, breaks = x$breaks, ...)
lines(dens[, 1], dens[, 2], col = 4)
rug(x$dat)
invisible()
}
# printing a hist.evmOpt object draws it
print.hist.evmOpt <- plot.hist.evmOpt
|
4817bd8f6b2a2a02488f081f69445f4a7d661d31
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/OTE/examples/Body.Rd.R
|
f9f8721ca655b892c0ac45d3cca58dc6c64ee72f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 167
|
r
|
Body.Rd.R
|
# Auto-extracted example script for the Body dataset shipped with OTE:
# loads the dataset and prints its structure.
library(OTE)
### Name: Body
### Title: Exploring Relationships in Body Dimensions
### Aliases: Body
### Keywords: datasets
### ** Examples
data(Body)
str(Body)
|
8b49758248cff1c5d13b75e874d1206e6e4d4345
|
37c46074f9e03ad7fbdc577cf6c528795182f861
|
/server.R
|
eff4fbb05c86ca39fa3c07ba4daaaf4c498eff7a
|
[] |
no_license
|
Eotoke/DDP_Assignment
|
469ee1558ae91be529b537f59a508304f7b8bec9
|
f18dd5e7b091ab977fa8d3828c6a25a3f1d54837
|
refs/heads/master
| 2016-09-06T07:51:06.998156
| 2015-04-26T17:12:07
| 2015-04-26T17:12:07
| 34,621,562
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 739
|
r
|
server.R
|
# Shiny server: fits an mpg regression on mtcars once at startup, then
# echoes the user's inputs and the model's prediction as tables.
library(shiny)
library(datasets)
## start of first time setup
#load mtcars
data(mtcars)
#fit a linear regression model based on mpg, hp, wt, am, cyl
fit <- lm(mpg ~ hp + wt + factor(am) + factor(cyl), data=mtcars)
## end of first time setup
shinyServer( function(input, output) {
# Generate an HTML table view of the input data
output$inputTable <- renderTable({
data.frame("hp"=input$horsepower,"wt"=input$weight,"cyl"=input$cylinder,"am"=input$am)
})
# Create a dynamic prediction based on the fitted regression model (fit) and the input values
# NOTE(review): the column is labelled "mph" but holds predicted mpg —
# renaming it would change the UI, so flagging only.
output$predictionTable <- renderTable({
data.frame("mph"=predict(fit,data.frame("hp"=input$horsepower,"wt"=input$weight,"cyl"=input$cylinder,"am"=input$am)))
})
}
)
|
75397306deea40148dbebb541a91df84c715722a
|
3980ac1a2de7fe2c607d3b7b38d93a8dd0855337
|
/example_scripts/intro_to_R.R
|
0457ed5be7466dc6e45e811e5b5d41246f27bc10
|
[] |
no_license
|
ea-guerette/ACC-From_thin_R
|
5bda9da9c08c992be9403e33a4466f1447fad39e
|
71aa7a79fd75d0839ca00d8d769b47e720a4344c
|
refs/heads/master
| 2022-11-24T21:32:34.727001
| 2020-07-27T02:32:07
| 2020-07-27T02:32:07
| 258,040,303
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,460
|
r
|
intro_to_R.R
|
#A script (.R file) lets you save your work
#It also lets you write comments on your code, by using #
#to run a line of code, press ctrl+enter while the cursor is on it
#to run several lines of code, you can highlight them with the mouse, then press ctrl+enter.
#You can also use the Run button
# Tab autofills variable names, it's a handy shortcut
#to install packages - they only need to be installed once, so this line can be commented out once it has been executed.
#install.packages("openair")
#Now, load the package(s) you want to use - this needs to be done *every* time you open a new session
library(openair)
#get some data in using {base} function
#in this case we want to import the gapminder.csv file, which is located in the "data" folder in this R project
gapminder <- read.csv("data/gapminder.csv")
names(gapminder) #returns column names
summary(gapminder) #returns a summary (stats if column is numeric) for each column in the dataframe
View(gapminder) #open the dataframe as a tab in the Editor
#very useful for timeseries
head(gapminder) #shows first 6 rows by default
head(gapminder, 10) #shows first 10 rows
tail(gapminder) #shows last 6 rows by default
#how to subset a dataframe using {base} commands and functions
#using {base} functions
#to select a specific column
gapminder$country
gapminder[ ,1] #R uses [row,column] and indices start at 1 (not 0 like in python)
#as row is not specified, R selects all rows
#to select several columns
gapminder[ ,c(1,4)]
gapminder[ ,1:4] #select columns 1 to 4, inclusively. Note gapminder[ ,c(1:4)] #also works
gapminder[,-c(1,4)] #drops columns 1 and 4
subset(gapminder, select = c("country", "lifeExp"))
#to select rows
gapminder[1:5,] #select first five rows, keeps all columns
gapminder[1:5,1:4] #select a block of data containing first five rows of first four columns
#conditional subsetting of rows
subset(gapminder, country =="Australia") #keeps only rows containing data for Australia
subset(gapminder, country !="Australia") #keeps all countries EXCEPT Australia
#conditional subsetting of rows + selection of variables
subset(gapminder, country =="Australia", select = c(1:4))
#adding a new column
gapminder$gdp <- gapminder$gdpPercap * gapminder$pop #a bit tedious to type
within(gapminder, gdp <- gdpPercap * pop)
#to save a subset of the main data, assign it to a new variable
AusCan <- subset(gapminder, country %in% c("Australia", "Canada")) #note the use of %in% instead of == when using a list of conditions
gapminder_2007 <- subset(gapminder, year == 2007)
#it's often a good idea to have a quick look at the data
#{base} has some plotting functionality
plot(data = gapminder, lifeExp ~ year) #plot() uses a y ~ x formula
#Q1: plot population versus year
#A1: plot(data = gapminder, pop ~ year)
#what if x is a categorical variable?
plot(data = gapminder, lifeExp ~ continent)
#we get a boxplot!
#This is because whoever wrote the function thought this was the best way to display this kind of data.
#we can also make histograms
hist(gapminder_2007$gdpPercap)
#There are many packages out there that let you make really cool plots, it's often a matter of preference which you use.
#because a lot of people are interested in using openair, we will use one of their plotting functions here
##We will now start to use {openair} functions
#plotting the data quickly using the scatterPlot function from {openair}
#Note that the function names are case sensitive!
scatterPlot(gapminder, x = "year", y = "lifeExp")
#Q1: plot population versus year
#Q2: plot life expectancy vs gdp per capita
#some good features of scatterPlot (and all openair plots) are "type" and "group"
#lets try group:
scatterPlot(gapminder, x = "year", y = "lifeExp", group = "continent")
scatterPlot(gapminder, x = "year", y = "lifeExp", group = "pop")
#and now type:
scatterPlot(gapminder, x = "year", y = "lifeExp", type = "continent")
scatterPlot(gapminder, x = "year", y = "lifeExp", type = "pop")
#we can plot a subset
subset(gapminder, country == "Australia")
scatterPlot(subset(gapminder, country == "Australia"), x = "year", y = "lifeExp")
subset(gapminder, year == 1977)
scatterPlot(subset(gapminder, year == 1977), x = "gdpPercap", y = "lifeExp")
# scatterPlot lets you color the points by another variable
scatterPlot(subset(gapminder, year == 1977), x = "gdpPercap", y = "lifeExp", z = "pop")
#you can change the axis scale from linear to logarithmic
scatterPlot(subset(gapminder, year == 1977), x = "gdpPercap", y = "lifeExp", z = "pop", log.x = T)
#openair has been specifically written to handle air quality data.
#It is quite powerful, but expects data in a certain format.
#We will go over how to read in data into the openair format some other time.
#for now, we will use 'mydata' which is an example dataset that comes with the openair package
head(mydata)
tail(mydata)
summary(mydata)
#date, ws, wd are expected, and should be in %Y-%m-%d, m/s and degrees, respectively.
#the nice thing is that if your data is as openair wants it, making plots then feels like magic.
#some examples:
windRose(mydata)
timePlot(mydata, pollutant = c("nox", "no2"))
timeVariation(mydata, pollutant = "o3")
#to plot only the diurnal cycle, assign the plot as a variable
o3_timeVar_plot <- timeVariation(mydata, pollutant = "o3")
print(o3_timeVar_plot, subset = "hour")
#to plot the annual cycle
print(o3_timeVar_plot, subset = "month")
|
7295a32437f9f21145f36982862b46a6be6f38ba
|
aac5ce74aab15e438641afdda0a92c10803a3256
|
/Clusttering/K-Means/Kmeans_plotly_BaseCredito.R
|
234d449fca8b7b84984b27b7874b01f4cad48917
|
[] |
no_license
|
joscelino/Machine_Learning_em_R
|
778b530754103826c2400fe9683e5ac5475048c8
|
698cf1ce17867a6c43a6091657b72f75b529fba7
|
refs/heads/master
| 2023-03-08T16:24:37.999374
| 2021-02-17T14:44:52
| 2021-02-17T14:44:52
| 281,133,419
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,919
|
r
|
Kmeans_plotly_BaseCredito.R
|
library(readr)
library(dplyr)
library(plotly)
# Function to identify NA values in the columns of a data frame.
# Returns a data.frame with one row per column that contains NAs:
#   index_col_na  - the column's position in `df`
#   quantidade_na - how many NAs that column holds
# Rewritten without the pacman/dplyr dependency (base R only), and fixed
# so that a data frame with NO missing values returns an empty result:
# the original built a zero-column data.frame from two NULLs and then
# errored when filtering on the non-existent quantidade_na column.
funcaoNA <- function(df){
# count NAs per column in one vectorised pass
quantidade <- vapply(df, function(col) sum(is.na(col)), integer(1))
index_col_na <- which(quantidade > 0)
resultados <- data.frame(index_col_na = unname(index_col_na),
quantidade_na = unname(quantidade[index_col_na]))
return(resultados)
}
# Data preparation: load the credit-card data and derive features
dados <- read_csv("D:/Projetos_em_R/Machine_Learning/Dados/credit_card_clients.csv")
funcaoNA(dados)
# total billed amount across the six monthly statements
dados$BILL_TOTAL <- dados$BILL_AMT1 + dados$BILL_AMT2 + dados$BILL_AMT3 +
dados$BILL_AMT4 + dados$BILL_AMT5 + dados$BILL_AMT6
# categorical columns as factors
dados$ID <- base::factor(dados$ID)
dados$SEX <- base::factor(dados$SEX)
dados$EDUCATION <- base::factor(dados$EDUCATION)
dados$MARRIAGE <- base::factor(dados$MARRIAGE)
dados$`default payment next month` <- base::factor(dados$`default payment next month`)
dplyr::glimpse(dados)
# Base for the 2-attribute model (credit limit vs total spend), standardised
baseKmeans <- base::data.frame(limite = dados$LIMIT_BAL , gasto = dados$BILL_TOTAL)
baseKmeans <- base::scale(baseKmeans)
# Elbow method: within-cluster sum of squares for k = 1..10
base::set.seed(1)
wcss <- base::vector()
for (i in 1:10) {
# fit a model with i clusters
kmeans <- stats::kmeans(x = baseKmeans, centers = i)
wcss[i] <- base::sum(kmeans$withinss)
}
graphics::plot(1:10, wcss, type = 'b', xlab = 'Clusters', ylab = 'WCSS')
# Fit the final 2-attribute model (5 clusters chosen from the elbow plot)
set.seed(1)
kmeans <- stats::kmeans(x = baseKmeans, centers = 5)
previsoes <- kmeans$cluster
# Interactive scatter plot coloured by cluster
df <- data.frame(baseKmeans, previsoes)
glimpse(df)
graf <- plot_ly(data = df, x=~limite, y=~gasto,
color = ~previsoes)
graf
# Data base for the model with more than 2 attributes
baseKmeans2 <- base::data.frame(limite = dados$LIMIT_BAL,
gasto = dados$BILL_TOTAL,
educacao = dados$EDUCATION,
civil = dados$MARRIAGE,
idade = dados$AGE)
# Normalisation: factors back to numeric codes, then standardise
baseKmeans2$educacao <- base::as.numeric(baseKmeans2$educacao)
baseKmeans2$civil <- base::as.numeric(baseKmeans2$civil)
baseKmeans2 <- base::scale(baseKmeans2)
# Elbow method for the multi-attribute base
base::set.seed(1)
wcss2 <- base::vector()
for (i in 1:10) {
# fit a model with i clusters
kmeans2 <- stats::kmeans(x = baseKmeans2, centers = i)
# BUG FIX: sum kmeans2$withinss — the original summed the first
# model's kmeans$withinss, so wcss2 never varied with i
wcss2[i] <- base::sum(kmeans2$withinss)
}
# BUG FIX: plot wcss2 — the original re-plotted wcss from the 2-attribute model
graphics::plot(1:10, wcss2, type = 'b', xlab = 'Clusters', ylab = 'WCSS2')
# Fit the multi-attribute model (4 clusters chosen from the elbow plot)
set.seed(1)
kmeans2 <- stats::kmeans(x = baseKmeans2, centers = 4)
previsoes2 <- kmeans2$cluster
# Interactive scatter plot coloured by cluster
df2 <- data.frame(baseKmeans2, previsoes2)
glimpse(df2)
graf1 <- plot_ly(data = df2, x = ~limite, y = ~gasto,
color = ~previsoes2)
graf1
# BUG FIX: colour by previsoes2 — previsoes comes from the 5-cluster
# model, so c(1:4)[previsoes] produced NA colours for cluster 5
pairs(baseKmeans2, col = c(1:4)[previsoes2])
|
f405d7f8ea4502525ef634ecfbdde1659760f5c4
|
1b473280443b277ef942c21ed2f786da252dea35
|
/R/op_base.R
|
c9d673ecd3e906dbb0f89fb92b73a7e56875b4b4
|
[] |
no_license
|
jtuomist/OpasnetUtils
|
d52def972ceb5daa0c3076ced4672fe577478863
|
7bbc38d71188f6bab3bfd969584817cca28f2a2d
|
refs/heads/master
| 2021-07-09T22:38:10.228825
| 2020-07-09T16:13:37
| 2020-07-09T16:13:37
| 150,073,444
| 0
| 1
| null | 2019-04-27T13:48:32
| 2018-09-24T08:24:19
|
R
|
UTF-8
|
R
| false
| false
| 306
|
r
|
op_base.R
|
# Old functions that are now wrappers for the opbase-family for compatibility reasons
# Backward-compatible wrapper: forwards to opbase.data(). The `dsn`
# argument is accepted for old callers but ignored by the opbase family.
op_baseGetData <- function(dsn, ident, ...) {
opbase.data(ident, ...)
}
# Hard-deprecated compatibility stub: always signals an error
# (message preserved verbatim for callers that match on it).
op_baseGetLocs <- function(dsn, ident, ...) {
stop('Deprecated method! op_baseGetLocs')
#opbase.old.locations(dsn, ident, ...)
}
|
cb6f7072ed818cce5dc5723d6f031d82d8516066
|
5bd1261a6fa4a9207e634f05b27ac599ca79cabc
|
/Microsphere Persistence in Larvae Exp. 2/Graph_Latex.R
|
8448f1895acb18b4cb58596780e20afef65773c8
|
[] |
no_license
|
miadrianna/Masters_Summer_Project
|
2ee217519f595ac4aa6639ae7d79a77a9ae59331
|
68ebb51d85f6a9a8fa8353edc9809c1825a6f459
|
refs/heads/master
| 2020-07-12T22:19:18.879915
| 2019-08-28T14:11:35
| 2019-08-28T14:11:35
| 204,920,655
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 690
|
r
|
Graph_Latex.R
|
# Bar chart of latex-microsphere persistence in larvae, grouped by instar
# and by days spent in clean water.
raw <- read.csv("./Graph_Data.csv")
raw$instar <- c(1.5, 5.5, 9.5, 6.5, 11.5, 10.5)
# Count matrix: rows = days in clean water (0/1/2), columns = instar (2/3/4).
counts <- matrix(
  c(12, 12, 3,
    NA,  9, 7,
    NA, NA, 2),
  nrow = 3, byrow = TRUE,
  dimnames = list(c(0, 1, 2), c(2, 3, 4))
)
svg("Graph_Latex.svg", width = 10, height = 7)
barplot(counts, beside = TRUE, col = c("darkblue", "red", "green"), xlab = "Instar", ylim = c(0, 16), ylab = "Number of Individuals with Microspheres", main = "Latex Microsphere Persistence in Larvae")
legend(10, 15, title = "Days in \n clean water", legend = c("0 days", "1 day", "2 days"), fill = c("darkblue", "red", "green"), box.col = "white")
dev.off()
|
b37a1ed5965673a5238a870198ea0612025d04fc
|
65d0a9128f2dc04cd97cea6ad2719fe4e78d6a29
|
/man/add_authors.Rd
|
f09d6f3398d184d027a7870a203f344293181cc2
|
[] |
no_license
|
systats/librarrry
|
668adf7008cd077407e76435c709bec03a3376a3
|
0a219f0d95101cf0febd3e9c3e8e47dfd022b2af
|
refs/heads/master
| 2020-04-11T01:06:20.407275
| 2018-11-29T16:08:40
| 2018-11-29T16:08:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 652
|
rd
|
add_authors.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main_scrapping.R
\name{add_authors}
\alias{add_authors}
\title{add_authors}
\usage{
add_authors(ids, n_batchs, batch_size, path = NULL)
}
\arguments{
\item{ids}{A tibble containing a column dc_identifier}
\item{n_batchs}{Corresponds to the number of batches to scrape}
\item{batch_size}{Corresponds to the size of the batches (max 25)}
\item{path}{Path to the current working directory}
}
\value{
A sample of the dataset which has been scraped
}
\description{
This function automatically updates the current authors dataset by getting new information on authors that have not yet been scraped
}
|
9c265f4cdf525325a164051e033b4b56f8603b5b
|
2a43faf3c612f631f6048b0004f7c519f0aa0e29
|
/R/qtl2pleio-package.r
|
a4b5a49b779ef5fe6829009bd4e31e471dcbdbc0
|
[
"MIT"
] |
permissive
|
fboehm/qtl2pleio
|
a78bddec1374a428c218927ebe8af0d9375dc9a8
|
d3406f399e2245502e101ed78f9d680b9827638d
|
refs/heads/master
| 2021-07-22T04:43:58.057545
| 2021-07-13T20:20:18
| 2021-07-13T20:20:18
| 104,493,705
| 10
| 2
|
NOASSERTION
| 2019-06-29T18:34:46
| 2017-09-22T16:06:34
|
R
|
UTF-8
|
R
| false
| false
| 584
|
r
|
qtl2pleio-package.r
|
#' qtl2pleio.
#'
#' Testing pleiotropy vs. separate QTL in multiparental populations
#'
#' @useDynLib qtl2pleio, .registration = TRUE
#' @name qtl2pleio
#' @docType package
#' @importFrom Rcpp sourceCpp
#' @importFrom stats profile
NULL
# Unload the package's compiled (C++) routines when the namespace is unloaded,
# so repeated load/unload cycles do not leave a stale DLL registered.
.onUnload <- function (libpath) {
library.dynam.unload("qtl2pleio", libpath)
}
## quiets concerns of R CMD check re: the .'s that appear in pipelines
# (these names are NSE column references used inside dplyr/ggplot pipelines)
if(getRversion() >= "2.15.1") utils::globalVariables(c(".", "lod", "marker_position", "profile_lod", "loglik", "Var1", "profile", "pleio_max", "marker", "trait", "log10lik", "null_log10lik"))
|
6f2050aec5f67e29ae57d8f48d1a34343def97bc
|
b1364236128077ae0a1ebc81f692268fb5df5ba3
|
/run_analysis.R
|
a16690554465843a8dd235fa31b0beabf4a48845
|
[] |
no_license
|
ppandolf/getdata-015
|
55c2f6db3f9e8ae1940fa035d6e1f95406f3cbe0
|
431537bf7ae8655da934ae0ad128ebceced5e836
|
refs/heads/master
| 2020-05-30T01:28:16.385489
| 2015-07-25T00:25:02
| 2015-07-25T00:25:02
| 37,830,939
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,141
|
r
|
run_analysis.R
|
#################################################################
# run_analysis.R compiles physical activity data collected
# from the accelerometers from the Samsung Galaxy S smartphone
# please see README and code book documentation for details
# this script is written to read input files from the
# /data/UCI HAR Dataset subdirectory within current working directory
#################################################################
library(dplyr)
##################################
# Read All Input Files
##################################
# Measurement matrices ("X"): forced numeric; comment.char disabled so no
# character inside the data can be mistaken for a comment marker.
x_test_readings <- read.table("./data/UCI HAR Dataset/test/X_test.txt",colClasses = "numeric",comment.char = "")
# Activity code per observation ("y") and subject id per observation.
y_test_activities <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
z_test_subjects <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
x_train_readings <- read.table("./data/UCI HAR Dataset/train/X_train.txt", colClasses = "numeric",comment.char = "")
y_train_activities <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
z_train_subjects <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
# Lookup tables: activity code -> label, and the measurement (feature) names.
activity_labels <- read.table("./data/UCI HAR Dataset/activity_labels.txt")
features <- read.table("./data/UCI HAR Dataset/features.txt")
######################################################
# Assign Subject and Activity ID Variable Names
######################################################
names(z_test_subjects)[1] <- "SubjectID"
names(z_train_subjects)[1] <- "SubjectID"
names(y_test_activities)[1] <- "ActivityID"
names(y_train_activities)[1] <- "ActivityID"
######################################################
# Add Train-Test class variable to Subject Data
######################################################
# Record which sample each row came from before the two sets are stacked.
z_test_subjects <- mutate(z_test_subjects, SubjectClass = "Test")
z_train_subjects <- mutate(z_train_subjects, SubjectClass = "Train")
########################################################################
# Extract - Determine and Select Measurements to Keep For Analysis
# using features file row indexes. Keeping mean() and std(), NOT freqMean
########################################################################
# Feature indexes mentioning mean/std, minus the meanFreq() variants.
meanStdColumns <- grep("mean|std", features$V2, value = FALSE)
meanFreqColumns <- grep("meanFreq", features$V2, value = FALSE)
colstokeep <- meanStdColumns[!meanStdColumns %in% meanFreqColumns]
xtrct_test <- x_test_readings[,colstokeep]
xtrct_train <- x_train_readings[,colstokeep]
#######################################################################
# Combining Subject, Activity and Reading detail within each class First
# Then the Test and Training samples are (vertically) combined
#######################################################################
# Resulting column order: SubjectID, SubjectClass, ActivityID, measurements.
xyz_test <- cbind(z_test_subjects, y_test_activities, xtrct_test)
xyz_train <- cbind(z_train_subjects, y_train_activities, xtrct_train)
merged_data <- rbind(xyz_test, xyz_train)
#######################################################################
# Original input and interim tables can be cleared now
#######################################################################
rm(x_test_readings, y_test_activities, z_test_subjects)
rm(x_train_readings, y_train_activities, z_train_subjects)
rm(xtrct_test, xtrct_train)
rm(xyz_test, xyz_train)
#######################################################################
# Apply the Activity labels
#######################################################################
# Replace the numeric activity codes with their descriptive labels in a
# single table lookup instead of six sequential substitutions.  match()
# keys on the code column (V1), so this is robust to the row order of the
# labels file and avoids the original's implicit numeric->character
# coercion after the first replacement.  (Codes outside the labels file
# would become NA; the dataset only contains coded activities.)
merged_data$ActivityID <- as.character(activity_labels$V2[match(merged_data$ActivityID, activity_labels$V1)])
#######################################################################
# Correct and apply the measurement aka "features" names
#######################################################################
# Fix the duplicated "BodyBody" typo present in some feature names, then
# turn them into syntactically valid, unique R column names.
features$V2 <- gsub("BodyBody", "Body", features$V2)
features$V2 <- make.names(features$V2, unique=TRUE, allow_=TRUE)
# Names for the kept measurement columns, preceded by the three key columns
# (matching the cbind order: subject, subject class, activity).
vnames <- (features$V2[colstokeep])
knames <- c("SubjectID" , "SubjectGroup" ,"Activity")
names(merged_data) <- c(knames, vnames)
#######################################################################
# Calculate arithmetic mean of each measurement variable
# by subject-activity combination
#######################################################################
# BUG FIX: arrange() returns a new data frame; the original discarded the
# result, so the sort was a no-op.  Assign it back so the data really are
# ordered by Activity then SubjectID.
merged_data <- arrange(merged_data, Activity, SubjectID)
# Per (Activity, SubjectID) group, take the mean of every kept measurement.
# Each output column renames the input feature to an avgtime*/avgfreq* name
# with .Xaxis/.Yaxis/.Zaxis suffixes; the explicit (long) list also fixes
# the output column order.
tdata <- summarize(group_by(merged_data, Activity, SubjectID),
avgtimeBodyAcc.mean.Xaxis = mean(tBodyAcc.mean...X, na.rm = TRUE),
avgtimeBodyAcc.mean.Yaxis = mean(tBodyAcc.mean...Y, na.rm = TRUE),
avgtimeBodyAcc.std.Zaxis = mean(tBodyAcc.std...Z, na.rm = TRUE),
avgtimeGravityAcc.std.Xaxis = mean(tGravityAcc.std...X, na.rm = TRUE),
avgtimeBodyAccJerk.mean.Yaxis = mean(tBodyAccJerk.mean...Y, na.rm = TRUE),
avgtimeBodyAccJerk.std.Zaxis = mean(tBodyAccJerk.std...Z, na.rm = TRUE),
avgtimeBodyGyro.std.Xaxis = mean(tBodyGyro.std...X, na.rm = TRUE),
avgtimeBodyGyroJerk.mean.Yaxis = mean(tBodyGyroJerk.mean...Y, na.rm = TRUE),
avgtimeBodyGyroJerk.std.Zaxis = mean(tBodyGyroJerk.std...Z, na.rm = TRUE),
avgtimeGravityAccMag.std.. = mean(tGravityAccMag.std.., na.rm = TRUE),
avgtimeBodyGyroMag.std.. = mean(tBodyGyroMag.std.., na.rm = TRUE),
avgfreqBodyAcc.mean.Yaxis = mean(fBodyAcc.mean...Y, na.rm = TRUE),
avgfreqBodyAcc.std.Zaxis = mean(fBodyAcc.std...Z, na.rm = TRUE),
avgfreqBodyAccJerk.std.Xaxis = mean(fBodyAccJerk.std...X, na.rm = TRUE),
avgfreqBodyGyro.mean.Yaxis = mean(fBodyGyro.mean...Y, na.rm = TRUE),
avgfreqBodyGyro.std.Zaxis = mean(fBodyGyro.std...Z, na.rm = TRUE),
avgfreqBodyAccJerkMag.std.. = mean(fBodyAccJerkMag.std.., na.rm = TRUE),
avgfreqBodyGyroJerkMag.std.. = mean(fBodyGyroJerkMag.std.., na.rm = TRUE),
avgtimeBodyAcc.mean.Zaxis = mean(tBodyAcc.mean...Z, na.rm = TRUE),
avgtimeGravityAcc.mean.Xaxis = mean(tGravityAcc.mean...X, na.rm = TRUE),
avgtimeGravityAcc.std.Yaxis = mean(tGravityAcc.std...Y, na.rm = TRUE),
avgtimeBodyAccJerk.mean.Zaxis = mean(tBodyAccJerk.mean...Z, na.rm = TRUE),
avgtimeBodyGyro.mean.Xaxis = mean(tBodyGyro.mean...X, na.rm = TRUE),
avgtimeBodyGyro.std.Yaxis = mean(tBodyGyro.std...Y, na.rm = TRUE),
avgtimeBodyGyroJerk.mean.Zaxis = mean(tBodyGyroJerk.mean...Z, na.rm = TRUE),
avgtimeBodyAccMag.mean.. = mean(tBodyAccMag.mean.., na.rm = TRUE),
avgtimeBodyAccJerkMag.mean.. = mean(tBodyAccJerkMag.mean.., na.rm = TRUE),
avgtimeBodyGyroJerkMag.mean.. = mean(tBodyGyroJerkMag.mean.., na.rm = TRUE),
avgfreqBodyAcc.mean.Zaxis = mean(fBodyAcc.mean...Z, na.rm = TRUE),
avgfreqBodyAccJerk.mean.Xaxis = mean(fBodyAccJerk.mean...X, na.rm = TRUE),
avgfreqBodyAccJerk.std.Yaxis = mean(fBodyAccJerk.std...Y, na.rm = TRUE),
avgfreqBodyGyro.mean.Zaxis = mean(fBodyGyro.mean...Z, na.rm = TRUE),
avgfreqBodyAccMag.mean.. = mean(fBodyAccMag.mean.., na.rm = TRUE),
avgfreqBodyGyroMag.mean.. = mean(fBodyGyroMag.mean.., na.rm = TRUE),
avgtimeBodyAcc.std.Xaxis = mean(tBodyAcc.std...X, na.rm = TRUE),
avgtimeGravityAcc.mean.Yaxis = mean(tGravityAcc.mean...Y, na.rm = TRUE),
avgtimeGravityAcc.std.Zaxis = mean(tGravityAcc.std...Z, na.rm = TRUE),
avgtimeBodyAccJerk.std.Xaxis = mean(tBodyAccJerk.std...X, na.rm = TRUE),
avgtimeBodyGyro.mean.Yaxis = mean(tBodyGyro.mean...Y, na.rm = TRUE),
avgtimeBodyGyro.std.Zaxis = mean(tBodyGyro.std...Z, na.rm = TRUE),
avgtimeBodyGyroJerk.std.Xaxis = mean(tBodyGyroJerk.std...X, na.rm = TRUE),
avgtimeBodyAccMag.std.. = mean(tBodyAccMag.std.., na.rm = TRUE),
avgtimeBodyAccJerkMag.std.. = mean(tBodyAccJerkMag.std.., na.rm = TRUE),
avgtimeBodyGyroJerkMag.std.. = mean(tBodyGyroJerkMag.std.., na.rm = TRUE),
avgfreqBodyAcc.std.Xaxis = mean(fBodyAcc.std...X, na.rm = TRUE),
avgfreqBodyAccJerk.mean.Yaxis = mean(fBodyAccJerk.mean...Y, na.rm = TRUE),
avgfreqBodyAccJerk.std.Zaxis = mean(fBodyAccJerk.std...Z, na.rm = TRUE),
avgfreqBodyGyro.std.Xaxis = mean(fBodyGyro.std...X, na.rm = TRUE),
avgfreqBodyAccMag.std.. = mean(fBodyAccMag.std.., na.rm = TRUE),
avgfreqBodyGyroMag.std.. = mean(fBodyGyroMag.std.., na.rm = TRUE),
avgtimeBodyAcc.std.Yaxis = mean(tBodyAcc.std...Y, na.rm = TRUE),
avgtimeGravityAcc.mean.Zaxis = mean(tGravityAcc.mean...Z, na.rm = TRUE),
avgtimeBodyAccJerk.mean.Xaxis = mean(tBodyAccJerk.mean...X, na.rm = TRUE),
avgtimeBodyAccJerk.std.Yaxis = mean(tBodyAccJerk.std...Y, na.rm = TRUE),
avgtimeBodyGyro.mean.Zaxis = mean(tBodyGyro.mean...Z, na.rm = TRUE),
avgtimeBodyGyroJerk.mean.Xaxis = mean(tBodyGyroJerk.mean...X, na.rm = TRUE),
avgtimeBodyGyroJerk.std.Yaxis = mean(tBodyGyroJerk.std...Y, na.rm = TRUE),
avgtimeGravityAccMag.mean.. = mean(tGravityAccMag.mean.., na.rm = TRUE),
avgtimeBodyGyroMag.mean.. = mean(tBodyGyroMag.mean.., na.rm = TRUE),
avgfreqBodyAcc.mean.Xaxis = mean(fBodyAcc.mean...X, na.rm = TRUE),
avgfreqBodyAcc.std.Yaxis = mean(fBodyAcc.std...Y, na.rm = TRUE),
avgfreqBodyAccJerk.mean.Zaxis = mean(fBodyAccJerk.mean...Z, na.rm = TRUE),
avgfreqBodyGyro.mean.Xaxis = mean(fBodyGyro.mean...X, na.rm = TRUE),
avgfreqBodyGyro.std.Yaxis = mean(fBodyGyro.std...Y, na.rm = TRUE),
avgfreqBodyAccJerkMag.mean.. = mean(fBodyAccJerkMag.mean.., na.rm = TRUE),
avgfreqBodyGyroJerkMag.mean.. = mean(fBodyGyroJerkMag.mean.., na.rm = TRUE))
#######################################################################
# Write out dataset for analysis
# commented statements following the write can be used to
# read the dataset back in from the /data subdirectory within
# current working directory
#######################################################################
# Space-separated text with a header row and no row numbers.
# NOTE(review): the argument is spelled `row.names`; `row.name` works only
# via partial argument matching.
write.table(tdata, file = "./data/getdata015_HARtidy.txt",row.name=FALSE)
#mytidy <- read.table("./data/getdata015_HARtidy.txt")
#View(mytidy)
|
115cc1da95d3edce37572a9f24a18236a25d378a
|
e36b28012ea9ce785b0767e3498669ed9b377f1b
|
/Package/PPLS.Rcheck/00_pkg_src/PPLS/man/meta_EMstep.Rd
|
057581074f7ac51e3e9844704a1f411aac3a7adf
|
[] |
no_license
|
selbouhaddani/PPLS
|
00bad52886159f085ca406a2a1d309472cdb231c
|
b553e605325782fe5a2ea137bb89b729730701ee
|
refs/heads/master
| 2020-12-14T16:11:02.777289
| 2018-01-23T07:28:55
| 2018-01-23T07:28:55
| 41,370,610
| 0
| 2
| null | 2016-05-18T12:27:02
| 2015-08-25T15:11:17
|
C++
|
UTF-8
|
R
| false
| true
| 1,239
|
rd
|
meta_EMstep.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EM_W_multi.R
\name{meta_EMstep}
\alias{meta_EMstep}
\title{Performs one EM step (use \link{PPLS} for fitting a PPLS model)}
\usage{
meta_EMstep(X, Y, W. = W, C. = C, Ipopu = as.factor(rep(1:2, c(N/2,
N/2))), params = replicate(2, list(B_T = B_T, sigX = sigX, sigY = sigY, sigH
= sigH, sigT = sigT), simplify = F))
}
\arguments{
\item{X}{Numeric matrix.}
\item{Y}{Numeric matrix.}
\item{W.}{Numeric matrix.}
\item{C.}{Numeric matrix.}
\item{Ipopu}{indicator of which populations present}
\item{B_T.}{Numeric}
\item{sigX.}{Numeric}
\item{sigY.}{Numeric}
\item{sigH.}{Numeric}
\item{sigT.}{Numeric}
}
\value{
A list with updated values for: \itemize{
\item{W }{Matrix}
\item{C }{Matrix}
\item{B }{Matrix}
\item{sighat }{Vector containing updated sigX and sigY}
\item{siglathat }{Vector containing sigH and sigT}
}
}
\description{
Performs one EM step (use \link{PPLS} for fitting a PPLS model)
}
\details{
This function passes its arguments to EMstepC (a C++ function, see \link{Rcpp}), which returns the expected sufficient statistics.
The maximization is done afterwards in this function. This may become a full C++ function later.
}
|
2e047e435c9dea5e3f18b1401e6bf5a121ce9fe4
|
623a2c07758165f333c79da129f8b4ee196c936c
|
/SVM.R
|
279e8ce39c418f2fd6212da540508985b5a39791
|
[] |
no_license
|
duccioa/01_STDM_WashingtonDC-Crime
|
65ed3f4dd5b3e57207646770a816f816411ad9fd
|
a69d2f2836533c76b9e6952ee33acf03c130d40e
|
refs/heads/master
| 2021-01-10T12:24:54.891266
| 2016-03-30T19:13:49
| 2016-03-30T19:13:49
| 53,758,035
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,066
|
r
|
SVM.R
|
# Load the run.SVM() helper used for every configuration below.
source('./FUN_runSVM.R')
# Grid of SVM configurations to compare.  Columns 6:9 (Error, BelMed,
# AbvMed, SVs) start as NA and are filled in from each run.SVM() result.
param_df = data.frame(Configuration = c('Std', 'A', 'B', 'C','A_opt','D'),
Type = c('C-svc', 'C-svc', 'C-svc', 'C-svc','C-svc', 'C-svc'),
C_value = c(10, 1, 100, 100,1, 1),
Kernel = c('rbfdot', 'rbfdot','rbfdot','rbfdot','rbfdot','polydot'),
Cross_val = c(0,0,0,10,0,3),
Error = NA,
BelMed = NA,
AbvMed = NA,
SVs = NA)
# Baseline run trained on 2011 only.
# NOTE(review): unlike every later run, Std0's results are not written back
# into param_df -- confirm this is intentional.
conf = 'Std'
param = param_df[param_df$Configuration %in% conf,]
Std0 = run.SVM(svm_type = param[1,2],
c_value = param[1,3],
ker = param[1,4], # rbfdot
kp = 'automatic',
cross_val = param[1,5],
yrs_tr = c(2011),
config = conf)
# For each configuration: look up its row in param_df, fit on years
# 2011-2013, and write the returned metrics back into columns 6:9.
conf = 'Std'
param = param_df[param_df$Configuration %in% conf,]
Std = run.SVM(svm_type = param[1,2],
c_value = param[1,3],
ker = param[1,4], # rbfdot
kp = 'automatic',
cross_val = param[1,5],
yrs_tr = c(2011, 2012, 2013),
config = conf)
param_df[param_df$Configuration %in% conf,6:9] = Std
conf = 'A'
param = param_df[param_df$Configuration %in% conf,]
A = run.SVM(svm_type = param[1,2],
c_value = param[1,3],
ker = param[1,4], # rbfdot
kp = 'automatic',
cross_val = param[1,5],
yrs_tr = c(2011, 2012, 2013),
config = conf)
param_df[param_df$Configuration %in% conf,6:9] = A
conf = 'B'
param = param_df[param_df$Configuration %in% conf,]
B = run.SVM(svm_type = param[1,2],
c_value = param[1,3],
ker = param[1,4], # rbfdot
kp = 'automatic',
cross_val = param[1,5],
yrs_tr = c(2011, 2012, 2013),
config = conf)
param_df[param_df$Configuration %in% conf,6:9] = B
conf = 'C'
param = param_df[param_df$Configuration %in% conf,]
# NOTE(review): `C` shadows base::C() for the rest of this session.
C = run.SVM(svm_type = param[1,2],
c_value = param[1,3],
ker = param[1,4], # rbfdot
kp = 'automatic',
cross_val = param[1,5],
yrs_tr = c(2011, 2012, 2013),
config = conf)
param_df[param_df$Configuration %in% conf,6:9] = C
# Same settings as configuration 'A' but with the `optimal` flag enabled.
conf = 'A_opt'
param = param_df[param_df$Configuration %in% conf,]
A_opt = run.SVM(svm_type = param[1,2],
c_value = param[1,3],
ker = param[1,4], # rbfdot
kp = 'automatic',
cross_val = param[1,5],
yrs_tr = c(2011, 2012, 2013),
config = conf,
optimal = TRUE)  # was `T`: use the literal, since `T` can be reassigned
param_df[param_df$Configuration %in% conf,6:9] = A_opt
# Polynomial-kernel configuration with 3-fold cross-validation.
conf = 'D'
param = param_df[param_df$Configuration %in% conf,]
D = run.SVM(svm_type = param[1,2],
c_value = param[1,3],
ker = param[1,4], # polydot
kp = 'automatic',
cross_val = param[1,5],
yrs_tr = c(2011, 2012, 2013),
config = conf)
param_df[param_df$Configuration %in% conf,6:9] = D
|
4f6d542e0a16dd77e29b3c6007a24eb04b3954aa
|
3bd2304fb88609fa21a6012aa436085832a4a70e
|
/run_analysis.R
|
f483f52df4a687028810aa7e0a36da866acf9ebb
|
[] |
no_license
|
dexterwang/GettingAndCleaningDataAssignment4
|
4e6a8b36adb93ca46e89d4a47269045930582819
|
129ab4825058d49a48b4a8d0a626f03a81f04760
|
refs/heads/master
| 2020-12-31T04:56:31.005801
| 2016-04-12T09:15:15
| 2016-04-12T09:15:15
| 56,044,398
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,854
|
r
|
run_analysis.R
|
#1. Merges the training and the test sets to create one data set.
# Load the "X" part of train & test data and merge them together
x_train <- read.table("./UCI HAR Dataset/train/X_train.txt",header=FALSE)
x_test <- read.table("./UCI HAR Dataset/test/X_test.txt",header=FALSE)
x_train_n_test <- rbind(x_train,x_test)
# Load the "Y" part of train & test data (the activity labels) and merge them together
# NOTE(review): the dataset ships these files as y_train.txt / y_test.txt
# (lower case); the capital "Y_" spelling only resolves on case-insensitive
# file systems -- confirm.
y_train <- read.table("./UCI HAR Dataset/train/Y_train.txt",header=FALSE)
y_test <- read.table("./UCI HAR Dataset/test/Y_test.txt",header=FALSE)
y_train_n_test <- rbind(y_train,y_test)
# Load the subject part of train & test data and merge them together
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt",header=FALSE)
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt",header=FALSE)
subject_train_n_test <- rbind(subject_train,subject_test)
# Load the list of features which will be used as variable names.
# Each line is "code name"; splitting on spaces yields an alternating
# code/name vector.
features <- read.table("./UCI HAR Dataset/features.txt",header=FALSE,sep="\n")
features <- unlist(lapply(as.character(features[,1]), strsplit,split=" "))
# keep the even positions (the feature names) of the alternating vector
features <- features[ c(1:length(features)) %% 2 ==0]
# the full dataset consist of the X, Y and subject with both train and test data
full_dataset <- cbind(x_train_n_test,y_train_n_test,subject_train_n_test)
# use the list of features to name the variables.
# the last "Y" and "subject" columns are named as "activity" and "subject"
names(full_dataset) <- c(features,"activity","subject")
#2. Extracts only the measurements on the mean and standard deviation for each measurement.
# use regular expression to match the columns with "std()" and "mean()" measurements
# (indexes 562 and 563 are assumed to be the appended activity/subject
# columns -- TODO confirm against the feature count)
sub_dataset <- full_dataset[,sort(c(grep("std\\(\\)",names(full_dataset)),grep("mean\\(\\)",names(full_dataset)),562,563))]
#3. Uses descriptive activity names to name the activities in the data set
# load activity labels from file
activities <- read.table("./UCI HAR Dataset/activity_labels.txt",header=FALSE,sep="\n")
activities <- unlist(lapply(as.character(activities[,1]), strsplit,split=" "))
# result in a data frame with an activity code column and corresponding activity name column
# (odd positions of the split vector are codes, even positions are names)
activities <- data.frame(code=activities[c(1:length(activities)) %% 2 == 1],name=activities[c(1:length(activities)) %% 2 == 0])
# clean the values, change data type, change string to lower case and replace "_" with " "
activities[,1] <- as.numeric(activities[,1])
activities[,2] <- tolower(activities[,2])
activities[,2] <- gsub("_"," ",activities[,2])
# replace activity code in sub_dataset with activity names
# NOTE(review): merge() sorts rows by the join key by default, so the row
# order of sub_dataset changes here.
sub_dataset <- merge(sub_dataset,activities,by.x="activity",by.y="code")
sub_dataset <- subset(sub_dataset,select=-activity)
names(sub_dataset)[names(sub_dataset)=="name"] <-"activity"
#4. Appropriately labels the data set with descriptive variable names.
# fix typo in variable names (some feature names contain "BodyBody")
names(sub_dataset) <- gsub("BodyBody","Body",names(sub_dataset))
#5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject
# aggregate sub_dataset by activity and subject, calculate the average for each variable
by1 <- sub_dataset$activity
by2 <- sub_dataset$subject
tidy_dataset <- aggregate(x=subset(sub_dataset,select=-c(activity,subject)),by=list(by1,by2),FUN="mean")
# swap the activity and subject columns so subject comes first
tidy_dataset<-tidy_dataset[,c(2,1,3:ncol(tidy_dataset))]
names(tidy_dataset)[1] <- "subject"
names(tidy_dataset)[2] <- "activity"
# write tidy dataset into file
write.table(tidy_dataset,file="./tidy_dataset.txt",sep=" ",col.names=TRUE,row.names=FALSE)
# test read tidy dataset from file
# NOTE(review): the argument is spelled `check.names`; `check.name` works
# only via partial argument matching.
testread_tidy_dataset <- read.table("./tidy_dataset.txt",header=TRUE,check.name=FALSE)
|
9b7b6e09757ab881e7ae87c1219bcadd91322943
|
b404a06211d0702b8b4ed40d9a8b05ba3009f02e
|
/R/RankingWeekRanks.r
|
b613d432f662e05d971df5791082131b436c75c1
|
[] |
no_license
|
saiemgilani/cfbd-api-R
|
2b94b8fbeff9462f3eeeee467f932bb2b22c4432
|
84535ae89b8b08eb4f63a7f136d62948a3f34def
|
refs/heads/master
| 2023-03-05T17:49:10.113757
| 2021-02-15T15:24:20
| 2021-02-15T15:24:20
| 339,117,428
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,790
|
r
|
RankingWeekRanks.r
|
# College Football Data API
#
# This is an API for accessing all sorts of college football data. It currently has a wide array of data ranging from play by play to player statistics to game scores and more.
#
# OpenAPI spec version: 2.3.5
# Contact: admin@collegefootballdata.com
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' RankingWeekRanks Class
#'
#' @field rank
#' @field school
#' @field conference
#' @field firstPlaceVotes
#' @field points
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
RankingWeekRanks <- R6::R6Class(
  'RankingWeekRanks',
  public = list(
    `rank` = NULL,            # numeric poll rank
    `school` = NULL,          # character school name
    `conference` = NULL,      # character conference name
    `firstPlaceVotes` = NULL, # numeric first-place vote count
    `points` = NULL,          # numeric poll points
    # Store any supplied fields, validating each is a length-1 value of the
    # expected base type.
    initialize = function(`rank`, `school`, `conference`, `firstPlaceVotes`, `points`){
      if (!missing(`rank`)) {
        stopifnot(is.numeric(`rank`), length(`rank`) == 1)
        self$`rank` <- `rank`
      }
      if (!missing(`school`)) {
        stopifnot(is.character(`school`), length(`school`) == 1)
        self$`school` <- `school`
      }
      if (!missing(`conference`)) {
        stopifnot(is.character(`conference`), length(`conference`) == 1)
        self$`conference` <- `conference`
      }
      if (!missing(`firstPlaceVotes`)) {
        stopifnot(is.numeric(`firstPlaceVotes`), length(`firstPlaceVotes`) == 1)
        self$`firstPlaceVotes` <- `firstPlaceVotes`
      }
      if (!missing(`points`)) {
        stopifnot(is.numeric(`points`), length(`points`) == 1)
        self$`points` <- `points`
      }
    },
    # Return the non-NULL fields as a named list (jsonlite-serializable).
    toJSON = function() {
      RankingWeekRanksObject <- list()
      if (!is.null(self$`rank`)) {
        RankingWeekRanksObject[['rank']] <- self$`rank`
      }
      if (!is.null(self$`school`)) {
        RankingWeekRanksObject[['school']] <- self$`school`
      }
      if (!is.null(self$`conference`)) {
        RankingWeekRanksObject[['conference']] <- self$`conference`
      }
      if (!is.null(self$`firstPlaceVotes`)) {
        RankingWeekRanksObject[['firstPlaceVotes']] <- self$`firstPlaceVotes`
      }
      if (!is.null(self$`points`)) {
        RankingWeekRanksObject[['points']] <- self$`points`
      }
      RankingWeekRanksObject
    },
    # Populate fields from a JSON string; keys absent from the JSON leave
    # the corresponding fields untouched.
    fromJSON = function(RankingWeekRanksJson) {
      RankingWeekRanksObject <- jsonlite::fromJSON(RankingWeekRanksJson)
      if (!is.null(RankingWeekRanksObject$`rank`)) {
        self$`rank` <- RankingWeekRanksObject$`rank`
      }
      if (!is.null(RankingWeekRanksObject$`school`)) {
        self$`school` <- RankingWeekRanksObject$`school`
      }
      if (!is.null(RankingWeekRanksObject$`conference`)) {
        self$`conference` <- RankingWeekRanksObject$`conference`
      }
      if (!is.null(RankingWeekRanksObject$`firstPlaceVotes`)) {
        self$`firstPlaceVotes` <- RankingWeekRanksObject$`firstPlaceVotes`
      }
      if (!is.null(RankingWeekRanksObject$`points`)) {
        self$`points` <- RankingWeekRanksObject$`points`
      }
    },
    # Serialize the object to a JSON string.
    # BUG FIX: the generated sprintf() template produced invalid JSON
    # (character fields interpolated without quotes), errored on
    # double-typed numerics ("%d" only accepts integers in R) and on NULL
    # fields.  Build the string from the field list via jsonlite instead.
    toJSONString = function() {
      as.character(jsonlite::toJSON(self$toJSON(), auto_unbox = TRUE))
    },
    # Replace ALL fields with the values parsed from a JSON string; keys
    # absent from the JSON reset the corresponding field to NULL.
    fromJSONString = function(RankingWeekRanksJson) {
      RankingWeekRanksObject <- jsonlite::fromJSON(RankingWeekRanksJson)
      self$`rank` <- RankingWeekRanksObject$`rank`
      self$`school` <- RankingWeekRanksObject$`school`
      self$`conference` <- RankingWeekRanksObject$`conference`
      self$`firstPlaceVotes` <- RankingWeekRanksObject$`firstPlaceVotes`
      self$`points` <- RankingWeekRanksObject$`points`
    }
  )
)
|
4eaacdae4df98aa23c9fabc03d6b2770505d948d
|
7ee80c5a6957496471b8060013036b6ee7320937
|
/sortOutNames.R
|
f96a8dc9b8bfdd50361f7ccfdc5ee9bc148ffc8d
|
[] |
no_license
|
Sloth1427/paper_currencies
|
6ee376e130ee12c9d6bce8a79415535ade77a272
|
9c0bd07aac47bb0b5f9512af63647ba26154babf
|
refs/heads/master
| 2022-12-19T10:10:30.564802
| 2020-09-11T16:02:30
| 2020-09-11T16:02:30
| 294,474,581
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,105
|
r
|
sortOutNames.R
|
# Turn author names from "surname, firstname middleinitial" into
# "surname initials", e.g. "Bloggs JR".
# Strip leading whitespace from each element of a character vector.
trim.leading <- function(x) {
  sub("^[[:space:]]+", "", x)
}
# Strip trailing whitespace from each element of a character vector.
trim.trailing <- function(x) {
  sub("[[:space:]]+$", "", x)
}
#trim.leading(R01data2005_2015_mainPI_Only$SURNAME_INITALS)
# BUG FIX: trim.trailing() returns a new vector; the original call discarded
# the result, so nothing was actually trimmed.  Assign it back.
R01data2005_2015_mainPI_Only$SURNAME_INITALS <- trim.trailing(R01data2005_2015_mainPI_Only$SURNAME_INITALS)
#get rid of anything after 2nd comma, if name contains more than one commma
# for (i in 1:nrow(R01data2005_2015_mainPI_Only){
# if which(R01data2005_2015_mainPI_Only$PI_NAMEs
#get surname and initials from name
# for (i in 1:nrow(R01data2005_2015_mainPI_Only)){
# print(paste0("working on row ", i))
# print(paste0("author :", R01data2005_2015_mainPI_Only$PI_NAMEs[i]))
#
# tempAuthorName <- strsplit(R01data2005_2015_mainPI_Only$PI_NAMEs[i], " ", fixed = FALSE, perl = FALSE, useBytes = FALSE)
#
# tempAuthorName <- unlist(tempAuthorName, recursive = TRUE, use.names = TRUE)
# print(tempAuthorName)
#
# test <- which(tempAuthorName == "")
# if (length(test) > 0){
# tempAuthorName <- tempAuthorName[-(which(tempAuthorName == ""))]
# }
#
# tempString <- tempAuthorName[1]
# for (j in 2:length(tempAuthorName)){
# tempInitial <- as.character(tempAuthorName[j])
# tempString <- paste0(tempString, " ", substring(tempInitial,1,1))
# }
#
# R01data2005_2015_mainPI_Only$SURNAME_INITALS[i] <- tempString
#
# }
#if num initials is > 2, remove all but first two initials
# Truncate each author's initials to at most two characters.
# SURNAME_INITALS3 starts as a copy of SURNAME_INITALS2 ("Surname INITIALS");
# rows whose initials part exceeds two characters are rewritten below.
R01data2005_2015_mainPI_Only$SURNAME_INITALS3 <- R01data2005_2015_mainPI_Only$SURNAME_INITALS2
# seq_len() instead of 1:nrow() so an empty data frame yields zero iterations.
for (i in seq_len(nrow(R01data2005_2015_mainPI_Only))) {
  # Split "Surname Initials" on spaces; element 2 is the initials chunk.
  # NOTE: entries without a space leave parts[2] as NA; nchar(NA) is 2,
  # so those rows are skipped (same as the original behaviour).
  parts <- strsplit(R01data2005_2015_mainPI_Only$SURNAME_INITALS2[i], " ")[[1]]
  if (nchar(parts[2]) > 2) {
    parts[2] <- substr(parts[2], 1, 2)
    R01data2005_2015_mainPI_Only$SURNAME_INITALS3[i] <- paste0(parts[1], " ", parts[2])
  }
}
#
#
|
d65621ea753e947f829a33b808fca00cbc9d6887
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/runner/examples/streak_run.Rd.R
|
39e28e4f33d593086810e18dcc21061887d814e9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 506
|
r
|
streak_run.Rd.R
|
library(runner)
### Name: streak_run
### Title: Running streak length
### Aliases: streak_run
### ** Examples
# Reproducible example data: two categorical vectors (the second contains
# NAs) and a vector of varying window widths.
set.seed(11)
x1 <- sample(c("a", "b"), size = 15, replace = TRUE)
x2 <- sample(c(NA_character_, "a", "b"), size = 15, replace = TRUE)
k <- sample(1:4, size = 15, replace = TRUE)
streak_run(x1)                           # simple streak run
streak_run(x1, k = 2)                    # streak run within a 2-element window
streak_run(x2, na_pad = TRUE, k = 3)     # streak run within k = 3 with NA padding
streak_run(x1, k = k)                    # window sizes varying per element, from k
|
a668dcbac6e11610d68253e0287e90f3742eb53b
|
724fa2771be1a900d6e935ea0ce79f5c54698098
|
/snippets/mvr_ext.R
|
9e302c5dd9ed8ba66c3d07ea7d10c70535b270b0
|
[] |
no_license
|
Katiedaisey/chemometrics
|
b4dbc656ae2ca8cc74ff219f9121f325becd455e
|
9b82ea3103f664bda75085d965682bacb6d6ad91
|
refs/heads/master
| 2021-01-10T19:02:08.671377
| 2015-04-24T14:20:46
| 2015-04-24T14:20:46
| 30,512,418
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,340
|
r
|
mvr_ext.R
|
RMSEPCV <- function(reg, plot = TRUE) {
  # Cross-validated RMSEP and explained-variance diagnostics for a fitted
  # pls/pcr ("mvr"-style) model.
  #
  # NOTE(review): the response vector `prop` is read from the calling /
  # global environment rather than passed in -- it must exist and match the
  # response the model was fitted on.  Confirm before relying on this.
  #
  # Args:
  #   reg:  fitted object exposing $ncomp, $validation$pred, $fitted.values,
  #         $residuals, $coefficients, $Xvar, $Xtotvar and $terms.
  #   plot: draw the diagnostic plots?  Default TRUE matches the original
  #         behaviour (the original ignored this argument and always drew).
  #
  # Returns a named list: RMSEPCV, Var_explained_<y>, Var_explained_<X>,
  # CV_min, suggested_comp_pcr, suggested_comp_CV.
  n_obs <- length(prop)
  ncomp <- reg$ncomp
  # Cross-validated RMSEP per number of components.
  # BUG FIX: the divisor was hard-coded to 20; use the observation count.
  CV <- vapply(seq_len(ncomp), function(i) {
    sqrt(sum((prop - reg$validation$pred[, , i])^2) / n_obs)
  }, numeric(1))
  # Percentage of response variance explained using the first i components.
  prop_exp <- vapply(seq_len(ncomp), function(i) {
    resid_i <- prop - reg$fitted.values[, , i]
    100 * (var(prop) - var(resid_i)) / var(prop)
  }, numeric(1))
  # Cumulative percentage of predictor (X) variance explained.
  x_exp <- unname(cumsum(100 * reg$Xvar[seq_len(ncomp)] / reg$Xtotvar))
  # Successive drops in CV error between consecutive component counts.
  # BUG FIX: the original looped over 1:(length(CV)-1), which is c(1, 0)
  # when ncomp == 1; this vectorized form is simply empty in that case.
  CVmin <- CV[-length(CV)] - CV[-1]
  comp <- match(min(CVmin), CVmin)   # component count with the smallest CV drop
  comp2 <- match(min(CV), CV)        # component count with the smallest CV error
  if (plot) {
    # BUG FIX: plotting now honours the `plot` argument, which the
    # original accepted but ignored.
    plot(CV, type = "b", col = "red", xlab = "Number of Components", ylab = "RMSEP CV", main = "PLS")
    plot(prop_exp, type = "b", col = "green",
         xlab = "Number of Components", ylab = "% Variance Explained", main = paste0("Variance in ", reg$terms[[2]]))
    plot(x_exp, type = "b", col = "green",
         xlab = "Number of Components", ylab = "% Variance Explained", main = paste0("Variance in ", reg$terms[[3]]))
    Y <- get(as.character(reg$terms[[2]]))
    index <- seq_along(Y)
    matplot(index, Y, type = "l", ylab = "Property", xlab = "Index", main = "PLS Regression- Scaled")
    matpoints(index, reg$fitted.values[, , comp2], type = "p", col = "blue", pch = 1)
    matplot(index, reg$residuals[, , comp2], type = "l", ylab = "Residuals", col = "blue", xlab = "Index",
            main = "Liquid-Fed Ceramic Melter: PLS Regression- Scaled")
    abline(h = 0, col = "gray")
    # BUG FIX: the original call ended with a trailing comma, which makes
    # R raise an "argument ... is empty" error at run time.
    plot(seq_len(dim(reg$coefficients)[[1]]), reg$coefficients[, , comp], type = "l", col = "red",
         xlab = "Variable No.", ylab = "Coefficient", main = "PLS Regression, Scaled")
    abline(h = 0, col = "gray")
  }
  # BUG FIX: the original placed several return(plot*) statements after the
  # main return, making them unreachable dead code; they are removed.
  out <- list(CV, prop_exp, x_exp, CVmin, comp, comp2)
  names(out) <- c("RMSEPCV",
                  paste0("Var_explained_", reg$terms[[2]]),
                  paste0("Var_explained_", reg$terms[[3]]),
                  "CV_min", "suggested_comp_pcr", "suggested_comp_CV")
  out
}
|
7fce3ca0dfd3cd14852366a114e5bf8ff21296e2
|
69b4c16c230cba3c065b3bad440f305e3b5474e4
|
/hirano2000/hirano.R
|
21c978ef0bf57568e7b01bf82512dd657ea68e56
|
[] |
no_license
|
ptoulis/little-projects
|
13a46f3b8b7d098690949be42658000f68d39dcf
|
588226c7238f32d9c184607ea10ee6e9c18c08eb
|
refs/heads/master
| 2021-01-19T00:19:36.439675
| 2014-10-03T16:20:01
| 2014-10-03T16:20:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,071
|
r
|
hirano.R
|
## Stat 240 Homework 2. Analysis of Hirano et. al.
##
## Panos Toulis, panos.toulis@gmail.com
source("terminology.R")
library(coda)
get.compliance.probs <- function(theta, X) {
  # Multinomial-logit probabilities of compliance type:
  # row i of the result is P(Ci = t | Xi, theta) for t over the columns.
  #
  # Returns: (N x p) matrix of probabilities, rows summing to 1.
  linear.pred <- X %*% t(get.psi.matrix(theta))
  # If very large/small values become a problem, clip linear.pred here.
  unnorm <- exp(linear.pred)            # (N x 3) unnormalised weights
  probs <- unnorm / rowSums(unnorm)     # normalise each row
  CHECK_TRUE(all(!is.na(probs)), msg="NA values in the Psi matrix..")
  probs
}
conditional.Ptyz <- function(t, z, Y, X, theta) {
  # Likelihood of the observed outcomes given a compliance type and arm:
  # v_i = P(Yi = yi | Ci = t, Zi = z, Xi, theta), for every unit i.
  #
  # Args:
  #   t     = compliance type
  #   z     = {0, 1}, treatment assignment
  #   Y     = vector of observed binary outcomes
  #   X     = covariates (N x p) matrix
  #   theta = parameter object holding the beta coefficients
  #
  # Returns: (N x 1) vector of Bernoulli likelihoods.
  CHECK_EQ(nrow(X), length(Y))
  CHECK_MEMBER(z, c(0,1), msg="z in {0,1}")
  # Logistic model with type/arm-specific coefficients.
  success.prob <- logistic(X %*% get.beta.vector(t, z, theta))
  # Bernoulli likelihood of each observed outcome.
  Y * success.prob + (1 - Y) * (1 - success.prob)
}
joint.Ptyz <- function(t, z, Y, X, theta) {
  # Joint probability of compliance type and observed outcome:
  # v_i = P(Ci = t, Zi = z, Yi = yi | Xi, theta)
  #     = P(Yi = yi | Ci = t, Zi = z, ...) * P(Ci = t | Xi, theta).
  #
  # Args: as in conditional.Ptyz().
  # Returns: (N x 1) vector.
  outcome.lik <- conditional.Ptyz(t, z, Y, X, theta)
  type.prior <- get.compliance.probs(theta, X)[, t]
  CHECK_EQ(length(outcome.lik), length(type.prior))
  outcome.lik * type.prior
}
get.stratified.probs <- function(Data, theta) {
  # Stratifies units by their observed (Z, W) cell and computes the joint
  # data probabilities for every compliance type and treatment arm.
  #
  # Returns: list(Szw..., Qtz...) where
  #   Szw = (N x 1) indicator, Szw_i = 1 iff Zi = z and Wi = w
  #         (S01: Zi=0, Wi=1 -> observed always-takers;
  #          S00: Zi=0, Wi=0 -> complier or never-taker);
  #   Qtz = (N x 1) vector, Qtz_i = P(Ci = t, Zi = z, Y = Yobs_i | Xi, theta).
  CHECK_Data(Data)
  CHECK_Theta(theta)
  joint.for <- function(t, Zi) joint.Ptyz(t, z=Zi, Data$Y, Data$X, theta)
  # Joint probabilities for every (type, arm) combination.
  Qn0 <- joint.for(kTYPE_NEVERTAKER, 0)
  Qn1 <- joint.for(kTYPE_NEVERTAKER, 1)
  Qc0 <- joint.for(kTYPE_COMPLIER, 0)
  Qc1 <- joint.for(kTYPE_COMPLIER, 1)
  Qa0 <- joint.for(kTYPE_ALWAYSTAKER, 0)
  Qa1 <- joint.for(kTYPE_ALWAYSTAKER, 1)
  # Indicators of the four observed (Z, W) strata; they partition the units.
  Z <- Data$Z
  W <- Data$W
  S00 <- (1 - Z) * (1 - W)
  S10 <- Z * (1 - W)
  S01 <- (1 - Z) * W
  S11 <- Z * W
  CHECK_SETEQ(S00 + S01 + S11 + S10, c(1), msg="disjoint")
  list(S00=S00, S10=S10, S01=S01, S11=S11,
       Qn0=Qn0, Qn1=Qn1,
       Qc0=Qc0, Qc1=Qc1,
       Qa0=Qa0, Qa1=Qa1)
}
log.likelihood <- function(theta, Data) {
  # Observed-data log-likelihood. The compliance type is not observed in
  # the (Z=0, W=0) and (Z=1, W=1) cells, so those terms marginalise over
  # the two types compatible with the cell.
  probs <- get.stratified.probs(Data, theta)
  with(probs,
       sum(S00 * log(Qc0 + Qn0) +   # Z=0,W=0: complier or never-taker
           S10 * log(Qn1) +         # Z=1,W=0: never-taker
           S01 * log(Qa0) +         # Z=0,W=1: always-taker
           S11 * log(Qc1 + Qa1)))   # Z=1,W=1: complier or always-taker
}
log.likelihood.complete <- function(theta, compliance, Data) {
  # Complete-data log-likelihood: every unit's compliance type is treated
  # as known (imputed), so no marginalisation over types is needed.
  CHECK_Theta(theta)
  CHECK_EQ(length(Data$Y), length(compliance))
  CHECK_MEMBER(compliance, kTYPES, msg="correct compliance codes")
  # Units whose observed (Z, W) cell reveals their type must carry it.
  alwaystakers = which(((1-Data$Z) * Data$W) == 1)
  nevertakers = which((Data$Z * (1-Data$W))==1)
  CHECK_MEMBER(alwaystakers, which(compliance==kTYPE_ALWAYSTAKER))
  CHECK_MEMBER(nevertakers, which(compliance==kTYPE_NEVERTAKER))
  # Per-type contribution, split by treatment arm.
  per.type <- vapply(kTYPES, function(t) {
    is.type <- as.numeric(compliance == t)
    q1 <- joint.Ptyz(t, 1, Data$Y, Data$X, theta)
    q0 <- joint.Ptyz(t, 0, Data$Y, Data$X, theta)
    sum(is.type * (Data$Z == 1) * log(q1) +
        is.type * (Data$Z == 0) * log(q0))
  }, numeric(1))
  sum(per.type)
}
log.prior <- function(theta, X) {
  # Prior as in Hirano et al.: equivalent to adding 2.5/N worth of
  # artificial observations at every (type, assignment, outcome)
  # combination, evaluated through the joint model.
  N <- nrow(X)
  total <- 0
  for (t in kTYPES) {
    for (z in c(0, 1)) {
      for (y in c(0, 1)) {
        total <- total +
          sum(log(joint.Ptyz(t=t, z=z, Y=rep(y, N), X=X, theta=theta)))
      }
    }
  }
  2.5 * total / N
}
Gibbs.sample.compliance <- function(Data, theta) {
  # Gibbs step: samples a compliance type for every unit from
  # P(C | Z, W, Y, X, theta).
  #
  # Units with (Z=0, W=1) are always-takers and (Z=1, W=0) never-takers
  # with certainty; only the two mixed cells (Z=W) need a random draw.
  #
  # Args: Data = DATA object, theta = THETA object (see terminology.R).
  #
  # Returns:
  #   (N x 1) of compliance types (in kTYPES)
  #   e.g. if theta=0 then for Z=0 and W=0 we pick equally between
  #   Complier and Never-taker.
  N = length(Data$Y)
  C = rep(NA, N)  # NOTE(review): never used below.
  Cprobs = get.stratified.probs(Data, theta)
  # Q11/Q00: first column = complier probabilities,
  # second column = always-taker (Z=W=1) resp. never-taker (Z=W=0).
  Q11 = matrix(c(Cprobs$Qc1, Cprobs$Qa1), nrow=N, ncol=2)
  Q00 = matrix(c(Cprobs$Qc0, Cprobs$Qn0), nrow=N, ncol=2)
  CA = c(kTYPE_COMPLIER, kTYPE_ALWAYSTAKER)
  CN = c(kTYPE_COMPLIER, kTYPE_NEVERTAKER)
  # Draw one type per unit proportionally to its row of probabilities;
  # fall back to a uniform draw when both probabilities underflow to 0.
  sample.type11 = apply(Q11, 1, function(x) {
    if(all(x==0)) return(sample(CA, size=1))
    sample(CA, size=1, prob=x)
  })
  sample.type00 = apply(Q00, 1, function(x) {
    if(all(x==0)) return(sample(CN, size=1))
    return(sample(CN, size=1, prob=x))
  })
  # Deterministic cells keep their forced label; mixed cells use the draws.
  # (Exactly one of the S indicators is 1 per unit, so this sum selects.)
  return(with(Cprobs, { S01 * kTYPE_ALWAYSTAKER + S10 * kTYPE_NEVERTAKER +
                          S11 * sample.type11 + S00 * sample.type00 }))
}
run.mcmc <- function(theta0, Data,
                     log.density,
                     niters=1000, proposal.scale=0.8) {
  # Runs the MCMC: alternates a Gibbs draw of the compliance types with
  # component-wise Metropolis-Hastings updates of the theta vector using
  # t(5) random-walk proposals scaled by `proposal.scale`.
  #
  # Args:
  #   theta0         = THETA object; provides the initial state and the
  #                    packing template for theta vectors.
  #   Data           = DATA object (See terminology)
  #   log.density    = function(theta, compliance, Data) -> log target density.
  #   niters         = total chain length.
  #   proposal.scale = scale of the t-distributed proposal increments.
  #
  # Returns: list(theta0, Data, theta.vector.chain, compliance.chain) with
  # the first 10% of iterations discarded as burn-in.
  CHECK_Data(Data)
  # no. of units.
  N = length(Data$Y)
  # Compliance chain (N x niters)
  C.chain = matrix(NA, nrow=N, ncol=niters)
  C.chain[, 1] = sample(kTYPES, size=N, replace=T)
  # Chain of theta vectors (unpacked theta)
  V.chain = matrix(NA, nrow=length(unpack.theta(theta0)), ncol=niters)
  V.chain[, 1] = unpack.theta(theta0)
  print("Running MCMC")
  pb = txtProgressBar(style=3)
  # Keep chain statistics here (acceptance probabilities seen so far).
  chain.stats = list(acc=c(0))
  for (t in 2:niters) {
    # 1. Sample the compliance types given the current theta.
    v.old = V.chain[, t-1]
    compliance.old = C.chain[, t-1]
    theta.old = pack.theta.vector(theta.vector=v.old, base.theta=theta0)
    compliance.new = Gibbs.sample.compliance(Data, theta.old)
    # 2. Sample new theta (Metropolis steps for each component)
    n0 = length(v.old)
    v.new = v.old
    # Runs intermediate MH steps, one coordinate at a time.
    # NOTE(review): the acceptance ratio below always compares against
    # theta.old (the start-of-iteration state) rather than the partially
    # updated v.new -- confirm this is intended.
    for(index in 1:n0) {
      v.cand = v.new
      v.cand[index] <- v.cand[index] + proposal.scale * rt(1, df=5)
      theta.cand = pack.theta.vector(theta.vector=v.cand, base.theta=theta0)
      log.acc = min(0, log.density(theta=theta.cand, compliance=compliance.new, Data=Data) -
                      log.density(theta=theta.old, compliance=compliance.new, Data=Data))
      acc = exp(log.acc)
      if(!is.na(acc)) {
        ## can be NaN if it tries a very weird value (remember we are trying t-proposals)
        chain.stats$acc <- c(chain.stats$acc, acc)
        if(runif(1) < acc) {
          v.new = v.cand
        }
      }
    }
    # Now v.new = new theta vector
    # Running mean of the last 200 acceptance probabilities.
    m.acc = mean(tail(chain.stats$acc, 200))
    if(m.acc < 0.2) {
      print(sprintf("WARNING: Chain has small acceptance rate %.2f", m.acc))
    } else if(t %in% round(seq(10, niters, length.out=10))) {
      print(sprintf("iter =%d/%d Acceptance rate = %.2f%%", t, niters, 100 * m.acc))
    }
    # Updates: commit this iteration's state to the chains.
    C.chain[, t] <- compliance.new
    V.chain[, t] <- v.new
    setTxtProgressBar(pb, value=t/niters)
  } # MCMC iterations.
  # Discard the first 10% of iterations as burn-in.
  keep.iters = tail(1:niters, 0.9 * niters)
  print("Statistics of acceptance probability.")
  print(summary(chain.stats$acc))
  return(list(theta0=theta0, Data=Data,
              theta.vector.chain=V.chain[, keep.iters],
              compliance.chain=C.chain[, keep.iters]))
}
mcmc.full.posterior <- function(theta0, Data, niters=1000, proposal.scale=0.1) {
  # MCMC targeting the full posterior:
  # complete-data log-likelihood plus the Hirano et al. log-prior.
  # The resulting ITT estimates should match the ITT contained in the
  # data itself.
  posterior.log.density <- function(theta, compliance, Data) {
    log.likelihood.complete(theta, compliance, Data=Data) +
      log.prior(theta=theta, X=Data$X)
  }
  run.mcmc(theta0=theta0,
           Data=Data,
           log.density=posterior.log.density,
           niters=niters,
           proposal.scale=proposal.scale)
}
mcmc.baseline <- function(Data, niters=1000, proposal.scale=0.1) {
  # Sanity-check chain: runs the sampler against a pure-noise
  # log-density, starting from a randomly perturbed empty theta.
  theta0 = perturb.theta(empty.theta(), epsilon=3)
  noise.log.density <- function(theta, compliance, Data) {
    log(runif(1))
  }
  run.mcmc(theta0=theta0,
           Data=Data,
           log.density=noise.log.density,
           niters=niters,
           proposal.scale=proposal.scale)
}
mcmc.prior <- function(nsamples=100) {
  # Tests whether the prior gives informative ITT estimates:
  #
  # 1. Sample theta from the prior (one component-wise MH sweep per draw)
  # 2. Sample ITT | theta
  # 3. Iterate
  #
  # Finally return the sampled ITTs. These should be centered around 0
  # since we want the prior not to give much information about the ITTs.
  #
  theta0 = empty.theta()
  v.old = unpack.theta(theta0)
  p = length(v.old)
  v.new = v.old
  # Fixed covariate sample used for all prior evaluations and ITT draws.
  X = sample.X(1000)
  itt.samples = list()
  for(t in kTYPES) {
    itt.samples[[t]] = rep(NA, nsamples)
  }
  chain.stats = list(acc=c())
  pb = txtProgressBar(style=3)
  for (t in 1:nsamples) {
    # Gibbs-MH for a new theta: one t(5) random-walk proposal per coordinate.
    v.cand = v.new
    for(i in 1:p) {
      v.cand = v.new
      v.cand[i] <- v.cand[i] + rt(1, df=5)
      theta.cand = pack.theta.vector(v.cand, base.theta=theta0)
      theta.new = pack.theta.vector(v.new, base.theta=theta0)
      log.acc = min(0, log.prior(theta=theta.cand, X=X) -
                      log.prior(theta=theta.new, X=X))
      # print(log.acc)
      acc = exp(log.acc)
      if(!is.na(acc)) {
        ## can be NaN if it tries a very weird value (remember we are trying t-proposals)
        chain.stats$acc <- c(chain.stats$acc, acc)
        if(runif(1) < acc) {
          v.new = v.cand
        }
      }
    }
    # Done with sampling theta.new
    # v.new has the sampled parameter vector
    v.old = v.new
    theta.new = pack.theta.vector(v.new, base.theta=theta0)
    # One ITT draw per compliance type under the sampled theta.
    itt = sample.ITT.theta(theta=theta.new, X=X, nsamples=1)
    for(j in 1:length(itt)) {
      itt.samples[[j]][t] <- itt[[j]][1]
    }
    setTxtProgressBar(pb, value=t/nsamples)
  }
  print("Acceptance summary")
  print(summary(chain.stats$acc))
  return(itt.samples)
}
## Sample ITT
sample.ITT.theta <- function(theta, X, nsamples=100) {
  # Samples intention-to-treat (ITT) effects implied by theta alone:
  # draws compliance types from P(C | X, theta), simulates both potential
  # outcomes for every unit, and averages their difference within each
  # compliance type.
  #
  # Args:
  #   theta:    THETA parameter object.
  #   X:        (N x p) covariate matrix.
  #   nsamples: number of simulated ITT draws per compliance type.
  #
  # Returns: list indexed by kTYPES; element t holds nsamples ITT draws.
  N = nrow(X)
  Psi = get.compliance.probs(theta, X)
  ITT = list()
  for(t in kTYPES) {
    ITT[[t]] = rep(0, nsamples)
  }
  for(i in 1:nsamples) {
    # Draw each unit's compliance type from its row of Psi.
    compliance = apply(Psi, 1, function(x) sample(kTYPES, size=1, prob=x))
    CHECK_EQ(length(compliance), N)
    for(t in kTYPES) {
      It = as.numeric(compliance==t)
      Nt = sum(It)
      # Simulate both potential outcomes for all units under type t.
      Yt1 = rbinom(N, size=1, prob=logistic(X %*% get.beta.vector(t, z=1, theta=theta)))
      Yt0 = rbinom(N, size=1, prob=logistic(X %*% get.beta.vector(t, z=0, theta=theta)))
      CHECK_EQ(length(Yt1), N)
      # Mean treated-control difference among type-t units
      # (defined as 0 when no unit drew type t this round).
      estimate = ifelse(Nt==0, 0, sum(It * (Yt1-Yt0)) / Nt)
      ITT[[t]][i] <- estimate
    }
  }
  return(ITT)
}
sample.ITT.mcmc <- function(mcmc.out, use.obs=T) {
  # Given chains of theta and compliance draws, sample the ITT effects.
  # 1. For every theta_j, C_j (j = iteration):
  #   1a. Sample Y1, Y0 through beta_tz (z=1, 0 resp.) for every t
  #   1b. Then impute Y1, Y0 for type t (observed outcomes override the
  #       simulated ones when use.obs is TRUE)
  #   1c. Compute ITT effects only for units of type t (Cj_i == t)
  #
  # Args:
  #   mcmc.out: output of run.mcmc() (theta0, Data, and both chains).
  #   use.obs:  if TRUE, use the observed outcome for the assigned arm and
  #             simulate only the counterfactual one.
  #             NOTE(review): default written as `T`; prefer `TRUE`.
  #
  # Returns:
  #   LIST(n, c, a) for nevertakers, compliers and alwaystakers respectively.
  compliance.matrix = mcmc.out$compliance.chain
  theta.vector.matrix = mcmc.out$theta.vector.chain
  Data = mcmc.out$Data
  theta0 = mcmc.out$theta0
  CHECK_Data(Data)
  warning("No test for sample.iTT")
  N = length(Data$Y)
  niters = ncol(compliance.matrix)
  CHECK_EQ(ncol(theta.vector.matrix), niters)
  CHECK_EQ(dim(compliance.matrix), c(N, niters))
  # return object.
  out <- list(N=c(), C=c(), A=c())
  # Appends `value` to the list entry named after the compliance type and
  # returns the updated list (caller reassigns `out`).
  add.value <- function(compliance.type, value) {
    name = kComplianceTypes[compliance.type]
    out[[name]] <- c(out[[name]], value)
    return(out)
  }
  for(iter in 1:ncol(compliance.matrix)) {
    v.iter <- theta.vector.matrix[, iter]
    C.iter <- compliance.matrix[, iter]
    theta.iter = pack.theta.vector(theta.vector=v.iter, base.theta=theta0)
    for(t in kTYPES) {
      # Ct = (0, 1, 1, 0, 0..) where 1 if i is of type t
      Ct = as.numeric(C.iter == t)
      # Nt = #units of type t
      Nt = sum(Ct)
      if(Nt==0) {
        out <- add.value(t, 0)
        next
      }
      # impute potential outcomes
      bt1 = get.beta.vector(t=t, z=1, theta=theta.iter)
      preds1 = Data$X %*% bt1
      # Sample Y | Ci=t, Zi=1, Xi, theta
      Y1.sample = rbinom(N, size=1, prob=logistic(preds1))
      # Sample Y | Ci=t, Zi=0, Xi, theta
      bt0 = get.beta.vector(t=t, z=0, theta=theta.iter)
      preds0 = Data$X %*% bt0
      Y0.sample = rbinom(N, size=1, prob=logistic(preds0))
      # encouraged (assignment indicator)
      Z1 = as.numeric(Data$Z == 1)
      Yobs = Data$Y
      # Impute potential outcomes
      # Yi(Zi=1, Ci=t) = {
      #    0 if Ci != t
      #    or
      #    Yobs_i if Z=1
      #    Y1_i   if Z=0
      Y1.imputed = Y1.sample
      Y0.imputed = Y0.sample
      if(use.obs) {
        Y1.imputed = Ct * (Yobs * Z1 + Y1.sample * (1-Z1))
        Y0.imputed = Ct * (Yobs * (1-Z1) + Y0.sample * Z1)
      }
      # NOTE(review): when use.obs is FALSE the sums below include all N
      # units, not only type-t units -- confirm this is intended.
      out <- add.value(t, sum(Y1.imputed - Y0.imputed) / Nt)
    }
  }
  return(out)
}
summarize.ITT.samples <- function(ITT, theta) {
  # Prints summaries of the sampled ITT effects per compliance type,
  # compares them against a heuristic mean-difference implied by theta,
  # and draws one histogram per type (3-row layout).
  #
  # Args:
  #   ITT:   list of ITT sample vectors, one element per compliance type.
  #   theta: THETA object used for the heuristic derivation.
  par(mfrow=c(3,1))
  # Fresh covariate draw for the heuristic calculation.
  X = sample.X(1000)
  # Assumes ITT is ordered Never-taker, Complier, Always-taker -- the same
  # order used when indexing full.names by position below.
  full.names = list(N="Never-taker", C="Complier", A="Always-taker")
  for(t in 1:length(ITT)) {
    print(sprintf("Summary for compliance %s", t))
    print(summary(ITT[[t]]))
    print("Heuristic derivation:")
    # Population-mean outcome under each arm, implied directly by theta.
    m1 = mean(logistic(X %*% get.beta.vector(t, z=1, theta=theta)))
    m0 = mean(logistic(X %*% get.beta.vector(t, z=0, theta=theta)))
    print(sprintf("Heuristic difference (ITT effect) = %.3f", m1-m0))
    hist(ITT[[t]], xlab="ITT effect", breaks=50, main=sprintf("Compliance type %s", full.names[[t]]))
  }
}
q1a <- function() {
  # Homework question 1a: ITT effects on W and Y, the implied compliance
  # shares, and the CACE (complier average causal effect) via the method
  # of moments, with bootstrap standard errors throughout.
  D = read.data()
  # ITT effect of binary A on B: difference of means, with a bootstrap SE.
  ITT.effect <- function(A, B) {
    # A is partitioning B
    CHECK_MEMBER(A, c(0, 1))
    B1 = B[A==1]
    B0 = B[A==0]
    ITTb = mean(B1) - mean(B0)
    boot.itt <- sd(replicate(1000, { B1rep = sample(B1, size=length(B1), replace=T);
                                     B0rep = sample(B0, size=length(B0), replace=T);
                                     return(mean(B1rep) - mean(B0rep)) }))
    return(list(itt=ITTb, se=boot.itt))
  }
  ITTw = ITT.effect(D$Z, D$W)
  print(sprintf("ITT effect on W = %.3f se=%.3f", ITTw$itt, ITTw$se))
  ITTy = ITT.effect(D$Z, D$Y)
  print(sprintf("ITT effect on Y= %.3f se=%.3f", ITTy$itt, ITTy$se))
  # IV estimate of the compliance-type shares:
  # compliers = ITT on W; always-takers = P(W=1 | Z=0); rest = never-takers.
  Pc = ITTw$itt
  Pa = sum((D$W * (1-D$Z))) / sum(D$Z==0)
  Pn = 1-Pa-Pc
  print(sprintf("Nevertakers=%.2f Compliers=%.2f Alwaystakers=%.2f", Pn, Pc, Pa))
  # CACE via moment decomposition of the mixed (Z, W) cells.
  # Note: Pc, Pa, Pn are captured from the enclosing environment and are
  # NOT re-estimated inside the bootstrap below.
  get.cace.effect = function(W, Z, Y) {
    # Observed mean outcome in the (Z=z, W=w) cell.
    get.Yzw.obs <- function(z, w) {
      return(mean(Y[W==w & Z==z]))
    }
    Y0c.obs = get.Yzw.obs(0, 0)
    Y1c.obs = get.Yzw.obs(1, 0)
    Y0t.obs = get.Yzw.obs(0, 1)
    Y1t.obs = get.Yzw.obs(1, 1)
    # Subtract the never-taker (resp. always-taker) contribution from the
    # mixed cells to isolate the complier means.
    EY0.complier = (Y0c.obs - Y1c.obs * Pn / (Pn + Pc)) / (Pc / (Pc + Pn))
    EY1.complier = (Y1t.obs - Y0t.obs * Pa / (Pa + Pc)) / (Pc / (Pc + Pa))
    # print(sprintf("Compliers Y0= %.3f Y1=%.3f", EY0.complier, EY1.complier))
    tau.cace = EY1.complier - EY0.complier
    # print(sprintf("CACE effect = %.3f", tau.cace))
    return(tau.cace)
  }
  tau = get.cace.effect(D$W, D$Z, D$Y)
  N = length(D$Y)
  # Bootstrap: resample units with replacement and recompute the CACE.
  se = sd(replicate(1000, { units = sample(1:N, size=N, replace=T);
                            W = D$W[units]
                            Z = D$Z[units]
                            Y = D$Y[units]
                            get.cace.effect(W, Z, Y)
                          }))
  print(sprintf("Outcome = %.3f se=%.3f", tau, se))
}
|
4cd1e2b33ac6b7fef542575fe9660a4b39483ac4
|
6e913f34e5c51da1f1dc936d79bb30d23313a6cf
|
/tests/testthat/test_hy_stn_regulation.R
|
e258e271b670b406f06046d2399d630ae3e3237b
|
[
"Apache-2.0"
] |
permissive
|
ropensci/tidyhydat
|
8c81e07d08089aee73173676d6d579f6c1fbcf5b
|
00ee7ba237416536e0c5de92692c92ac5f5b0cd2
|
refs/heads/main
| 2023-08-31T06:49:38.270327
| 2023-08-17T21:59:32
| 2023-08-17T21:59:32
| 100,978,874
| 78
| 21
|
Apache-2.0
| 2023-08-17T21:58:25
| 2017-08-21T18:01:23
|
R
|
UTF-8
|
R
| false
| false
| 1,257
|
r
|
test_hy_stn_regulation.R
|
# Tests for hy_stn_regulation(): station filtering, province filtering,
# input validation, and the no-argument default.

# NOTE: the first description previously read "province arguments" (a
# copy-paste of the second test) although it exercises `station_number`.
test_that("hy_stn_regulation accepts single and multiple station_number arguments", {
  stns <- "08NM083"
  expect_identical(unique(
    hy_stn_regulation(
      station_number = stns,
      hydat_path = hy_test_db()
    )$STATION_NUMBER
  ), stns)

  stns_multi <- c("08NM083", "08NE102")
  expect_identical(length(unique(
    hy_stn_regulation(
      station_number = stns_multi,
      hydat_path = hy_test_db()
    )$STATION_NUMBER
  )), length(stns_multi))
})

test_that("hy_stn_regulation accepts single and multiple province arguments", {
  expect_true(nrow(
    hy_stn_regulation(
      prov_terr_state_loc = "BC",
      hydat_path = hy_test_db()
    )
  ) >= 1)
  expect_true(nrow(
    hy_stn_regulation(
      prov_terr_state_loc = c("BC", "YT"),
      hydat_path = hy_test_db()
    )
  ) >= 1)
})

test_that("hy_stn_regulation produces an error when a province is not specified correctly", {
  expect_error(hy_stn_regulation(
    prov_terr_state_loc = "BCD",
    hydat_path = hy_test_db()
  ))
  expect_error(hy_stn_regulation(
    prov_terr_state_loc = c("AB", "BCD"),
    hydat_path = hy_test_db()
  ))
})

test_that("hy_stn_regulation gathers data when no arguments are supplied", {
  expect_true(nrow(hy_stn_regulation(
    hydat_path = hy_test_db()
  )) >= 1)
})
|
e738363d14d363bf5aa4c5c0734b04e20fc90cd8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/phenofit/examples/PhenoExtractMeth.Rd.R
|
17e50669eb5f08ddc87560c05d66cb1ad95a592b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 626
|
r
|
PhenoExtractMeth.Rd.R
|
library(phenofit)
### Name: PhenoExtractMeth
### Title: Phenology Extraction methods
### Aliases: PhenoExtractMeth PhenoTrs PhenoDeriv PhenoGu PhenoKl
### ** Examples

library(phenofit)

# Simulate a vegetation time-series from a double-logistic curve
# (parameters as documented for doubleLog.Beck).
curve.fun <- doubleLog.Beck
curve.par <- c(
  mn  = 0.1,
  mx  = 0.7,
  sos = 50,
  rsp = 0.1,
  eos = 250,
  rau = 0.1)
t    <- seq(1, 365, 8)   # observation dates (8-day step)
tout <- seq(1, 365, 1)   # daily output dates
y    <- curve.fun(curve.par, t)

# Fit several curve-fitting methods ("Klos" omitted: too slow).
methods <- c("AG", "Beck", "Elmore", "Gu", "Zhang")
fFITs <- curvefit(y, t, tout, methods)
fFIT  <- fFITs$fFIT$AG

# Extract phenology with four different methods, one panel each.
par(mfrow = c(2, 2))
PhenoTrs(fFIT)
PhenoDeriv(fFIT)
PhenoGu(fFIT)
PhenoKl(fFIT)
|
e59d98d30f1c00687559bb68aa081261c2d067d4
|
44a3fad6338a63ac5417b1e52e47420c0e013f45
|
/R/ConfidenceBands.R
|
7fc52cad2ca1cb66dc5db361c8ffb3c4083bbf44
|
[] |
no_license
|
cran/ExtremalDep
|
4faac60ce0040262a98410edc6488ddf939ad9bd
|
18238416ddb6567610c4457dc332316272dbd16e
|
refs/heads/master
| 2023-03-06T18:03:59.304908
| 2023-02-26T14:40:02
| 2023-02-26T14:40:02
| 236,595,530
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,820
|
r
|
ConfidenceBands.R
|
#############################################################
### Authors: Giulia Marcon and Simone Padoan ###
### Emails: giulia.marcon@phd.unibocconi.it, ###
### simone.padoan@unibocconi.it ###
### Institution: Department of Decision Sciences, ###
### University Bocconi of Milan ###
### File name: ConfidenceBands.r ###
### Description: ###
### This file contains routines that compute ###
### (1-a)% bootstrap confidence bands for the estimated ###
### Pickands dependence function ###
### Last change: 15/08/2016 ###
#############################################################
# Subroutine that computes resampled estimates
beed.boot <- function(data, x, d=3, est = c("ht", "md", "cfg", "pick"),
                      margin=c("emp", "est", "exp", "frechet", "gumbel"),
                      k = 13, nboot = 500, y = NULL,
                      print = FALSE){
  # Nonparametric bootstrap of the Bernstein estimator of the Pickands
  # dependence function.
  #
  # Args:
  #   data:   (n x d) data matrix/data.frame.
  #   x:      grid of simplex points where A is evaluated.
  #   d:      dimension; must equal ncol(data).
  #   est:    estimator, passed through to beed().
  #   margin: marginal transformation, passed through to beed().
  #   k:      polynomial order for beed().
  #   nboot:  number of bootstrap resamples.
  #   y:      optional precomputed beed() fit on the original data.
  #   print:  if TRUE, report progress every 100 iterations.
  #
  # Returns (invisibly) list(A, bootA, beta):
  #   A:     point estimate of A on the grid x;
  #   bootA: matrix of bootstrap estimates of A, one column per resample;
  #   beta:  matrix of bootstrap polynomial coefficients, one column per
  #          resample.
  ndata <- nrow(data)
  if (d != ncol(data))
    stop("`data' must be a matrix/data.frame with `d' columns")

  # Fit on the original data unless a fit was supplied.
  if (is.null(y))
    fit <- beed(data = data, x = x, d = d, est = est, k = k,
                plot = FALSE, margin = margin)
  else fit <- y

  # Collect per-resample results in lists and bind once at the end:
  # growing matrices with cbind() inside the loop is O(nboot^2).
  beta.list <- vector("list", nboot)
  bootA.list <- vector("list", nboot)
  for (i in seq_len(nboot)) {
    indx <- sample(1:ndata, ndata, replace = TRUE)
    bootsamp <- data[indx, ]
    mod <- beed(data = bootsamp, x = x, d = d, est = est, k = k,
                plot = FALSE, margin = margin)
    beta.list[[i]] <- mod$beta
    bootA.list[[i]] <- mod$A
    # Progress report (the old message(paste(c(...))) printed the words
    # with no separators).
    if (print && i %% 100 == 0)
      message(sprintf("iteration %d out of %d", i, nboot))
  }

  out <- list(A = fit$A,
              bootA = do.call(cbind, bootA.list),
              beta = do.call(cbind, beta.list))
  invisible(out)
}
# Main routine that computes the confidence bands
beed.confband <- function(data, x, d=3, est = c("ht", "md", "cfg", "pick"),
                          margin=c("emp", "est", "exp", "frechet", "gumbel"),
                          k = 13, nboot = 500, y = NULL, conf = 0.95,
                          plot = FALSE, print = FALSE){
  # Computes (conf)% bootstrap confidence bands for the estimated Pickands
  # dependence function A, both pointwise on the grid x and via bands on
  # the polynomial coefficients (the latter give the plotted bands).
  #
  # Args: as in beed.boot(), plus
  #   conf: confidence level (default 0.95).
  #   plot: if TRUE, plot the estimate with its coefficient-based bands
  #         (d = 2 as curves, d = 3 as contour plots; d >= 4 unsupported).
  nx <- nrow(x)
  ndata <- nrow(data)
  ddata <- ncol(data)
  if(d!=ddata)
    stop("`data' must be a matrix/data.frame with `d' columns")
  # Bootstrap resampling of the estimator (reuses a prior fit if supplied).
  if(is.null(y))
    fit <- beed.boot(data = data, x = x, d = d, est = est, k = k,
                     nboot = nboot, print = print,
                     margin = margin)
  else
    fit <- beed.boot(data = data, x = x, d = d, est = est, k = k,
                     y = y, nboot = nboot, print = print, margin = margin)
  A.tilde <- fit$A
  bootA <- fit$bootA
  beta <- fit$beta
  alpha <- 1-conf
  # Confidence bands pointwise on x: empirical alpha/2 and 1-alpha/2
  # quantiles of the bootstrap estimates at each grid point.
  # NOTE(review): round(nboot*(alpha/2)) is 0 when nboot*alpha < 1, which
  # would index ord[0] -- confirm nboot is large enough for `conf`.
  A.low.pointwise <- A.up.pointwise <- numeric(nx)
  for (i in 1:nx){
    ord <- sort(bootA[i,])
    A.low.pointwise[i] <- ord[round(nboot*(alpha/2))]
    A.up.pointwise[i] <- ord[round(nboot*(1-alpha/2))]
  }
  # Number of coefficients
  p <- nrow(beta)
  # Confidence bands on each beta coefficient (same quantile scheme).
  low.beta <- up.beta <- numeric(p)
  for (i in 1:p){
    ord <- sort(beta[i,])
    low.beta[i] <- ord[round(nboot*(alpha/2))]
    up.beta[i] <- ord[round(nboot*(1-alpha/2))]
  }
  # Re-evaluate A from the upper/lower coefficient envelopes.
  A.up.beta <- beed(data = data, x = x, d = d, est = est, k = k,
                    beta = up.beta, margin = margin)$A
  A.low.beta <- beed(data = data, x = x, d = d, est = est, k = k,
                     beta = low.beta, margin = margin)$A
  if (plot == TRUE){
    if(d == 2){
      # Estimate plus bands inside the triangular admissible region of A.
      plot(x[,1],x[,2],type='n',xlab='w',ylab='A(w)',ylim=c(.5,1))
      polygon(c(0, 0.5, 1), c(1, 0.5, 1), lty = 1, lwd = 1, border = 'grey')
      lines(x[,1],A.tilde,lty=1,col=1)
      lines(x[,1],A.low.beta,lty=2,col=1)
      lines(x[,1],A.up.beta,lty=2,col=1)
    }
    if(d == 3){
      # Assumes x is a numg x numg regular grid over the 2-simplex.
      numg <- sqrt(nx)
      xy <- seq(0,1,length=numg)
      mat <- matrix(A.tilde, numg, numg)
      matup <- matrix(A.up.beta, numg, numg)
      matlow <- matrix(A.low.beta, numg, numg)
      plot(xy, xy, type='n', xlab=expression(w[1]), ylab=expression(w[2]))
      image(x=xy, y=xy, z=mat, col=heat.colors(numg),add=TRUE)
      contour(x=xy, y=xy, z=mat, add=T,col='black',labcex=.6,lty=1)
      contour(x=xy, y=xy, z=matup, add=T,col='black',labcex=.6,lty=2)
      contour(x=xy, y=xy, z=matlow, add=T,col='black',labcex=.6,lty=2)
    }
    if (d >= 4)
      warning("cannot plot in high dimensions")
  }
  out=list(A=A.tilde,A.up.beta=A.up.beta,A.low.beta=A.low.beta,
           bootA=bootA,A.up.pointwise=A.up.pointwise,
           A.low.pointwise=A.low.pointwise,up.beta=up.beta,
           low.beta=low.beta)
  invisible(out)
}
|
ebb42bec320b7a59e21a5102681df819c97de508
|
a5fc86001154f8b81c28009e351df7c43ae1ff5a
|
/inst/script/make-metadata.R
|
9ebbf3e7234aeafe8d9de9d397fea560375a7a49
|
[] |
no_license
|
Liubuntu/SeqSQC
|
e0e8ab9fa786d8424b74e7e764bfe04855a30bb7
|
3527554184d8747d898057fc7af883ea6b502d0d
|
refs/heads/master
| 2021-06-02T05:37:37.061464
| 2020-02-14T19:23:18
| 2020-02-14T19:23:18
| 240,587,303
| 0
| 1
| null | 2020-11-29T21:11:18
| 2020-02-14T19:50:18
|
R
|
UTF-8
|
R
| false
| false
| 1,582
|
r
|
make-metadata.R
|
library(AnnotationHubData)

# Metadata record for the SeqSQC benchmark GDS file on ExperimentHub.
meta <- data.frame(
  Title = "DNA-Sequencing dataset from the 1000 Genomes Project",
  Description = paste0("DNA-seq data from the 1000 Genomes Project ",
                       "containing 22 AFR, 22 EAS, 21 EUR and 22 SAS samples. ",
                       "there are eight known related pairs including ",
                       "four parent-offspring pairs, two full-sibling pairs ",
                       "and two half-sibling or avuncular pairs in this dataset, ",
                       "which is saved as a Genomic Data Structure (GDS) file."),
  BiocVersion = "3.6",
  Genome = "GRCh37",
  SourceType = "VCF",
  # The two source URLs must be comma-separated. The previous
  # paste0(u1, u2, collapse=",") concatenated them with NO separator:
  # `collapse` only acts on vectors of length > 1, and paste0 had already
  # fused the two strings into one element.
  SourceUrl = paste(
    "ftp://ftp-trace.ncbi.nih.gov/1000genomes/ftp/release/20130502",
    "ftp://ftp-trace.ncbi.nih.gov/1000genomes/ftp/release/20130502/supporting/related_samples_vcf/",
    sep = ","),
  SourceVersion = "May 02 2013",
  Species = "Homo sapiens",
  TaxonomyId = 9606,
  Coordinate_1_based = TRUE,
  DataProvider = "1000 Genomes Project",
  Maintainer = "Qian Liu <qliu7@buffalo.edu>",
  RDataPath = "SeqSQC/benchmark_1000genomes.gds",
  Location_Prefix = "http://s3.amazonaws.com/experimenthub/",
  RDataClass = "gds.class",
  DispatchClass = "GDS",
  ResourceName = "benchmark_1000genomes.gds"
)

## Not run:
## Write the data out and put in the inst/extdata directory.
write.csv(meta, file="SeqSQC/inst/extdata/metadata.csv", row.names=FALSE)
## Test the validity of metadata.csv with readMetadataFromCsv():
readMetadataFromCsv("SeqSQC")
## End(Not run)
|
ec033458d5b2fee0855afa7ad2b86a1f41584e4a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/spatstat/examples/WindowOnly.Rd.R
|
33570ed06b7dd32327be27238bde67ba7cde1d34
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 454
|
r
|
WindowOnly.Rd.R
|
library(spatstat)
### Name: WindowOnly
### Title: Extract Window of Spatial Object
### Aliases: Window.ppm Window.kppm Window.dppm Window.lpp Window.lppm
###   Window.msr Window.quad Window.quadratcount Window.quadrattest
###   Window.tess Window.layered Window.distfun Window.nnfun Window.funxy
###   Window.rmhmodel Window.leverage.ppm Window.influence.ppm
### Keywords: spatial manip
### ** Examples

# Count points of the `cells` pattern in a 4x4 grid of quadrats,
# then extract the observation window of the resulting object.
X <- quadratcount(cells, 4)
Window(X)
|
1e70e3d419481490f1169cd50f4cc221b2ce26fd
|
d7e0f91fed1200959ccb51307c266c3a647f25ea
|
/man/executeExtraction.Rd
|
152fc7adea7f087236a956dcb5f1485010c46818
|
[
"Apache-2.0"
] |
permissive
|
UKVeteran/CancerTxPathway
|
4049f0a5ca0790f8fe287795c9e9dec69fffe0c1
|
e72fea56d38a9cfe1acca47e05f82c1d1e0c5094
|
refs/heads/master
| 2022-04-09T07:56:14.771258
| 2020-03-09T15:09:08
| 2020-03-09T15:09:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 608
|
rd
|
executeExtraction.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Main_RegimenExtraction.R
\name{executeExtraction}
\alias{executeExtraction}
\title{Main}
\usage{
executeExtraction(connectionDetails, oracleTempSchema = NULL,
cdmDatabaseSchema, cohortDatabaseSchema,
vocaDatabaseSchema = cdmDatabaseSchema,
oncologyDatabaseSchema = cdmDatabaseSchema,
createCohortTable = FALSE, createEpisodeAndEventTable = FALSE,
createTargetCohort = FALSE, episodeTable, episodeEventTable,
cohortTable, maxCores = 4)
}
\value{
Episode table, Episode Event table, cancer cohort
}
\description{
Main
}
|
91b11bed0a913903f74d9c266f622737fd203bf4
|
d255d28ece6cbc2967ef00c014eafc859cf68141
|
/RHRV/SplitPowerBandByEpisodes.R
|
361645b393cc40bb5e128c8596deaff302ade7b7
|
[
"MIT"
] |
permissive
|
med-material/ArduinoLoggerShinyApp
|
0fd645b7d721efd9bf005c12b46030402e98b35e
|
a1af24786df19dd4bbd909b5189d77ece88ac49f
|
refs/heads/master
| 2023-08-03T17:01:19.812307
| 2023-07-28T13:11:35
| 2023-07-28T13:11:35
| 228,591,063
| 1
| 1
|
MIT
| 2020-02-18T11:16:30
| 2019-12-17T10:24:41
|
R
|
UTF-8
|
R
| false
| false
| 4,129
|
r
|
SplitPowerBandByEpisodes.R
|
SplitPowerBandByEpisodes <-
  function(HRVData, indexFreqAnalysis = length(HRVData$FreqAnalysis), Tag="",
           verbose=NULL) {
  # ------------------------------------------------
  # Splits Power Per Band using Episodes information
  # ------------------------------------------------
  # Args:
  #   HRVData:           HRVData structure with episodes and at least one
  #                      frequency analysis.
  #   indexFreqAnalysis: which frequency analysis to split.
  #   Tag:               restrict to episodes of this type ("" = all).
  #   verbose:           deprecated verbosity flag.
  #
  # Returns a list with two lists, InEpisodes and OutEpisodes; both hold
  # the ULF, VLF, LF and HF band series restricted to frames falling
  # completely inside (resp. completely outside) the selected episodes.
  HRVData = HandleVerboseArgument(HRVData, verbose)
  CheckEpisodes(HRVData)
  CheckPowerBand(HRVData, indexFreqAnalysis)
  VerboseMessage(HRVData$Verbose, "Splitting power bands using episodes")
  VerboseMessage(HRVData$Verbose,
                 ifelse(Tag == "",
                        "No tag was specified",
                        paste("Using episodes with tag:", Tag))
  )
  # Select episodes to split bands.
  if (Tag == "") {
    ActiveEpisodes = HRVData$Episodes
  } else {
    ActiveEpisodes = subset(HRVData$Episodes, HRVData$Episodes$Type == Tag)
  }
  VerboseMessage(HRVData$Verbose,
                 paste("Number of episodes:", length(ActiveEpisodes$InitTime)))

  # Number of analysis frames in the chosen frequency analysis.
  lframes = length(HRVData$FreqAnalysis[[indexFreqAnalysis]]$HRV)

  # Frame geometry: fourier analyses carry an explicit window size/shift;
  # otherwise every sample is a frame of one sampling period.
  if (HRVData$FreqAnalysis[[indexFreqAnalysis]]$type == "fourier") {
    useShift <- HRVData$FreqAnalysis[[indexFreqAnalysis]]$shift
    useSize <- HRVData$FreqAnalysis[[indexFreqAnalysis]]$size
  } else {
    useShift <- useSize <- 1 / HRVData$Freq_HR
  }

  # Classify every frame. Preallocated logical vectors replace the old
  # index vectors grown with c() inside the loop, and seq_len() avoids
  # the 1:lframes trap when lframes == 0.
  inEpVec <- logical(lframes)
  outEpVec <- logical(lframes)
  firstBeat <- head(HRVData$Beat$Time, 1)
  for (i in seq_len(lframes)) {
    BegOfFrame = firstBeat + useShift * (i - 1)
    EndOfFrame = BegOfFrame + useSize
    inEp = FALSE    # frame lies completely inside some episode
    outEp = TRUE    # frame touches no episode at either endpoint
    if (length(ActiveEpisodes$InitTime) > 0) {
      for (j in seq_along(ActiveEpisodes$InitTime)) {
        begEp = ActiveEpisodes$InitTime[j]
        endEp = ActiveEpisodes$InitTime[j] + ActiveEpisodes$Duration[j]
        if (BegOfFrame >= begEp && EndOfFrame <= endEp) {
          inEp = TRUE
        }
        if (BegOfFrame >= begEp && BegOfFrame <= endEp) {
          outEp = FALSE
        }
        if (EndOfFrame >= begEp && EndOfFrame <= endEp) {
          outEp = FALSE
        }
      }
    }
    inEpVec[i] <- inEp
    outEpVec[i] <- outEp
  }
  indexInEp <- which(inEpVec)
  indexOutEp <- which(outEpVec)

  if (length(indexInEp) == 0) {
    warning(paste0("no frames in episodes with tag '", Tag, "'!"))
  }
  if (length(indexOutEp) == 0) {
    warning(paste0("no frames outside episodes with tag '", Tag, "'!"))
  }

  l = list()
  l$InEpisodes = list(ULF=HRVData$FreqAnalysis[[indexFreqAnalysis]]$ULF[indexInEp],
                      VLF=HRVData$FreqAnalysis[[indexFreqAnalysis]]$VLF[indexInEp],
                      LF=HRVData$FreqAnalysis[[indexFreqAnalysis]]$LF[indexInEp],
                      HF=HRVData$FreqAnalysis[[indexFreqAnalysis]]$HF[indexInEp]
  )
  l$OutEpisodes = list(ULF=HRVData$FreqAnalysis[[indexFreqAnalysis]]$ULF[indexOutEp],
                       VLF=HRVData$FreqAnalysis[[indexFreqAnalysis]]$VLF[indexOutEp],
                       LF=HRVData$FreqAnalysis[[indexFreqAnalysis]]$LF[indexOutEp],
                       HF=HRVData$FreqAnalysis[[indexFreqAnalysis]]$HF[indexOutEp]
  )
  VerboseMessage(
    HRVData$Verbose,
    c(paste0("No. of frames: ", lframes, "\n"),
      paste0("No. of frames in episodes: ", length(l$InEpisodes$ULF), "\n"),
      paste0("No. of frames outside episodes: ", length(l$OutEpisodes$ULF), "\n"),
      paste0("No. of borderline frames: ", lframes - length(l$InEpisodes$ULF) - length(l$OutEpisodes$ULF)))
  )
  return(l)
}
|
6babd448c84ab9a1f7e07080645d8c8782a7f71f
|
b8f9a74a91ae75c4d8b5270a4b7a36104c29528e
|
/TestFunction.R
|
cdc276c451abe85ecb5d34882e5b988f273dab87
|
[] |
no_license
|
yuliangzhang/ProgrammingAssignment2
|
aa23bf02a767ad30e3dba58fe2b8d7e642648d5f
|
834f2ef7e3a356c2636aab7964f77cf6ab442bd7
|
refs/heads/master
| 2021-01-14T08:39:16.966959
| 2015-01-23T13:24:51
| 2015-01-23T13:24:51
| 29,729,872
| 0
| 0
| null | 2015-01-23T11:27:35
| 2015-01-23T11:27:34
| null |
UTF-8
|
R
| false
| false
| 116
|
r
|
TestFunction.R
|
# Build a 4x4 invertible test matrix and exercise the caching inverse
# helpers (makeCacheMatrix / cacheSolve, defined elsewhere in the repo).
tmp <- matrix(c(1,2,3,4,5,6,7,8,7,3,9,2,15,7,23,11),4,4)
tmp
x <- makeCacheMatrix(tmp)
# First call computes and caches the inverse.
cacheSolve(x)
# Second call should return the cached inverse without recomputing.
cacheSolve(x)
|
cd27f2904ba7d4a70f6db4129a61bd4200e35c27
|
0abf16c147a819cf5fd9bb4ce380cf4f2222bb8d
|
/Statistics - Duke University/002_Inferential_Statistics/Week 1/Lab/LabQuiz/LabQuiz.R
|
64c31ba6d5b57c10af2bc5d7939caae074562919
|
[] |
no_license
|
bhunkeler/DataScienceCoursera
|
6c6c17f5808cd6a8e882f7558ca32e70b9b39b30
|
4ae8c176acbb5b2d78ff08379a856c4afefea8f8
|
refs/heads/master
| 2022-05-01T14:26:22.738900
| 2022-03-11T16:56:07
| 2022-03-11T16:56:07
| 43,755,669
| 52
| 120
| null | 2022-03-24T19:07:28
| 2015-10-06T14:23:57
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 5,908
|
r
|
LabQuiz.R
|
# ========================================================================================================================================
# Load Libraries
# ========================================================================================================================================
library('dplyr')
library('ggplot2')
# library('reshape2')
library(devtools)
library(statsr)
# ========================================================================================================================================
# Download and extract Data and load file
# ========================================================================================================================================
# `ames` (housing sales for Ames, Iowa) ships with the statsr package
data(ames)

# Population distribution of living area
ggplot(data = ames, aes(x = area)) +
  geom_histogram(binwidth = 250)

# Population parameters for area
ames %>%
  summarise(mu = mean(area), pop_med = median(area),
            sigma = sd(area), pop_iqr = IQR(area),
            pop_min = min(area), pop_max = max(area),
            pop_q1 = quantile(area, 0.25), # first quartile, 25th percentile
            pop_q3 = quantile(area, 0.75)) # third quartile, 75th percentile

summary(ames$area)

# retrieve 50 Samples
samp1 <- ames %>%
  sample_n(size = 50)

mean(samp1$area)

# Calculate the mean of the sample
samp1 %>%
  summarise(x_bar = mean(area))

# Sampling distribution of the sample mean area (n = 50, 15000 reps)
sample_means50 <- ames %>%
  rep_sample_n(size = 50, reps = 15000, replace = TRUE) %>%
  summarise(x_bar = mean(area))

ggplot(data = sample_means50, aes(x = x_bar)) +
  geom_histogram(binwidth = 20) +
  geom_vline(xintercept = mean(sample_means50$x_bar), col = 'red')

# sample size
dim(sample_means50)[1]

# Same distribution with a much smaller sample size (n = 10, 25 reps)
sample_means_small <- ames %>%
  rep_sample_n(size = 10, reps = 25, replace = TRUE) %>%
  summarise(x_bar = mean(area))

ggplot(data = sample_means_small, aes(x = x_bar)) +
  geom_histogram(binwidth = 20) +
  geom_vline(xintercept = mean(sample_means_small$x_bar), col = 'red')

# ================================================================================
# Question 1
# ================================================================================
# 50% of houses in Ames are smaller than 1,499.69 square feet.
# ================================================================================
# Question 2
# ================================================================================
# Sample size of 1000.
# ================================================================================
# Question 3
# ================================================================================
sample_means_small <- ames %>%
  rep_sample_n(size = 10, reps = 25, replace = TRUE) %>%
  summarise(x_bar = mean(area))

ggplot(data = sample_means_small, aes(x = x_bar)) +
  geom_histogram(binwidth = 20) +
  geom_vline(xintercept = mean(sample_means_small$x_bar), col = 'red')

# sample size
dim(sample_means_small)[1]

# ================================================================================
# Question 4
# ================================================================================
# Each element represents a mean square footage from a simple random sample of 10 houses.
# ================================================================================
# Question 5
# ================================================================================
# The variability of the sampling distribution 'decreases'
# ================================================================================
# Question 6
# ================================================================================
# The variability of the sampling distribution with the smaller sample size (`sample_means50`) is smaller than the variability of the sampling distribution with the larger sample size (`sample_means150`).

# retrieve 50 Samples
samp1 <- ames %>%
  sample_n(size = 50)

# calculate the mean price (point estimate)
mean(samp1$price)

# calculate the mean price (point estimate) for 5000 repetitions on a sample size of 50
sample_means50 <- ames %>%
  rep_sample_n(size = 50, reps = 5000, replace = TRUE) %>%
  summarise(x_bar = mean(price))

ggplot(data = sample_means50, aes(x = x_bar)) +
  geom_histogram(binwidth = 20) +
  # FIX: the vertical line previously referenced `sample_means_small`
  # (copy-paste error); it should mark the mean of THIS distribution.
  geom_vline(xintercept = mean(sample_means50$x_bar), col = 'red')

# calculate the mean price (point estimate) for 5000 repetitions on a sample size of 150
sample_means150 <- ames %>%
  rep_sample_n(size = 150, reps = 5000, replace = TRUE) %>%
  summarise(x_bar = mean(price))

ggplot(data = sample_means150, aes(x = x_bar)) +
  geom_histogram(binwidth = 20) +
  geom_vline(xintercept = mean(sample_means150$x_bar), col = 'red')

# retrieve a sample of 15 observations
samp1 <- ames %>%
  sample_n(size = 15)

# calculate the mean price (point estimate)
mean(samp1$price)

# calculate the mean price (point estimate) for 2000 repetitions on a sample size of 15
sample_means15 <- ames %>%
  rep_sample_n(size = 15, reps = 2000, replace = TRUE) %>%
  summarise(x_bar = mean(price))

ggplot(data = sample_means15, aes(x = x_bar)) +
  geom_histogram(binwidth = 20) +
  geom_vline(xintercept = mean(sample_means15$x_bar), col = 'red')

mean(sample_means15$x_bar)

# calculate the mean price (point estimate) for 2000 repetitions on a sample size of 150
sample_means150 <- ames %>%
  rep_sample_n(size = 150, reps = 2000, replace = TRUE) %>%
  summarise(x_bar = mean(price))

ggplot(data = sample_means150, aes(x = x_bar)) +
  geom_histogram(binwidth = 20) +
  geom_vline(xintercept = mean(sample_means150$x_bar), col = 'red')

mean(sample_means150$x_bar)

# calculate the mean price (point estimate) for 2000 repetitions on a sample size of 50
# FIX: this object is `sample_means50` and is compared against
# `sample_means150` below, so the sample size should be 50 (it was 15,
# which merely duplicated `sample_means15` above).
sample_means50 <- ames %>%
  rep_sample_n(size = 50, reps = 2000, replace = TRUE) %>%
  summarise(x_bar = mean(price))

ggplot(data = sample_means50, aes(x = x_bar)) +
  geom_histogram(binwidth = 20) +
  geom_vline(xintercept = mean(sample_means50$x_bar), col = 'red')

mean(sample_means50$x_bar)
mean(sample_means150$x_bar)
|
d762c5f92910a30cc33ddab2eda01f31022d9ae1
|
57a18b3e750c7e1d89af3d98778a5cea9de75555
|
/tests/testthat.R
|
acfa222eb1ecf3422da2aa396dadc8a5a1c50e59
|
[
"MIT"
] |
permissive
|
piotrekjanus/aiRly
|
b97ce15d1e15c0701926738551ecf1598881e8d8
|
093f6f4756d0f18af3c7445845fa47f5b624deb0
|
refs/heads/master
| 2021-03-02T22:17:11.450643
| 2020-03-19T22:31:01
| 2020-03-19T22:31:01
| 245,910,217
| 0
| 0
|
NOASSERTION
| 2020-03-12T18:40:16
| 2020-03-09T00:22:38
|
R
|
UTF-8
|
R
| false
| false
| 54
|
r
|
testthat.R
|
# Standard testthat entry point for the aiRly package: discovers and runs
# every test under tests/testthat/ (executed by R CMD check).
library(testthat)
library(aiRly)
test_check("aiRly")
|
f686ad8c037a930e56cfb56ab22853db6a14460f
|
f6c3107039eca2c6295fefc456bc5b369d3f4472
|
/run_analysis.R
|
35255411b39a4f39e23254fa0ab0930455285fdb
|
[] |
no_license
|
jatinchawda1503/run_analysis
|
36d47d5cc368cb1f91b127526f9849ea585e8e66
|
8a4acb68bf2a0269c0a90d8af65904fad85ac355
|
refs/heads/master
| 2020-10-01T01:07:12.504411
| 2019-12-11T20:23:30
| 2019-12-11T20:23:30
| 227,416,822
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,307
|
r
|
run_analysis.R
|
library(dplyr)
library(data.table)

# ---------------------------------------------------------------------------
# Getting & Cleaning Data course project: download the UCI HAR smartphone
# data set, merge train/test, keep mean/std measurements, label activities,
# and write a tidy per-subject/per-activity summary to FinalData.txt.
# ---------------------------------------------------------------------------

#Setting File
get_file <- "Coursera_DS3_Final.zip"
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
directory <- getwd()

# Download the archive only when it is not already present
if (!file.exists(get_file)){
  download.file(url, get_file, method="curl")
}

#unzipping file
unzip(zipfile = get_file)

# Lookup tables.
# FIX: activity labels must come from activity_labels.txt - the original
# script read features.txt twice.
activity_labels <- fread(file.path(directory, "UCI HAR Dataset/activity_labels.txt"),
                         col.names = c("classLabels", "activityName"))
features <- fread(file.path(directory, "UCI HAR Dataset/features.txt"),
                  col.names = c("index", "featureNames"))

# Measurement files: name the 561 columns after the feature names.
# FIX: the names were previously passed as `colClasses` (which expects
# column *types*), not `col.names`.
train <- fread(file.path(directory, "UCI HAR Dataset/train/X_train.txt"),
               col.names = features$featureNames)
trainActivities <- fread(file.path(directory, "UCI HAR Dataset/train/Y_train.txt"),
                         col.names = c("Activity"))
trainSubjects <- fread(file.path(directory, "UCI HAR Dataset/train/subject_train.txt"),
                       col.names = c("SubjectNum"))
test <- fread(file.path(directory, "UCI HAR Dataset/test/X_test.txt"),
              col.names = features$featureNames)
testActivities <- fread(file.path(directory, "UCI HAR Dataset/test/Y_test.txt"),
                        col.names = c("Activity"))
testSubjects <- fread(file.path(directory, "UCI HAR Dataset/test/subject_test.txt"),
                      col.names = c("SubjectNum"))

# Merging the data sets.
# FIX: subject lists are stacked (rbind), not placed side by side (cbind).
# (`subjects` is kept for reference; it is not used downstream.)
subjects <- rbind(trainSubjects, testSubjects)
train <- cbind(trainSubjects, trainActivities, train)
test <- cbind(testSubjects, testActivities, test)
merge_data <- rbind(train, test)

# Extract only the measurements on the mean and standard deviation.
# FIX: selection is done by column position - the original
# `select(merge_data$SubjectNum, contains(...))` is not valid dplyr and
# also dropped the Activity column needed below.
wanted <- unique(c(1, 2, grep("mean|std", names(merge_data), ignore.case = TRUE)))
cal_measurement <- merge_data %>% select(all_of(wanted))

# Use descriptive activity names instead of the numeric codes.
# FIX: the original indexed a nonexistent `index` column.
cal_measurement$Activity <-
  activity_labels$activityName[match(cal_measurement$Activity,
                                     activity_labels$classLabels)]

# appropriately label the data set with descriptive variable names
names(cal_measurement)<-gsub("Acc", "Accelerometer", names(cal_measurement))
names(cal_measurement)<-gsub("Gyro", "Gyroscope", names(cal_measurement))
names(cal_measurement)<-gsub("BodyBody", "Body", names(cal_measurement))
names(cal_measurement)<-gsub("Mag", "Magnitude", names(cal_measurement))
names(cal_measurement)<-gsub("^t", "Time", names(cal_measurement))
names(cal_measurement)<-gsub("^f", "Frequency", names(cal_measurement))
names(cal_measurement)<-gsub("tBody", "TimeBody", names(cal_measurement))
names(cal_measurement)<-gsub("-mean()", "Mean", names(cal_measurement), ignore.case = TRUE)
names(cal_measurement)<-gsub("-std()", "STD", names(cal_measurement), ignore.case = TRUE)
names(cal_measurement)<-gsub("-freq()", "Frequency", names(cal_measurement), ignore.case = TRUE)
names(cal_measurement)<-gsub("angle", "Angle", names(cal_measurement))
names(cal_measurement)<-gsub("gravity", "Gravity", names(cal_measurement))

# From the data set above, create a second, independent tidy data set with
# the average of each variable for each activity and each subject.
# (funs() is deprecated; pass the function directly.)
FinalData <- cal_measurement %>%
  group_by(SubjectNum, Activity) %>%
  summarise_all(mean)

write.table(FinalData, "FinalData.txt", row.name=FALSE)
|
bec83956493cdac967e1b7b206d45b72405b7f2a
|
4ea05b7ebcfc00552c3f5cb2d596a63cf146a7e9
|
/create_data.R
|
7c2ff4b8cdfd00e81f7041d85a0abc4d8b8390fc
|
[] |
no_license
|
joranE/single-skier
|
28837b65e841a5657310805015ff0d52ff9f5be8
|
bef5c429d7bde84992a15c572bdaeef403ef41f9
|
refs/heads/master
| 2020-05-20T02:45:31.500010
| 2017-10-04T03:35:38
| 2017-10-04T03:35:38
| 35,615,791
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 698
|
r
|
create_data.R
|
# Build a local SQLite database ("fis.sqlite3") containing every result of
# any skier who has at least one start on the major circuits
# (WC / WSC / OWG / TDS).
library(statskier2)
library(dplyr)

major_cats <- c('WC', 'WSC', 'OWG', 'TDS')

con_local <- db_xc_local()
races <- ss_query(con_local, "select * from main")

# Empty category strings are really missing values
races$cat2[races$cat2 == ""] <- NA

# Partition rows into non-major (element 1) and major (element 2) races
is_major <- races$cat1 %in% major_cats
races <- split(races, is_major)

#XC_FAC <- load_xc_conv()

# The mpb()/standardize_mpb() helpers are applied to major races only;
# everything else gets an explicit NA in that column.
races[[2]] <- races[[2]] %>%
  mpb() %>%
  standardize_mpb()
races[[1]]$mpb <- NA

# Recombine and restore the original row ordering by id
races <- do.call("rbind", races)
races <- dplyr::arrange(races, id)

# Keep all rows for skiers that appear at least once in a major race
out <- dplyr::filter(races, fisid %in% fisid[cat1 %in% major_cats])

# Rebuild the SQLite file from scratch
file.remove("fis.sqlite3")
fis_db <- src_sqlite("fis.sqlite3", create = TRUE)
fis_main <- copy_to(fis_db,
                    out,
                    name = "main",
                    temporary = FALSE,
                    indexes = list("raceid", "fisid", "cat1", "type"))
|
0cf94b5ec28e50317c12829f70576a856162a071
|
2d71ce9dffea7ca2c9c9e1d1efaf1b27ba303d63
|
/games_howell.R
|
fcf1daf00ac7e65ea662d60239bdfad1a4e73043
|
[] |
no_license
|
sgelias/cerrado-yeasts
|
590f144b83f15537510509c18d71095d1aab9b12
|
81165829679125c7ff588f325d94e02a575a974a
|
refs/heads/master
| 2020-12-26T11:43:46.271494
| 2020-06-05T18:18:33
| 2020-06-05T18:18:33
| 237,498,345
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,208
|
r
|
games_howell.R
|
#' @title Games-Howell post-hoc test
#' @name games_howell
#' @description This function produces results from Games-Howell post-hoc tests
#'   for Welch's one-way analysis of variance (ANOVA) (`stats::oneway.test()`).
#'
#' @inheritParams pairwise_comparisons
#'
#' @return A tibble with one row per pair of groups containing the mean
#'   difference, its 95% confidence limits, the standard error, t-value,
#'   Welch degrees of freedom, and the studentized-range p-value
#'   (numeric columns rounded to 3 digits).
#'
#' @importFrom stats ptukey qtukey
#' @importFrom utils combn
#'
#' @note This is based on the implementation of Games-Howell test by Aaron
#'   Schlegel (https://rpubs.com/aaronsc32) and published on RPubs
#'   (https://rpubs.com/aaronsc32/games-howell-test).
#'
#' @keywords internal

# function body
games_howell <- function(data, x, y) {
  # ============================ data preparation ==========================

  # Keep only the grouping/outcome columns, drop incomplete rows, and turn
  # the grouping variable into a factor with unused levels removed.
  data <-
    dplyr::select(
      .data = data,
      x = !!rlang::enquo(x),
      y = !!rlang::enquo(y)
    ) %>%
    tidyr::drop_na(data = .) %>%
    dplyr::mutate(.data = ., x = droplevels(as.factor(x))) %>%
    tibble::as_tibble(x = .)

  # variables of interest for running the test
  grp <- data$x
  obs <- data$y

  # all pairwise group combinations (one column per pair)
  combs <- utils::combn(x = unique(grp), m = 2)

  # Statistics that will be used throughout the calculations:
  # n = sample size of each group
  # groups = number of groups in data
  # Mean = means of each group sample
  # std = variance of each group sample
  n <- tapply(X = obs, INDEX = grp, FUN = length)
  groups <- length(n)
  Mean <- tapply(X = obs, INDEX = grp, FUN = mean)
  std <- tapply(X = obs, INDEX = grp, FUN = var)

  # ============================ analysis ===============================

  statistics <- lapply(X = 1:NCOL(combs), FUN = function(x) {
    # mean difference
    mean.diff <- Mean[combs[2, x]] - Mean[combs[1, x]]

    # t-value (Welch-type, using per-group variances)
    t <-
      (abs(Mean[combs[1, x]] - Mean[combs[2, x]])) /
        (sqrt((std[combs[1, x]] / n[combs[1, x]]) +
          (std[combs[2, x]] / n[combs[2, x]])))

    # Welch-Satterthwaite degrees of freedom (df)
    df <-
      ((std[combs[1, x]] / n[combs[1, x]] +
        std[combs[2, x]] / n[combs[2, x]])^2) /
        ((((std[combs[1, x]] / n[combs[1, x]])^2 / (n[combs[1, x]] - 1)) +
          ((std[combs[2, x]] / n[combs[2, x]])^2 / (n[combs[2, x]] - 1))))

    # p-value from the studentized range distribution
    p <-
      stats::ptukey(
        q = t * sqrt(2),
        nmeans = groups,
        df = df,
        lower.tail = FALSE
      )

    # sigma standard error
    se <-
      sqrt(x = 0.5 * (std[combs[1, x]] / n[combs[1, x]] +
        std[combs[2, x]] / n[combs[2, x]]))

    # 95% confidence limits for the mean difference.
    # FIX: each limit was previously computed inside a redundant
    # `lapply(1:NCOL(combs), ...)` whose iterations all produced the same
    # value (the inner index was unused) and from which only `[[1]]` was
    # kept. The half-width is loop-invariant, so compute it once.
    half.width <- stats::qtukey(p = 0.95, nmeans = groups, df = df) * se
    conf.high <- mean.diff + half.width
    conf.low <- mean.diff - half.width

    # Group Combinations
    group1 <- as.character(combs[1, x])
    group2 <- as.character(combs[2, x])

    # Collect all statistics into list
    stats <-
      list(group1, group2, mean.diff, se, t, df, p, conf.high, conf.low)
  })

  # unlist statistics collected earlier
  stats.unlisted <- lapply(statistics, function(x) {
    unlist(x)
  })

  # create dataframe from flattened list (one row per group pair)
  results <-
    data.frame(matrix(
      unlist(stats.unlisted),
      nrow = length(stats.unlisted),
      byrow = TRUE
    ))

  # select columns that should be numeric and change with as.numeric
  results[, 3:ncol(results)] <-
    round(as.numeric(as.matrix(results[, 3:ncol(results)])), digits = 3)

  # Rename data frame columns
  colnames(results) <-
    c(
      "group1",
      "group2",
      "mean.difference",
      "se",
      "t.value",
      "df",
      "p.value",
      "conf.high",
      "conf.low"
    )

  # convert to tibble and place the confidence limits right after the
  # mean difference
  results %<>%
    tibble::as_tibble(x = .) %>%
    dplyr::select(
      .data = .,
      group1:mean.difference,
      conf.low,
      conf.high,
      dplyr::everything()
    )

  # select the final dataframe
  return(results)
}
|
e7929bb3220fdd8b6a18d1726026982fbee81c59
|
41a58a02a504850bfa7dea1bc897e08f82a47680
|
/man/theme_eq_timeline.Rd
|
714cc6e527e7adf1d62f3883fd9e1dbfce5bb6ce
|
[] |
no_license
|
rafaelcb/NOAA
|
e543afe61f03f23f60e202d9c0847d171bc1817f
|
21f5ea6dedc2f285414a0097d188a13dee5324c5
|
refs/heads/master
| 2021-01-01T17:35:04.187428
| 2017-10-09T03:16:43
| 2017-10-09T03:16:43
| 98,105,781
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 616
|
rd
|
theme_eq_timeline.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geoms.R
\docType{data}
\name{theme_eq_timeline}
\alias{theme_eq_timeline}
\title{Theme for geom_timeline plot}
\format{An object of class \code{theme} (inherits from \code{gg}) of length 7.}
\usage{
theme_eq_timeline
}
\description{
This theme helps visualize the information from a geom_timeline
plot better.
}
\examples{
\dontrun{eq_data \%>\% eq_clean_data() \%>\%
filter(COUNTRY \%in\% c("GREECE", "ITALY"), YEAR > 2000) \%>\%
ggplot(aes(x = DATE, y = COUNTRY)) +
geom_timeline() + theme_eq_timeline()}
}
\keyword{datasets}
|
fd5e5b2d5321af72b962d9caea3c25691483fc8c
|
5bf3987cf8d91e5fb8427fb82342bb158a24472e
|
/cachematrix.R
|
ca7ea27764171a2e2624ee8c23002ab605a9a8af
|
[] |
no_license
|
ivancostabernardo/ProgrammingAssignment2
|
fc89f49df2c95216bca4588471951a9fd00f3446
|
6f638e53493f131e3725866693a1cad3cf12e8d2
|
refs/heads/master
| 2020-12-25T09:09:12.548024
| 2016-07-01T00:31:58
| 2016-07-01T00:31:58
| 61,958,038
| 0
| 0
| null | 2016-06-25T19:17:19
| 2016-06-25T19:17:19
| null |
UTF-8
|
R
| false
| false
| 1,041
|
r
|
cachematrix.R
|
## The following two functions work together to calculate the inverse of a given matrix,
## caching the result and thus avoiding unnecessary calculations.
## This function returns a list with four functions:
## 'set' sets the value of the matrix;
## 'get' gets the value of the matrix;
## 'setsolve' sets the value of the inverse matrix;
## 'getsolve' gets the value of the inverse matrix.
## Constructs a "cache-aware" matrix wrapper: a list of four accessor
## closures sharing the matrix `x` and a memoised inverse.
##   set(y)        - replace the matrix and invalidate the cached inverse
##   get()         - return the current matrix
##   setsolve(inv) - store a computed inverse
##   getsolve()    - return the cached inverse (NULL when unset or stale)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL

  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # any previously stored inverse is now stale
  }
  get <- function() {
    x
  }
  setsolve <- function(solve) {
    cached_inverse <<- solve
  }
  getsolve <- function() {
    cached_inverse
  }

  list(set = set, get = get, setsolve = setsolve, getsolve = getsolve)
}
## Uses the list returned by makeCacheMatrix(): returns the inverse of the
## wrapped matrix, computing it at most once. Extra arguments are passed
## through to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()

  # Cache hit: skip the computation entirely.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }

  # Cache miss: invert, remember, return.
  fresh <- solve(x$get(), ...)
  x$setsolve(fresh)
  fresh
}
|
dd46660a52bb06272321d272b8ecd460bd26e416
|
e7a8b0ad922d03cfb245c08821332ce8ce7ee333
|
/plot3.R
|
9373b8bc04ff70b3afdefd87339b6cd96a385096
|
[] |
no_license
|
YeshwantBhat/ExData_Plotting1
|
6de0d7a5260d47205b78bf6c24a61a6f6acd7889
|
8671c54296debc80f913be5c0d8dc8f642ab8ef8
|
refs/heads/master
| 2021-01-15T08:16:10.209744
| 2015-07-11T14:39:19
| 2015-07-11T14:39:19
| 38,778,730
| 0
| 0
| null | 2015-07-08T20:39:21
| 2015-07-08T20:39:21
| null |
UTF-8
|
R
| false
| false
| 1,119
|
r
|
plot3.R
|
# plot3.R - energy sub-metering over 2007-02-01/02 for the UCI
# "Individual household electric power consumption" data set.

householdpower <- read.csv("household_power_consumption.txt", h = T, sep = ";",
                           nrows = 2075259, stringsAsFactors = FALSE)
str(householdpower)
head(householdpower)

# Keep only the two days of interest.
# (The original also built a single-day subset first and immediately
# overwrote it; that dead step is removed.)
householdpower1 <- subset(householdpower,
                          householdpower$Date == "1/2/2007" |
                            householdpower$Date == "2/2/2007")
str(householdpower1)

# Build timestamps from the Date and Time columns
dates <- householdpower1$Date
head(dates)
datestime <- paste(dates, householdpower1$Time)
head(datestime)
time <- strptime(datestime, "%d/%m/%Y %H:%M:%S")
head(time)

# Sub-metering columns arrive as character; make them numeric for plotting
householdpower1$Sub_metering_1 <- as.numeric(householdpower1$Sub_metering_1)
householdpower1$Sub_metering_2 <- as.numeric(householdpower1$Sub_metering_2)

png("plot3.png", width = 480, height = 480)
# FIX: the plotting calls referenced the undefined object `householdpower2`;
# they now use the filtered data set `householdpower1`.
plot(time, householdpower1$Sub_metering_1, typ = 'l',
     ylab = "Energy sub metering", xlab = "")
points(time, householdpower1$Sub_metering_2, type = "l", lwd = 1, col = "red")
points(time, householdpower1$Sub_metering_3, type = "l", lwd = 1, col = "blue")
legend("topright", lty = c(1, 1, 1), lwd = c(1, 1, 1), pch = NA,
       col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
1430cf28b7674529e5afbabaae2b502f5db1249b
|
78eb646b4ef3565ef1442cad2a47e2e152dcea81
|
/Housing Price/ISLR- Housing Price.R
|
569b71362b4877d5869424b8552f6e9f16faac24
|
[] |
no_license
|
sxd213/dataScienceML
|
89b4ed2200fdb62ec95c57abd9acae16e7f1ccb7
|
f87ad39626459c0f8ba6b356df1412c0c9ab1e9e
|
refs/heads/master
| 2021-01-23T06:14:37.892482
| 2018-01-21T03:41:50
| 2018-01-21T03:41:50
| 93,012,311
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 714
|
r
|
ISLR- Housing Price.R
|
# ISLR - Housing Price: load the King County house sales data and do basic
# type cleanup before exploratory plotting.

# Drop stale copies from previous runs (warns harmlessly on a fresh session)
rm(sales, tsales, sales1, train, test)
dir <- 'D:\\MyStuff\\Housing Price'
setwd(dir)
sales <- read.csv('kc_house_data.csv', stringsAsFactors = FALSE)
#Back up data set
tsales <- read.csv('kc_house_data.csv', stringsAsFactors = FALSE)
str(sales)
summary(sales$price)

# basic manipulation
# Sale year-month as an integer YYYYMM taken from the date string.
# FIX: the original passed a malformed format string ("Y%-m%") to
# format(), which had no effect on the substring anyway.
sales$saleyrmnth <- as.integer(substr(sales$date, 1, 6))

# Convert discrete columns to factors.
# FIX: the original converted attach()ed copies of the columns, which
# never modified the `sales` data frame itself; assign back explicitly.
sales$bedrooms <- as.factor(sales$bedrooms)
sales$bathrooms <- as.factor(sales$bathrooms)
sales$floors <- as.factor(sales$floors)
sales$waterfront <- as.factor(sales$waterfront)
sales$view <- as.factor(sales$view)
sales$condition <- as.factor(sales$condition)
sales$grade <- as.factor(sales$grade)
sales$zipcode <- as.factor(sales$zipcode)

# Drop identifier columns before the scatterplot matrix
sales1 <-
  subset(sales, select = -c(id, date))
pairs(sales1)
|
1a4106669268d00e81ae74103c11944128582482
|
dcede2b512a9d572c53eca741a56fcd745308100
|
/workflow/scripts/reduced-dimensions/plotClusteredPCs.R
|
5be12d7d79057a594ab4b6286a47914c773a8e05
|
[
"MIT"
] |
permissive
|
jma1991/DiasTailbudData
|
1c9a71d8f3364a26f67eba96626070e352ca4477
|
0e37976fdd79c7972066ded83c789edf6a23acbe
|
refs/heads/main
| 2023-06-09T19:59:11.627780
| 2021-07-05T09:00:44
| 2021-07-05T09:00:44
| 310,294,386
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,289
|
r
|
plotClusteredPCs.R
|
#!/usr/bin/env Rscript
set.seed(1701)
# Snakemake script entry point: plot the number of clusters obtained as a
# function of the number of retained principal components, mark the chosen
# PC count, save as PDF, then trim/pad the PDF with magick.
main <- function(input, output, log) {
  # Redirect stdout/stderr into the Snakemake-provided log files
  log_out <- file(log$out, open = "wt")
  log_err <- file(log$err, open = "wt")
  sink(log_out, type = "output")
  sink(log_err, type = "message")

  library(ggplot2)
  library(scales)
  library(scran)

  pcs <- readRDS(input$rds)
  chosen <- metadata(pcs)$chosen  # PC count selected upstream

  fig <- ggplot(as.data.frame(pcs), aes(n.pcs, n.clusters)) +
    geom_point(colour = "#79706E") +
    geom_vline(xintercept = chosen, colour = "#E15759", linetype = "dashed") +
    annotate("text", x = chosen, y = Inf, label = sprintf("PCs = %s ", chosen), angle = 90, vjust = -1, hjust = 1, colour = "#E15759") +
    scale_x_continuous(name = "Principal component", breaks = c(1, 10, 20, 30, 40, 50), labels = label_ordinal()) +
    scale_y_continuous(name = "Number of clusters") +
    theme_bw()

  ggsave(output$pdf, plot = fig, width = 8, height = 6, scale = 0.8)

  # Post-process the PDF: trim whitespace, add a uniform white border
  library(magick)
  img <- image_read_pdf(output$pdf)
  img <- image_trim(img)
  img <- image_border(img, color = "#FFFFFF", geometry = "50x50")
  img <- image_write(img, path = output$pdf, format = "pdf")
}

main(snakemake@input, snakemake@output, snakemake@log)
|
6dc735a15d6b92f67945d1d83ada85690395de78
|
639ae17e0f7f9fba6dccb7387281741c3594256e
|
/FINVIZ_NEWS_SCRAPE.R
|
92e2d7f4c446a2b029cf5cd476b04f31f93144db
|
[] |
no_license
|
jgQuantScripts/FinViz-News-Scraper
|
14c12688a25e66a8e4306186cb2c8beac614a171
|
8adde15bedd485660e920600f7bda307e13fd6c5
|
refs/heads/main
| 2022-12-28T14:56:52.621388
| 2020-10-13T00:27:08
| 2020-10-13T00:27:08
| 302,469,968
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 939
|
r
|
FINVIZ_NEWS_SCRAPE.R
|
require("rvest");require("stringr")
ticker = "ZM"
# Scrape the news table for `ticker` from FinViz and return a data.frame
# with columns Date (POSIXct), News (headline text) and Ticker.
getFinNews = function(ticker)
{
  # Be polite to FinViz between successive requests
  Sys.sleep(5)
  url <- paste0("https://finviz.com/quote.ashx?t=",ticker)
  # read finviz news data
  data <- read_html(url)
  # news table located by its element id
  data = data %>% html_nodes(xpath = "//*[@id='news-table']") %>% html_table()
  tmp = do.call(rbind,data)
  dtime = as.data.frame(tmp[,1])
  # First column is either "Mon-DD-YY HH:MMAM" or just "HH:MMAM" (same day
  # as the previous row); split it into date and time tokens.
  dtime <- t(as.data.frame(str_split(dtime[,1], pattern = " ")))
  dates <- as.character(dtime[,1])
  tmz <- as.character(dtime[,2])
  # Time-only rows carry a ":" in the first token; blank them out ...
  dates[str_detect(dates,pattern = ":")] <- NA
  # ... and forward-fill each gap with the last seen date.
  # FIX: this previously called na.locf(), which belongs to the zoo
  # package and was never loaded, so the function errored at runtime.
  # The base-R loop below performs the same last-observation-carried-
  # forward fill without the extra dependency.
  for (i in seq_along(dates)) {
    if (is.na(dates[i]) && i > 1) dates[i] <- dates[i - 1]
  }
  # combine into a timeStamp
  timeStamp <- as.data.frame(as.POSIXct(paste(dates,tmz), format="%b-%d-%y %I:%M%p"))
  # combine timeStamps with News
  tmp[,1] <- timeStamp
  tmp[,3] <- ticker
  colnames(tmp) <- c("Date","News","Ticker")
  tmp
}
dat <- getFinNews("AAPL")
|
c78ca370c8ae9e995fe7620970cfaae817af4625
|
6d0ab19a101dc52bb2b110ad4d168043b1c0f298
|
/changes.R
|
53731b526118abec1bf60359ee7d045cd163db6c
|
[] |
no_license
|
jjchieppa/Nocturnal-gs_Panicum-virgatum-UNF
|
edaa9d306d147cb8cdb10a0cebca5816af825a5e
|
79c2519bcd16845675e13f97ec55f5777a534e11
|
refs/heads/master
| 2021-11-23T23:08:45.769587
| 2021-10-27T14:01:58
| 2021-10-27T14:01:58
| 250,525,049
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 30
|
r
|
changes.R
|
# I'm making some BIG changes!
|
2bd70e8b5a607743fa667505a59eb81165344a01
|
7a914c0e1004de6cff6180a4f728293d53931b6d
|
/create_xcpfiles.R
|
ff21f2f0dd5894c75b1c47f36bea5a6def8caeed
|
[] |
no_license
|
tientong98/xcpEngineTutorial2
|
23657cbb83f72749fa33e08d2e11748b288c72ff
|
e79384f96b65f745814ecb231daedc1aa9553d0f
|
refs/heads/master
| 2021-01-09T04:34:28.417062
| 2020-02-21T23:12:47
| 2020-02-21T23:12:47
| 242,247,453
| 1
| 0
| null | 2020-02-21T23:18:59
| 2020-02-21T23:18:58
| null |
UTF-8
|
R
| false
| false
| 2,206
|
r
|
create_xcpfiles.R
|
# Build the xcpEngine community/node definition files from a parcellation
# lookup table (lut.tsv) and a fixed 17-network mapping.
library(dplyr)

# lut.tsv stores its header in the first data row: promote it to column
# names, drop that row, and renumber the index column from 1.
lut <- read.table("lut.tsv", stringsAsFactors = F)
names(lut) <- lut[1, ]
lut <- lut[-1, ]
rownames(lut) <- NULL
lut[, 1] <- rownames(lut)
write.table(lut, "lut_updated.tsv", col.names = T, row.names = F, quote = F, sep = "\t")

# network mapping copying from James's script
#NETWORK_MAPPING = {
# 1: "VisCent",
# 2: "VisPeri",
# 3: "SomMotA",
# 4: "SomMotB",
# 5: "DorsAttnA",
# 6: "DorsAttnB",
# 7: "SalVentAttnA",
# 8: "SalVentAttnB",
# 9: "LimbicA",
# 10: "LimbicB",
# 11: "ContC",
# 12: "ContA",
# 13: "ContB",
# 14: "TempPar",
# 15: "DefaultC",
# 16: "DefaultA",
# 17: "DefaultB",
#}

# One-column data frame of the 17 network names, in mapping order
network_names <- c("VisCent", "VisPeri", "SomMotA", "SomMotB",
                   "DorsAttnA", "DorsAttnB", "SalVentAttnA", "SalVentAttnB",
                   "LimbicA", "LimbicB", "ContC", "ContA", "ContB",
                   "TempPar", "DefaultC", "DefaultA", "DefaultB")
CommunityNames <- as.data.frame(network_names, stringsAsFactors = F)
write.table(CommunityNames, "mergedCommunityNames.txt", col.names = F, row.names = F, quote = F)

# Network name embedded in each region label: "_"-delimited for the first
# 400 rows, "-"-delimited for the remaining 32.
split_underscore <- strsplit(lut[, 2], "_")
split_dash <- strsplit(lut[, 2], "-")
lut$network <- NA
for (i in 1:400) lut$network[i] <- split_underscore[[i]][2]
for (i in 401:432) lut$network[i] <- split_dash[[i]][2]

# Map each region's network name onto its 1-17 community index
# (same result as the original nested loops: unmatched names stay NA)
lut$CommunityAffiliation <- match(lut$network, CommunityNames[, 1])

CommunityAffiliation <- as.data.frame(lut$CommunityAffiliation, stringsAsFactors = F)
write.table(CommunityAffiliation, "mergedCommunityAffiliation.txt", col.names = F, row.names = F, quote = F)

NodeIndex <- as.data.frame(lut$index, stringsAsFactors = F)
write.table(NodeIndex, "mergedNodeIndex.txt", col.names = F, row.names = F, quote = F)

NodeNames <- as.data.frame(lut$regions, stringsAsFactors = F)
write.table(NodeNames, "mergedNodeNames.txt", col.names = F, row.names = F, quote = F)
|
425a096add48e66814825a6737ead6bdf2dc1358
|
8a313266928dc5e985050ebaeb1feb6fe43a2929
|
/ftd_new.R
|
026764fbe26a19b585ae29964ea0a393ad4e571d
|
[] |
no_license
|
mt-christo/ej
|
b4a2c283285256f9fd3961b0f67dd89be20f890a
|
265a9c171eb64e5e4e5ed7d2e2053908b1d946d4
|
refs/heads/master
| 2022-12-12T12:03:41.654039
| 2019-10-26T03:22:06
| 2019-10-26T03:22:06
| 102,653,282
| 0
| 1
| null | 2022-12-08T00:54:12
| 2017-09-06T20:07:26
|
R
|
UTF-8
|
R
| false
| false
| 10,848
|
r
|
ftd_new.R
|
# source('rapache_eval1.R')

# --- Setup: libraries and basket/model parameters -------------------------
# credule provides CDS/bond pricing helpers; bindata and foreach are used
# by the simulation code further down.
library(credule)
library(bindata)
library(foreach)
source('ftd_func.R')  # project helpers (e.g. imp_pd) - not shown here

# Inputs for a 2-name basket (one entry per reference name)
y_tod = c(0.04,0.07)         # today's yields per name
y_sigmas = c(0.005,0.01)     # yield volatilities per name
y_as = c(0.15,0.1)           # per-name speed parameters (presumably mean reversion - TODO confirm)
rfr_in = 0.01                # risk-free rate
spread_years_in = c(5,5)     # spread maturities in years
basket_years_in = 5          # basket maturity in years
rec_rate_in = 0.4            # recovery rate
time_step = 0.5              # tree step size in years
n_steps = basket_years_in/time_step  # number of tree levels

# Building LOG IR and PD trees
# Recombining tree per name: level j holds 2*j-1 nodes spaced ir_steps[i]
# apart until a width cap is reached, after which the level is carried
# forward unchanged.
itrees = list()
ir_steps = y_sigmas * sqrt(3*time_step)
for(i in 1:length(y_tod)){
  itree = list()
  itree[[1]] = y_tod[i]
  for(j in 2:n_steps)
    if(j*ir_steps[i] < 0.00184/(y_as[i]*time_step))
      itree[[j]] = itree[[j-1]][1] + seq(ir_steps[i],len=2*j-1,by=-ir_steps[i]) else
        itree[[j]] = itree[[j-1]]
  itrees[[i]] = itree
}

# Implied default probabilities at every tree node via imp_pd() from
# ftd_func.R (spread = node yield minus risk-free rate).
# NOTE(review): `spread_years_in[i]` uses the loop index left over from
# the tree-building loop above; `spread_years_in[i1]` looks intended -
# confirm before relying on the result.
ptrees = foreach(i1=1:length(itrees))%do%{ foreach(i2=1:length(itrees[[i1]]))%do%{
  foreach(y=itrees[[i1]][[i2]],.combine=c)%do%imp_pd(y - rfr_in, rfr_in, spread_years_in[i], rec_rate_in) } }
library(mnormt); library(xts); library(foreach); library(foreach)
cm = function(n, x) { diag(n) + x*(1-diag(n)) }
# --- Scratch: sampling variability of estimated correlations --------------
# (exploratory code; rollapply comes from zoo, loaded via xts)
N = 2; s_d = 100
# 1M draws of a bivariate normal, correlation 0.01, sd 100 per component
r=rmnorm(1000000,rep(0,N),s_d*cm(N,0.01)*s_d)
# Correlation estimated on non-overlapping windows of 1000 observations
a=rollapply(1:nrow(r),width=1000,by=1000,FUN=function(i){ cor(r[i,])[1,2] })
sd(a)
plot(a)

k=1:N
# s1: spread of windowed correlation estimates as the overall scale varies.
# NOTE(review): %dopar% without a registered parallel backend falls back
# to sequential execution with a warning.
s1 = foreach(j = seq(0.1,10,len=50),.combine=c)%dopar%{
  r=rmnorm(1000000,rep(0,N),j*k*cm(N,0.1))
  a=rollapply(1:nrow(r),width=500,by=1000,FUN=function(i){ cor(r[i,])[1,2] })
  sd(a)
}
# s2: same, as the true correlation varies from 0.01 to 0.9
s2 = foreach(j = seq(0.01,0.9,len=50),.combine=c)%dopar%{
  r=rmnorm(1000000,rep(0,N),cm(N,j))
  a=rollapply(1:nrow(r),width=500,by=1000,FUN=function(i){ cor(r[i,])[1,2] })
  sd(a)
}
plot(s1,ylim=c(0,0.07))
lines(s2)

# --- Scratch: equity price series housekeeping ----------------------------
# wo_elements.RData apparently holds an xts price matrix (index() is used)
x = get(load('wo_elements.RData'))
cor(x[,c('MU.US.Equity','YNDX.US.Equity','UCG.IM.Equity')])
# Daily log-return standard deviation since 2017 for selected columns
foreach(j=c('MU.US.Equity','YNDX.US.Equity','UCG.IM.Equity'),.combine=c)%do%
sd(diff(log(x[index(x)>'2017-01-01',j]))[-1])
sd(diff(log(x[index(x)>'2017-01-01',20]))[-1])
# Normalise column names ("MU.US.Equity" -> "MU US") and save back in place
colnames(x) = gsub(' Equity','',gsub('\\.',' ',colnames(x)))
save(x,file='wo_elements.RData')

# Bond price as the settlement date rolls toward maturity at a flat 3% yield
plot(foreach(i=1:300,.combine=c)%do%bond.price(as.Date('2017-01-01')+i, '2018-01-01', 0.01, 1, 0.03, '30/360'))
N = 360
cormat = function(x,n) { diag(n) + x*(1-diag(n)) }
#sigm=c(0.3,0.3);rfr=0.01;corr0=0.01;r0=c(0.06,0.1);k=0.1;pds=c(0.05,0.06);liq_sp=1
# First-to-default Monte Carlo (version 1): estimates the probability that
# at least one of the n credits "defaults" - i.e. its pull-to-par adjusted
# bond price dips below 40 - over N steps.
#   sigm, r0, k, pds : per-name vol, initial rate, reversion speed, target PDs
#   rfr              : risk-free floor for simulated rates
#   corr0            : common pairwise correlation between names
# Depends on globals/helpers defined elsewhere in this file: N (path
# length), cormat(), pd_mc_Single(), rmnorm (mnormt), bond.price (credule),
# and foreach/%dopar%.
pd_mc = function(sigm,rfr,corr0,r0,k,pds){ #liq_sp){
  n = length(sigm)
  # Calibrate each name's regime-switch threshold sf so the single-name
  # model reproduces its target PD (matched to 3 decimals via uniroot)
  sf = foreach(i=1:n,.combine=c)%do%uniroot(function(x){ round(pd_mc_Single(sigm[i],rfr,r0[i],k,x),3) - pds[i] }, c(0.0001,0.4))$root
  cm0 = cormat(corr0,n)
  # 25 x 500 simulated joint paths
  x = foreach(j1=1:25,.combine=c)%dopar%{ foreach(j=1:500,.combine=c)%do%{
    mr0 = r0
    dB = rmnorm(N,rep(0,n),cm0)        # correlated rate shocks
    dP = rmnorm(N,rep(0,n),cm0)        # correlated draws for vol doubling
    for(i in 1:n)
      dP[,i] = pnorm(dP[,i])           # map to uniforms, keeping correlation
    dP0 = rmnorm(N,rep(0,n),cm0) > 0   # correlated vol-reset triggers
    r = matrix(0,N,n); r[1,] = r0; tmp_sigm = sigm
    # Square-root-diffusion style recursion with a volatility regime:
    # vol doubles when dP falls below the calibrated threshold, resets to
    # sigm on a positive dP0 draw; rates are floored at rfr.
    for(i in 2:N) {
      tmp_sigm = ifelse(dP[i,] < sf, tmp_sigm*2, tmp_sigm)
      d_tmp = r[i-1,] + k*(mr0 - r[i-1,]) + tmp_sigm*sqrt(r[i-1,])*dB[i,]
      r[i,] = ifelse(d_tmp > rfr, d_tmp, rfr)
      tmp_sigm = ifelse(dP0[i,], sigm, tmp_sigm)
    }
    # Default flag: TRUE if any name's adjusted price breaches 40 anywhere
    b = array(FALSE,N) # matrix(0,N,n);
    for(i in 1:n){
      b_tmp = bond.price(as.Date('2017-01-01'), '2018-01-01', 0.01, 1, r[,i], '30/360')
      b_tmp = b_tmp + (100-b_tmp)*(1:N)/N   # linear pull-to-par adjustment
      b = b | (b_tmp < 40)
    }
    if (max(b)) 1 else 0
  }}
  # Fraction of paths with at least one default event
  mean(x)
}
# --- Scratch calibration calls (left over from interactive use) -----------
# NOTE(review): pd_mc() is defined with 6 parameters but called here with
# only 5 arguments, and the uniroot() call passes a single argument and
# references a global `r0` - these lines error as written and look like
# stale experiments.
pd_mc(c(0.3,0.3),0.6,c(0.06,0.1),0.05,c(0.01,0.01))
uniroot(function(x){ pd_mc(x) - r0 }, c(0.2,0.4))
# Calibrate switch_factor so the single-name PD hits 0.05
f=function(x) { round(pd_mc_Single(0.35,0.06,0.05,x,6),3) - 0.05 }
uniroot(f,c(0.01,0.6))$root
pd_mc_Single(sigm=0.1, rfr=0.01, r0=0.1, k=0.1, switch_factor=0.05)
# Single-name Monte Carlo: probability that one simulated bond's
# pull-to-par adjusted price breaches 40 within N steps.
# switch_factor is the per-step probability that the reversion target
# jumps up by a factor (1 + 10*sigm). Depends on globals N, bond.price
# (credule), and foreach/%dopar%.
pd_mc_Single = function(sigm,rfr,r0,k,switch_factor){ #liq_sp){
  # 25 x 1000 independent paths
  x = foreach(j1=1:25,.combine=c)%dopar%{ foreach(j=1:1000,.combine=c)%do%{
    #N = 360; sigm=0.01; rfr=0.01; corr0=0.6; r0=0.1; k=0.1; liq_sp=1; switch_factor = 0.01;
    mr0 = r0
    # dB = rnorm(N,0,1)*(3^floor(log(runif(N))/log(switch_factor)))
    dB = rnorm(N,0,1)
    dP = pnorm(rnorm(N,0,1))
    r = array(0,N); r[1] = r0;
    for(i in 2:N){
      # With probability switch_factor, shift the reversion target upward
      if(dP[i] < switch_factor) mr0 = mr0*(1 + 10*sigm) #else if (dP[i] > 1 - switch_factor) mr0 = mr0/(1 + sigm)
      # Square-root diffusion step, floored at the risk-free rate
      r[i] = max(rfr, r[i-1] + k*(mr0 - r[i-1]) + sigm*sqrt(r[i-1])*dB[i])
    }; #plot(r)
    # Adjusted price path; "default" if it ever drops below 40
    b_tmp = bond.price(as.Date('2017-01-01'), '2018-01-01', 0.01, 1, r, '30/360')
    b = b_tmp + (100-b_tmp)*(1:N)/N < 40
    if (max(b)) 1 else 0
  }}
  # Fraction of defaulting paths
  mean(x)
}
# Leftover interactive debugging: `b` only exists inside the simulation
# loops above, so these plots fail when run at top level.
plot(b+(100-b)*(1:N)/N)
plot(b)
plot(b[,2])
# Row-wise weights from relative deviations: each row of x is expressed as
# its relative deviation from the reference vector x0 (one entry per
# column), then L1-normalised per row. The small epsilon in the
# denominator keeps an all-zero row from dividing by zero.
osc_wght_arr = function(x, x0) {
  rel_dev <- t((t(x) - x0) / x0)
  row_scale <- rowSums(abs(rel_dev)) + 0.0000001
  rel_dev / row_scale
}
cm0 = cormat(0.9999,2)
N = 360
#sigm=0.05;corr0=0.6;r0=c(0.06,0.1);k=0.9;liq_sp=1
# Second pd_mc variant - OVERWRITES the calibration version defined above.
# Instead of a default probability it returns the mean cumulative return
# of a rate-weighted two-bond portfolio over 30 x 30 simulated paths.
# NOTE(review): `mr0` is never assigned inside this function (the rate
# recursion picks up whatever global `mr0` exists) and `liq_sp` is unused;
# confirm before reuse.
pd_mc = function(sigm,corr0,r0,k,liq_sp){
  cm0 = cormat(corr0,2)
  x = foreach(j1=1:30,.combine=c)%dopar%{ foreach(j=1:30,.combine=c)%do%{
    #x = foreach(j1=1:1,.combine=c)%dopar%{ foreach(j=1:1,.combine=c)%do%{
    dB = rmnorm(N,c(0,0),cm0)          # correlated rate shocks
    r = matrix(0,N,2); r[1,] = r0
    # Square-root-diffusion recursion, floored at zero
    for(i in 2:N) {
      d_tmp = r[i-1,] + k*(mr0 - r[i-1,]) + sigm*sqrt(r[i-1,])*dB[i,]
      r[i,] = ifelse(d_tmp > 0, d_tmp, 0)
    }
    cor(r)
    b = matrix(0,N,ncol(r));
    # Price each rate path and apply the linear pull-to-par adjustment
    for(i in 1:ncol(r)){
      b_tmp = bond.price(as.Date('2017-01-01'), '2018-01-01', 0.03, 1, r[,i], '30/360')
      b[,i] = b_tmp + (100-b_tmp)*(1:N)/N
    }
    # Rate-proportional weights; accumulate weighted one-step returns
    w = r/rowSums(r) # osc_wght_arr(r,mr0)
    #plot(cumsum(rowSums(w[1:(N-1),]*(b[2:N,]-b[1:(N-1),])/b[1:(N-1),])))
    sum(rowSums(w[1:(N-1),]*(b[2:N,]-b[1:(N-1),])/b[1:(N-1),]))
  }}
  # Average cumulative portfolio return across all simulated paths
  mean(x)
}
plot(foreach(i=seq(0.01,0.9,len=10),.combine=c)%do%pd_mc(0.01,0,c(0.06,0.1),i,1))
plot(b[,1])
# d = x_e; i=3
# Persist a block of static security data into the repository DB.
# d: data frame laid out as a grid -- row 1 holds field names, column 1
#    holds security identifiers, remaining cells hold the field values.
# static_kind: integer tag stored alongside each value (1 = equities,
#    2 = funds, 3 = currencies at the call sites below).
save_statics = function(conn,d,static_kind){
# Register each column header as a field and collect the resulting ids.
# NOTE(review): this loop also registers column 1 (the identifier column)
# as a field -- confirm that is intended.
fids = foreach(i=1:ncol(d),.combine=c)%do%as.numeric(dbGetQuery(conn, paste("Q_AddSecurityField '",d[1,i],"'",sep='')))
# Register each security (rows 2..n) and collect the resulting ids.
secids = foreach(i=2:nrow(d),.combine=c)%do%as.numeric(dbGetQuery(conn, paste("Q_AddSecurity '",d[i,1],"'",sep='')))
for(i in 1:length(secids)){
# Build one multi-statement batch per security, one exec per field.
# Single quotes are stripped from values so the inline SQL stays valid.
# NOTE(review): string-built SQL -- prefer parameterised queries if the
# values can ever contain untrusted input.
q = ""
for(j in 1:length(fids))
q = paste(q,"\nexec Q_AddSecurityStaticData ",secids[i],", ",fids[j],", '",gsub("'","",d[i+1,j]),"', ",static_kind,sep='')
dbGetQuery(conn, q)
print(i)
}
}
library(xlsx)
library(RJDBC)
library(foreach)
drv = JDBC('com.microsoft.sqlserver.jdbc.SQLServerDriver','sqljdbc4-2.0.jar')
conn = dbConnect(drv, 'jdbc:sqlserver://10.245.67.30;user=RUser;password=Qwerty12;database=QUIK_REPO')
x = read.csv('/var/share/hist.csv',sep=';',header=FALSE,stringsAsFactors=FALSE)
x_e = read.csv('/var/share/Equities.csv',sep=';',header=FALSE,stringsAsFactors=FALSE)
x_f = read.csv('/var/share/Funds.csv',sep=';',header=FALSE,stringsAsFactors=FALSE)
x_c = read.csv('/var/share/Currencies.csv',sep=';',header=FALSE,stringsAsFactors=FALSE)
dbSendQuery(conn, 'truncate table Q_Security_StaticData; select 1')
save_statics(conn,x_e,1)
save_statics(conn,x_f,2)
save_statics(conn,x_c,3)
secids = foreach(i=seq(1,ncol(x)-1,by=2),.combine=c)%do%as.numeric(dbGetQuery(conn, paste("Q_AddSecurity '",x[1,i],"'",sep='')))
col_indices = seq(1,ncol(x)-1,by=2)
for(i in 1:length(secids)){
tryCatch({
y = x[-(1:2),col_indices[i]+0:1]
y = y[!is.na(y[,2]) & !is.na(y[,1]),]
y[,1]=as.Date(y[,1],format='%d.%m.%Y')
if(nrow(y) > 1) dbWriteTable(conn,'q_quotes_eod',cbind(a1=secids[i],a2=3,y),overwrite=FALSE,append=TRUE)
print(secids[i])
}, error=function(cond) {})
}
for(i in c(14,21,181,428,444,4214,4218)){
tryCatch({
y = x[-1,col_indices[i]+0:1]
y = y[!is.na(y[,2]),]
y[,1]=as.Date(y[,1],format='%d.%m.%Y')
if(nrow(y) > 1) dbWriteTable(conn,'q_quotes_eod',cbind(a1=secids[i],a2=3,y),overwrite=FALSE,append=TRUE)
print(secids[i])
}, error=function(cond) {})
}
library(RJDBC)
drv = JDBC('com.microsoft.sqlserver.jdbc.SQLServerDriver','sqljdbc4-2.0.jar')
conn = dbConnect(drv, 'jdbc:sqlserver://10.245.67.30\\quikodbc;user=QuikLimitsUser;password=Qwerty12;database=QUIK_REPO')
dbGetQuery(conn, 'select count(*) from q_quotes_eod')
secs = dbGetQuery(conn, 'select SecName,SecID from Q_Securities')
csv_names = gsub(' ','',gsub('\\.',' ',names(x)))
db_names = gsub('-','',gsub('\\*','',gsub(' ','',paste(ifelse(sapply(secs$SecName,function(y)!is.na(as.numeric(substr(y,1,1)))),'X',''),secs$SecName,sep=''))))
secids = secs$SecID[match(csv_names,db_names)]
secs$SecName[2633]
y = secs$SecName[2631]
names(x)[seq(1,ncol(x)-1,by=2)][4]
#x = readLines('eq_hist.csv')
#x[1] = substr(x[1],1,nchar(x[1])-1)
#length(strsplit(paste(x[1],';',sep=''),';')[[1]])
#length(strsplit(x[2],';')[[1]])
#writeLines(x,con=file('eq_hist.csv'))
x = read.table('eq_hist.csv',header=TRUE,sep=';',dec=',',row.names=NULL)
names(x)[1:5267] = names(x)[2:5268]
names(x)[2*(1:2634)] = paste(names(x)[2*(1:2634)-1],'Value',sep='')
names(x)[1:7]
for(i in seq(1,ncol(x)-1,by=2)){
y = x[,i+0:1]
y = y[!is.na(y[,2]),]
y[,1]=as.Date(y[,1],format='%d.%m.%Y')
dbWriteTable(conn,'q_quotes_eod',cbind(secids[i],3,y),overwrite=FALSE,append=TRUE)
print(csv_names[i])
}
#conn = create_conn()
#x = get_eod_quotes(c('AAPL US Equity','GOOGL US Equity'))
#x
x = dbGetQuery(create_conn(), 'select AVAT_Depth_Days, AVAT, AVAT_Datetime from Q_AVAT_Hist where SecID = 18 order by AVAT_Datetime')
x = dbGetQuery(create_conn(), 'select AVAT_Depth_Days, AVAT, case AVAT_Depth_Days when 0 then dateadd(n,-15,AVAT_Datetime) else AVAT_Datetime end as AVAT_Datetime
from Q_AVAT_Hist where SecID = 18 order by AVAT_Datetime')
x1 = x[x$AVAT_Depth_Days==0,]; x1 = as.xts(x1$AVAT,order.by=as.POSIXct(x1$AVAT_Datetime))
x2 = x[x$AVAT_Depth_Days==-1,]; x2 = as.xts(x2$AVAT,order.by=as.POSIXct(x2$AVAT_Datetime))
plot(x1,ylim=range(c(x1,x2)))
lines(x2,ylim=range(c(x1,x2)),lwd=3)
x = dbGetQuery(create_conn(), 'select AVAT_Depth_Days, AVAT, AVAT_Datetime from Q_AVAT_Hist where SecID = 18 order by AVAT_Datetime')
x1 = x[x$AVAT_Depth_Days==20,]; x1 = as.xts(x1$AVAT,order.by=as.POSIXct(x1$AVAT_Datetime))
plot(x1)
s = c('JNJ US Equity',
'SYK US Equity',
'DGX US Equity',
'MDT US Equity',
'BAX US Equity',
'TMO US Equity',
'HSIC US Equity',
'BDX US Equity',
'UNH US Equity',
'LLY US Equity',
'AMGN US Equity',
'GILD US Equity',
'AGN US Equity',
'CELG US Equity',
'BIIB US Equity')
r0 = get_eod_quotes(s,create_conn())
N = 30
r1 = exp(diff(log(tail(r0,N*252))))[-1,]-1
dc = diff(index(r1))
# r_in = r1; wgh = array(1/ncol(r0),ncol(r0)); vcl = 0.11
# Annualised (geometric) return of a volatility-targeted portfolio.
#
# r_in: xts/zoo series of per-period asset returns (columns = assets).
# wgh:  numeric weight vector, one element per column of r_in.
# vcl:  annualised volatility target for the exposure scaling.
# Returns the annualised growth rate assuming 252 periods per year.
opt_weights = function(r_in, wgh, vcl){
  # Weighted portfolio return series.
  rb = foreach(i=1:length(wgh),.combine='+')%do%{ wgh[i] * r_in[,i] }
  # Exposure needed to hit the vol target, from a 20-period rolling sd
  # annualised by sqrt(252); leverage is capped at 1.5x.
  expo = vcl / (rollapply(rb,20,sd)*sqrt(252))
  expo[expo>1.5] = 1.5
  # Drop the warm-up periods consumed by the rolling window.
  rb = tail(rb,length(expo))
  # BUG FIX: the original evaluated (and discarded) an expression that
  # referenced the undefined name `rate...`, so every call errored with
  # "object 'rate...' not found".  The financing/cost adjustment it
  # apparently intended is preserved here for a future, working fix:
  # rb <- rb*expo - expo*<financing_rate>*tail(dc,length(expo))/360 - 0.02*tail(dc,length(expo))
  # Geometric return, annualised at 252 periods per year.
  prod(rb+1)^(252/length(rb))
}
x = data.frame(a=1:4,b=3:6)
|
fb6e4f1d8bc914e27f54a94373b6ebc7c3f0eef9
|
cdf278819965268d0a7cafaff9790c6f2b2134bf
|
/inst/tests/testthat.R
|
67be2c1203d5cf04cbf30e2f5b7eef3e85a057ef
|
[
"MIT"
] |
permissive
|
owenjonesuob/BANEScarparkinglite
|
0668f6183b365a109e65f07832a71bf1f0da14f7
|
5297944cc611a6e4d27b9f154de48303a3a8ff48
|
refs/heads/master
| 2021-01-15T22:47:29.756145
| 2020-04-13T14:48:04
| 2020-04-13T14:48:04
| 99,914,416
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 81
|
r
|
testthat.R
|
library(testthat)
library(BANEScarparkinglite)
test_check("BANEScarparkinglite")
|
515517b00eba9a2f8618dc5565e08f9d70794016
|
5dc933b42d4030657d090ceb4bfb28a1a9e5ad2e
|
/Problems 3/Norm2Exp.R
|
3e857ab727f37cc81c294f4d11d57f2dc46b5f06
|
[] |
no_license
|
ilia10000/LearningR
|
7e8369cd85567be4ed1a1a4221d3e702f82f062f
|
a345aa8e6ec1301fdc9ecb7bcfd4319b0959e58a
|
refs/heads/master
| 2021-01-21T13:57:21.145375
| 2018-10-14T20:23:40
| 2018-10-14T20:23:40
| 50,978,131
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 439
|
r
|
Norm2Exp.R
|
# Box-Muller transform: map two independent Uniform(0, 1) realizations
# (U, V) to two independent standard-normal deviates.
RNORM = function(U,V){
  angle <- 2 * pi * U
  radius <- sqrt(-2 * log(V))
  radius * c(cos(angle), sin(angle))
}
# Generate Exp(2) from N(0,1)
# Note that Exp(2) = (1/4)Exp(1/2) = (1/4)Chi(2)
U1 = runif(1,0,1)
V1 = runif(1,0,1)
Norm = RNORM(U1,V1) #2 independent r.v.s from N(0,1)
Chis_df1 = Norm^2 #2 independent Chi Squares df 1
Chi_df2 = sum(Chis_df1) #1 Chi Square df 2
Exp = Chi_df2/4
Exp
|
8880e8d8e78f4361a363adb56f43e9bde5bb1179
|
d5b8ecc661a851d3194c1eb9531767e69cad39fc
|
/MS_Toolbox_R/aaft.R
|
1a50eb65b71e4c8030aa4beb938168d0a34fa47b
|
[] |
no_license
|
tomstafford/microsaccades
|
6df2f25269d4dc6c46763293336de53d0e361475
|
4d2986893d505a05ff80fe025e84634ec0c7701b
|
refs/heads/master
| 2020-03-21T02:32:37.636524
| 2018-06-20T10:59:21
| 2018-06-20T10:59:21
| 138,003,528
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,077
|
r
|
aaft.R
|
#============================================================
# Function aaft() -- Microsaccade Toolbox 0.9
# (R-language Version)
# Authors: Ralf Engbert, Petra Sinn, Konstantin Mergenthaler,
# and Hans Trukenbrod
# Date: February 20th, 2014
#============================================================
#------------------------------------------------------------
# Amplitude-adjusted Fourier Transform (Theiler et al., 1992)
#------------------------------------------------------------
source("ftpr.R")
# Amplitude-adjusted Fourier-transform surrogate of a time series.
# x: numeric vector.  Returns a surrogate series with the same amplitude
# distribution as x but randomised Fourier phases (x is truncated by one
# sample first if its length is even).
# NOTE(review): requires ftpr() from the sourced file "ftpr.R".
aaft <- function(x) {
# adjust to uneven length
N <- length(x)
if ( floor(N/2)==N/2 ) {
x <- x[1:(N-1)]
N <- N-1
}
# AAFT algorithm
# 1. Ranking: sorted values Sx and the permutation Rx that sorts x
h <- sort(x,index.return=TRUE)
Sx <- h$x
Rx <- h$ix
# 2. Random Gaussian data
g <- rnorm(N)
# 3. Sort Gaussian data
Sg <- sort(g)
# 4. Rearrange Gaussian data so it has the same rank order as x
y <- g
y[Rx] = Sg
# 5. Create phase-randomized surrogate
y1 = ftpr(y)
# 6. Ranked time series: permutation that sorts the surrogate
h <- sort(y1,index.return=TRUE)
Ry1 <- h$ix
# 7. AAFT surrogate time series: original amplitudes in surrogate order
xs <- x
xs[Ry1] <- Sx;
return(xs)
}
|
2e77830906c88c012f4796661834b84f8abb728d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/adegraphics/examples/triangle.class.Rd.R
|
9eb81fada755c493b74ffb03654d66eebefc19c3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 509
|
r
|
triangle.class.Rd.R
|
library(adegraphics)
### Name: triangle.class
### Title: Ternary plot with a partition in classes (levels of a factor)
### Aliases: triangle.class
### Keywords: hplot aplot
### ** Examples
data(euro123, package = "ade4")
fac1 <- euro123$plan$an
df1 <- rbind.data.frame(euro123$in78, euro123$in86, euro123$in97)
triangle.class(df1, fac = fac1, showposition = TRUE, col = c(1, 2, 3))
triangle.class(df1, fac = fac1, showposition = FALSE, plabels.cex = 0, col = c(1, 2, 3),
key = list(space = "left"))
|
2b87ea458b3bd62f83721bd998d1d97cf08e4a8c
|
2935d597895945d2a32b6701f75e918405533a57
|
/H3K9me2/snakemake_ChIPseq/mapped/both/peaks/PeakRanger1.18/ranger/p0.05_q0.05/genome_wide/regioneR/noMinWidth_mergedOverlaps/TE_family_vs_H3Kmod_genome_wide_peaks_chr.R
|
5e88e19773fc4e4d4e2f047635a373056896abf5
|
[] |
no_license
|
ajtock/wheat
|
7e39a25664cb05436991e7e5b652cf3a1a1bc751
|
b062ec7de68121b45aaf8db6ea483edf4f5f4e44
|
refs/heads/master
| 2022-05-04T01:06:48.281070
| 2022-04-06T11:23:17
| 2022-04-06T11:23:17
| 162,912,621
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,414
|
r
|
TE_family_vs_H3Kmod_genome_wide_peaks_chr.R
|
#!/applications/R/R-3.3.2/bin/Rscript
# Plot bar chart of log2(observed:expected) peaks overlapping other features
# Usage:
# /applications/R/R-3.3.2/bin/Rscript TE_family_vs_H3Kmod_genome_wide_peaks_chr.R "Histone H3 lysine modification peaks" 10000 chr1A
library(ggplot2)
library(ggthemes)
dataName <- "Histone H3 lysine modification peaks"
perms <- 10000
chrName <- "chr1A"
args <- commandArgs(trailingOnly = T)
dataName <- as.character(args[1])
# Number of permutations (randomisations) performed
perms <- as.numeric(args[2])
chrName <- args[3]
plotDir <- paste0("./", chrName, "/plots/")
ptDirs <- c(
paste0("/home/ajt200/analysis/wheat/H3K9me2/snakemake_ChIPseq/mapped/both/peaks/PeakRanger1.18/ranger/p0.05_q0.05/genome_wide/regioneR/noMinWidth_mergedOverlaps/", chrName, "/"),
paste0("/home/ajt200/analysis/wheat/H3K4me3/snakemake_ChIPseq/mapped/both/peaks/PeakRanger1.18/ranger/p0.05_q0.05/genome_wide/regioneR/noMinWidth_mergedOverlaps/", chrName, "/"),
# paste0("/home/ajt200/analysis/wheat/epigenomics_shoot_leaf_IWGSC_2018_Science/H3K4me3/snakemake_ChIPseq/mapped/both/peaks/PeakRanger1.18/ranger/p0.05_q0.05/genome_wide/regioneR/noMinWidth_mergedOverlaps/", chrName, "/"),
paste0("/home/ajt200/analysis/wheat/epigenomics_shoot_leaf_IWGSC_2018_Science/H3K9ac/snakemake_ChIPseq/mapped/both/peaks/PeakRanger1.18/ranger/p0.05_q0.05/genome_wide/regioneR/noMinWidth_mergedOverlaps/", chrName, "/"),
paste0("/home/ajt200/analysis/wheat/epigenomics_shoot_leaf_IWGSC_2018_Science/H3K27me3/snakemake_ChIPseq/mapped/both/peaks/PeakRanger1.18/ranger/p0.05_q0.05/genome_wide/regioneR/noMinWidth_mergedOverlaps/", chrName, "/"),
paste0("/home/ajt200/analysis/wheat/epigenomics_shoot_leaf_IWGSC_2018_Science/H3K36me3/snakemake_ChIPseq/mapped/both/peaks/PeakRanger1.18/ranger/p0.05_q0.05/genome_wide/regioneR/noMinWidth_mergedOverlaps/", chrName, "/")
)
ptLibNames <- c(
"H3K9me2_Rep1_ChIP",
"H3K4me3_Rep1_ChIP",
# "H3K4me3_ChIP_SRR6350668",
"H3K9ac_ChIP_SRR6350667",
"H3K27me3_ChIP_SRR6350666",
"H3K36me3_ChIP_SRR6350670"
)
ptLibNamesPlot <- c(
"H3K9me2",
"H3K4me3",
# "H3K4me3 (IWGSC)",
"H3K9ac",
"H3K27me3",
"H3K36me3"
)
famNames <- c(
"CACTA_DTC",
"Harbinger_DTH",
"hAT_DTA",
"Helitrons_DHH",
"Mariner_DTT",
"Mutator_DTM",
"Unclassified_class_2_DXX",
"Unclassified_with_TIRs_DTX",
"Copia_LTR_RLC",
"Gypsy_LTR_RLG",
"LINE_RIX",
"SINE_SIX",
"Unclassified_LTR_RLX",
"Unclassified_repeats_XXX"
)
famNamesPlot <- c(
"CACTA",
"Harbinger",
"hAT",
"Helitrons",
"Mariner",
"Mutator",
"Unclass. class 2",
"Unclass. with TIRs",
"Copia LTR",
"Gypsy LTR",
"LINE",
"SINE",
"Unclass. LTR",
"Unclass. repeats"
)
ptList <- lapply(seq_along(ptLibNames), function(x) {
load(paste0(ptDirs[x], "permTest_", ptLibNames[x],
"_rangerPeaks_vs_TEsDNA_", chrName, ".RData"))
load(paste0(ptDirs[x], "permTest_", ptLibNames[x],
"_rangerPeaks_vs_TEsRNA_", chrName, ".RData"))
c(ptPeaksTEsDNAPerChrom, ptPeaksTEsRNAPerChrom)
})
ptPeaksTEsDNAPerChrom <- NULL
ptPeaksTEsRNAPerChrom <- NULL
#assign(paste0(ptLibNames[2], "_TEsDNA"), ptPeaksTEsDNAPerChrom)
ptList_Pval <- lapply(seq_along(ptList), function(x) {
lapply(seq_along(ptList[[x]]), function(y) {
ptList[[x]][[y]]$numOverlaps$pval
})
})
ptList_Obs <- lapply(seq_along(ptList), function(x) {
lapply(seq_along(ptList[[x]]), function(y) {
ptList[[x]][[y]]$numOverlaps$observed
})
})
ptList_Perm <- lapply(seq_along(ptList), function(x) {
lapply(seq_along(ptList[[x]]), function(y) {
ptList[[x]][[y]]$numOverlaps$permuted
})
})
ptList_Exp <- lapply(seq_along(ptList), function(x) {
lapply(seq_along(ptList[[x]]), function(y) {
mean(ptList[[x]][[y]]$numOverlaps$permuted)
})
})
ptList_log2ObsExp <- lapply(seq_along(ptList), function(x) {
lapply(seq_along(ptList[[x]]), function(y) {
log2((ptList_Obs[[x]][[y]]+1)/(ptList_Exp[[x]][[y]]+1))
})
})
ptList_Zscore <- lapply(seq_along(ptList), function(x) {
lapply(seq_along(ptList[[x]]), function(y) {
ptList[[x]][[y]]$numOverlaps$zscore
})
})
ptList_AltHyp <- lapply(seq_along(ptList), function(x) {
lapply(seq_along(ptList[[x]]), function(y) {
ptList[[x]][[y]]$numOverlaps$alternative
})
})
ptList_alpha0.05 <- lapply(seq_along(ptList), function(x) {
lapply(seq_along(ptList_Perm[[x]]), function(y) {
if(ptList_AltHyp[[x]][[y]] == "greater") {
quantile(ptList_Perm[[x]][[y]], 0.95)[[1]]
} else {
quantile(ptList_Perm[[x]][[y]], 0.05)[[1]]
}
})
})
ptList_log2alpha0.05 <- lapply(seq_along(ptList), function(x) {
lapply(seq_along(ptList_alpha0.05[[x]]), function(y) {
log2((ptList_alpha0.05[[x]][[y]]+1)/(ptList_Exp[[x]][[y]]+1))
})
})
ptList_log2ObsExp_sorted <- lapply(seq_along(ptList), function(x) {
unlist(ptList_log2ObsExp[[x]][sort.int(unlist(ptList_log2ObsExp[[1]]),
decreasing = T,
index.return = T)$ix])
})
ptList_log2alpha0.05_sorted <- lapply(seq_along(ptList), function(x) {
unlist(ptList_log2alpha0.05[[x]][sort.int(unlist(ptList_log2ObsExp[[1]]),
decreasing = T,
index.return = T)$ix])
})
famNames_sorted <- famNames[sort.int(unlist(ptList_log2ObsExp[[1]]),
decreasing = T,
index.return = T)$ix]
famNamesPlot_sorted <- famNamesPlot[sort.int(unlist(ptList_log2ObsExp[[1]]),
decreasing = T,
index.return = T)$ix]
# Assemble the plotting frame: one row per (sample, TE family) pair.
# GENERALISATION: the original concatenated ptList_*_sorted[[1]]..[[5]]
# by hand, silently breaking whenever a sample is added to or removed
# from ptLibNames; unlist() concatenates however many there are, in the
# same order.
df <- data.frame(Sample = rep(ptLibNames,
                              each = length(ptList_log2ObsExp_sorted[[1]])),
                 Transposon_family = rep(famNamesPlot_sorted, length(ptLibNames)),
                 log2ObsExp = unlist(ptList_log2ObsExp_sorted,
                                     use.names = FALSE),
                 log2alpha0.05 = unlist(ptList_log2alpha0.05_sorted,
                                        use.names = FALSE))
df$Transposon_family <- factor(df$Transposon_family,
levels = famNamesPlot_sorted)
df$Sample <- factor(df$Sample,
levels = ptLibNames)
bp <- ggplot(data = df,
mapping = aes(x = Transposon_family,
y = log2ObsExp,
fill = Sample)) +
theme_bw(base_size = 20) +
geom_bar(stat = "identity",
position = position_dodge()) +
scale_fill_manual(name = "Sample",
values = c("magenta3",
"forestgreen",
# "green2",
"dodgerblue",
"navy",
"darkorange2"),
labels = ptLibNamesPlot) +
geom_point(mapping = aes(Transposon_family, log2alpha0.05),
position = position_dodge(0.9),
shape = "-", colour = "grey70", size = 7) +
labs(x = "Transposon superfamily",
y = expression("Log"[2]*"(observed:expected) peak overlap")) +
theme(axis.line.y = element_line(size = 1, colour = "black"),
axis.ticks.y = element_line(size = 1, colour = "black"),
axis.text.y = element_text(colour = "black", size = 18),
axis.ticks.x = element_blank(),
axis.text.x = element_text(angle = 45, hjust = 1, colour = "black", size = 18),
panel.grid = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
plot.title = element_text(hjust = 0.5, size = 20)) +
ggtitle(paste0(dataName, " (", chrName, "; ", as.character(perms), " permutations)"))
ggsave(paste0(plotDir, "barplot_TE_family_permTestResults_",
as.character(perms), "perms_",
"log2_Observed_Expected_H3Kmod_peaks.pdf"),
plot = bp,
height = 14, width = 20)
save(bp,
file = paste0(plotDir, "barplot_TE_family_permTestResults_",
as.character(perms), "perms_",
"log2_Observed_Expected_H3Kmod_peaks.RData"))
|
20987eac7ec47065b18192932ee3e0fc1d7722a5
|
935f498683c4e523243abd80a87c57ca637294f5
|
/R/repo_events.R
|
1199f2a1d32627baae16b2c5c2f6b88778bcfe1f
|
[
"MIT"
] |
permissive
|
jasmine2chen/gitevents
|
358e68893b6e61de11f0c21c2d72d1c8efa433a8
|
f7fc5de145c26a5b27a8e4691838af7a0fe4d998
|
refs/heads/master
| 2022-11-15T22:13:37.577235
| 2020-07-09T23:35:06
| 2020-07-09T23:35:06
| 278,493,093
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,667
|
r
|
repo_events.R
|
# library(httr)
# library(jsonlite)
# library(dplyr)
#' Get a list of events from a GitHub respository
#'
#' Returns the 30 most recent events from a public repository (within the last 90 days), including all metadata, as a list of lists.
#'
#' @param owner string
#' @param repo string
#'
#' @return list
#' @export
#'
#' @examples
#' get_repo_events("nganlyle", "534_project")
get_repo_events <- function(owner, repo) {
path <- sprintf("repos/%s/%s/events", owner, repo)
url <- httr::modify_url("https://api.github.com/", path = path)
resp <- httr::GET(url)
# raise error if the response is not json
# NOTE(review): this check runs before the status-code check below, so a
# non-JSON error page (e.g. an HTML 502) masks the underlying HTTP
# failure with a generic "not json" message.
if (httr::http_type(resp) != "application/json") {
stop("API did not return json", call. = FALSE)
}
# parse response with jsonlite; simplifyVector = FALSE keeps the raw
# list-of-lists structure that create_repo_df() indexes into
parsed <- jsonlite::fromJSON(httr::content(resp, "text"), simplifyVector = FALSE)
# raise error if the request fails, surfacing GitHub's own message
if (httr::http_error(resp)) {
stop(
sprintf(
"GitHub API request failed [%s]\n%s\n<%s>",
httr::status_code(resp),
parsed$message,
parsed$documentation_url
),
call. = FALSE
)
}
# Return an S3 object: parsed payload plus the request path and the raw
# response, classed "github_api".
structure(
list(
content = parsed,
path = path,
response = resp
),
class = "github_api"
)
}
#' Create a dataframe of events from a GitHub repository
#'
#' Returns a dataframe with selected information about the 30 most recent events from a public repository (within the last 90 days). Information includes event id, event type, date, time, author and push message if the event was a push.
#'
#' @param owner string
#' @param repo string
#'
#' @return dataframe (2D list)
#' @export
#'
#' @examples create_repo_df("nganlyle", "534_project")
create_repo_df <- function(owner, repo) {
  repo_events <- get_repo_events(owner, repo)
  events <- repo_events$content
  # Extract one field per event with type-stable vapply calls.  This
  # also handles an empty event list gracefully (the original
  # `for (i in 1:0)` loops errored on repositories with no events).
  type <- vapply(events, function(e) as.character(e$type), character(1))
  datetime <- vapply(events, function(e) as.character(e$created_at), character(1))
  actor_id <- vapply(events, function(e) as.numeric(e$actor$id), numeric(1))
  actor_dlogin <- vapply(events, function(e) as.character(e$actor$display_login), character(1))
  event_id <- vapply(events, function(e) as.character(e$id), character(1))
  # First commit message for pushes, NA for every other event type.
  push_msg <- vapply(events, function(e) {
    if (e$type == "PushEvent") as.character(e$payload$commits[[1]]$message)
    else NA_character_
  }, character(1))
  # BUG FIX: the original used as.data.frame(cbind(...)), which first
  # coerces everything to a character matrix, so numeric columns such as
  # actor_id came back as strings.  data.frame() keeps each column's type.
  df <- data.frame(event_id, type, datetime, actor_id, actor_dlogin, push_msg,
                   stringsAsFactors = FALSE)
  df$datetime <- as.POSIXct(df$datetime, format = "%Y-%m-%dT%H:%M:%OS")
  return(df)
}
#' Create events stamp for a repository
#'
#' @param owner string
#' @param repo string
#' @param log_date string yyyy-mm-dd
#'
#' @return dataframe (2D list)
#' @export
#'
#' @examples get_repo_log("nganlyle", "534_project", "2020-01-26")
# Build a display-ready log of the repository's events on one calendar
# day, with friendly column headers.
get_repo_log <- function(owner, repo, log_date) {
  events <- create_repo_df(owner, repo)
  # Split the timestamp into a calendar date (for filtering) and a
  # time-of-day string (for display).
  events$datecol <- as.Date(as.character(events$datetime))
  events$timecol <- format(events$datetime, format = "%H:%M:%OS")
  # which() drops NA dates, matching subset()'s behaviour.
  on_date <- events[which(events$datecol == log_date), ]
  keep <- c("event_id", "type", "actor_dlogin", "push_msg", "timecol")
  stamp <- on_date[keep]
  colnames(stamp) <- c("Event ID", "Event Type", "Actor", "Message", "Time")
  return(stamp)
}
# ## Testing
# events_534 <- get_repo_events("nganlyle", "534_project")
# events_httr <- get_repo_events("hadley", "httr")
#
# df_534 <- create_repo_df("nganlyle", "534_project")
# df_httr <- create_repo_df("hadley", "httr")
# df_pandas <- create_repo_df("pandas-dev", "pandas")
#
# log <- get_repo_log("nganlyle", "534_project", "2020-01-26")
# repo_name = rep(NA,repo_events_no)
# for (i in 1:repo_events_no) {
# repo_name[i]=repo_events$content[[i]]$repo$name
# }
#
# repo_id = rep(NA,repo_events_no)
# for (i in 1:repo_events_no) {
# repo_id[i]=repo_events$content[[i]]$repo$id
# }
# repo_url = rep(NA,repo_events_no)
# for (i in 1:repo_events_no) {
# repo_url[i]=repo_events$content[[i]]$repo$url
# }
|
a43fd63b7925f9e71084f379411c5323d2131833
|
1326b5fec78ed753676df9d275590fed2bbac4e9
|
/man/plot.permuteTest.Rd
|
3c129d125763473d21075538a3f8bee4b60a54f5
|
[] |
no_license
|
lhsego/glmnetLRC
|
c0d4c06c7ee9d84acc93337166fca67211381b15
|
8ff225c512269ca6d8f402e28600df654df4a8e2
|
refs/heads/master
| 2020-12-25T16:35:53.010136
| 2017-10-19T21:24:17
| 2017-10-19T21:24:17
| 51,790,797
| 1
| 0
| null | 2016-02-15T22:37:38
| 2016-02-15T22:37:38
| null |
UTF-8
|
R
| false
| true
| 690
|
rd
|
plot.permuteTest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.permuteTest.R
\name{plot.permuteTest}
\alias{plot.permuteTest}
\title{Plot the null distribution of the permutation test}
\usage{
\method{plot}{permuteTest}(x, ...)
}
\arguments{
\item{x}{an object of class \code{permuteTest} returned by \code{\link{permuteTest.glmnetLRC}}.}
\item{\dots}{Arguments passed to \code{\link{hist}}}
}
\value{
Produces a histogram of the null distribution of the loss, with a vertical red
line indicating the mean expected loss.
}
\description{
Plot the null distribution of the permutation test
}
\seealso{
See \code{\link{glmnetLRC}} for an example.
}
\author{
Landon Sego
}
|
d731b7a3c1e7794c32902386aa84e0a144877c0e
|
01fe688a807a88373d7bde3158b3363d9072373a
|
/RStudio/0813/0813.R
|
7c0c5b1017a83242d8903af3daa0dbc06d31644c
|
[] |
no_license
|
tmddnr712/BigData_DB
|
eeb7610182bedc23be43fc4effd35460bfbbe8c7
|
97216a344b621347af01c7b57a4216fd773ed94a
|
refs/heads/main
| 2023-07-11T07:50:34.199117
| 2021-08-24T08:48:47
| 2021-08-24T08:48:47
| 383,033,628
| 0
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 2,778
|
r
|
0813.R
|
# Perceptron의 구현
x1 <- runif(30, -1, 1) # 균등 분포
x2 <- runif(30, -1, 1)
x <- cbind(x1, x2)
Y <- ifelse(x2 > 0.5 + x1, +1, -1) # y 값을 결정
plot(x, pch=ifelse(Y>0,"+","-"), xlim=c(-1,1), ylim=c(-1,1), cex=2)
abline(0.5, 1)
# Forward pass of the perceptron: signed distance of sample x from the
# hyperplane defined by weights w and bias b, i.e. dot(x, w) + b.
calculate_distance = function(x,w,b){
  dot_product <- sum(x * w)
  dot_product + b
}
# Classify every row of x using the current weights w and bias b: rows
# with a negative signed distance get label -1, all others +1.
linear_classifier = function(x, w, b){
  signed <- apply(x, 1, calculate_distance, w, b)
  ifelse(signed >= 0, +1, -1)
}
# Euclidean (L2) norm of a numeric vector.
second_norm = function(x) {
  sqrt(sum(x^2))
}
# learning_rate controls how strongly each misclassification updates the
# weights: larger -> faster but less stable learning, smaller -> slower
# but steadier convergence.
# Train a perceptron on samples x (one row per sample) with labels y in
# {-1, +1}.  Interactive: plots the decision boundary every 5 updates and
# waits for [enter].  Returns the unit-normalised weights and bias plus
# the total update count.  Terminates only if the data are linearly
# separable (classic perceptron behaviour).
perceptron = function(x, y, learning_rate = 1){ # learn the weights and bias
w = vector(length = ncol(x)) # weights; vector() yields logical FALSE (== 0) until first arithmetic
b = 0 # bias
k = 0 # update counter (also throttles the plotting below)
R = max(apply(x, 1, second_norm)) # R = largest sample norm, scales the bias update
incorrect = TRUE
plot(x, cex = 0.2)
while(incorrect){ # loop until one full pass makes no updates
incorrect = FALSE # stays FALSE if no row is misclassified -> loop ends
yc <- linear_classifier(x,w,b) # predictions with current parameters (forward pass)
for(i in 1:nrow(x)){ # one pass over all samples
if(y[i] != yc[i]){ # misclassified: actual label differs from prediction
w <- w + learning_rate * y[i] * x[i,] # error-driven weight correction
b <- b + learning_rate * y[i] * R^2
k <- k+1
if(k%%5 == 0){
# Draw the current decision boundary and pause for the user.
intercept <- - b / w[[2]]
slope <- - w[[1]] / w[[2]]
abline(intercept, slope, col="red")
cat("반복 # ", k, "\n")
cat("계속하기 위해 [enter]")
line <- readline() # pause until the user presses enter
}
incorrect = TRUE # at least one misclassification -> run another pass
}
}
}
s = second_norm(w) # normalise so (w, b) has unit weight norm
return(list(w=w/s, b=b/s, updates=k)) # normalised parameters and update count
}
(p <- perceptron(x, Y))
(y <- linear_classifier(x, p$w, p$b))
plot(x, cex=0.2)
points(subset(x, Y==1), col = "black", pch="+", cex=2)
points(subset(x, Y==-1), col = "red", pch="-", cex=2)
intercept <- - p$b / p$w[[2]]
slope <- - p$w[[1]] / p$w[[2]]
abline(intercept, slope, col="green")
###
df = data.frame(
x2 = c(1:6),
x1 = c(6:1),
y = factor(c('n','n','n','y','y','y'))
)
df
install.packages("nnet")
library(nnet)
model_net1 = nnet(y ~ ., df, size = 1)
model_net1
summary(model_net1)
names(model_net1)
model_net1$wts
model_net1$coefnames
model_net1$value
model_net1$fitted.values
model_net1$entropy
model_net1$softmax
predict(model_net1, df)
p <- predict(model_net1, df, type="class")
p
table(p, df$y)
install.packages("neuralnet")
library(neuralnet)
concrete <- read.csv("C:/Users/노승욱/Desktop/R자료/concrete.csv")
str(concrete)
|
6233b98ffd446ec7ee38f1c80d252f1ff097f831
|
fc76f853b699f5e1ffa9a03c4e2725773ada429a
|
/functions.R
|
3824df0c082ea24a4dfe59f4142faaf6f4e48c0a
|
[] |
no_license
|
bmschmidt/ACS
|
d255d9b2dc888ecddd35f1e689028bc10becc613
|
711272c3ac19a270fa3f135567c2619608faf583
|
refs/heads/master
| 2021-01-25T03:26:56.080707
| 2014-05-22T22:04:10
| 2014-05-22T22:04:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,321
|
r
|
functions.R
|
library(dplyr)
library(ggplot2)
# Read the per-record replicate weights from "weights.txt" (tab-separated,
# no header) and return them as a data frame with named columns.
#
# weights: unused legacy argument (the previous default 1:80 was
#   immediately overwritten below); kept so existing calls still work.
# Returns a data.frame with columns SERIALNO, sample, weight.
returnWeights = function(weights=1:80) {
  weights = read.table("weights.txt",sep="\t",header=F,colClasses=c("numeric","integer","integer"))
  names(weights) = c("SERIALNO","sample","weight")
  # BUG FIX: the original ended on the `names<-` replacement, whose value
  # is the names vector, so callers received c("SERIALNO","sample","weight")
  # instead of the table.  Return the table explicitly.
  weights
}
writeOutFields = function() {
#
counts = persons %.% group_by(FOD1P) %.% summarize(count=n()) %.% arrange(-count) %.% collect()
named = counts %.% encode("FOD1P")
head(named)
write.table(named,file="majorNames.tsv",sep="\t",quote=F,row.names=F)
#Then I assigned some values at MajorFields.tsv
}
# Recode an integer-coded column of `tab` as a labelled factor using a
# codebook.  looker must have columns field / code / value; it defaults
# to the global `codebook`.  Returns tab with the column replaced.
encode = function(tab,fieldsname="FOD1P",looker=codebook) {
#gives a factor sensible names from a codebook
# Keep only this field's rows; strip trailing spaces from labels and
# coerce codes to integer to match the data.
lookups = looker %.%
filter(field==fieldsname) %.%
mutate(value = gsub(" $","",value),code=as.integer(code))
counts = table(tab[,fieldsname])
missingCounts = counts[!names(counts) %in% lookups$code] # NOTE(review): computed but never used; codes absent from the codebook silently become NA below
tab[,fieldsname] =
factor(tab[,fieldsname],levels = lookups$code,labels=lookups$value)
tab
}
# Keep only the rows of `frame` whose value of `variable` is among the n
# most frequent levels, where frequency is the weighted sum of
# `countVariable` (default: the ACS person weight PWGTP).  Both variable
# arguments are quoted expressions for dplyr's regroup/summarise.
filterToTopN = function(frame,variable,n=30,countVariable=quote(PWGTP)) {
topN = frame %.% regroup(list(variable)) %.% summarise(groupingCount=sum(countVariable)) %.% top_n(n)
# Keep just the grouping column so the join below acts as a pure filter.
topN = topN[,1,drop=F]
frame %.% inner_join(topN)
}
factorByPrincomps = function(frame) {
head(frame)
}
#dplyr string workarounds:
# Helper functions that allow string arguments for dplyr's data modification functions like arrange, select etc.
# Author: Sebastian Kranz
# Examples are below
#' Modified version of dplyr's filter that uses string arguments
#' @export
s_filter = function(.data, ...) {
eval.string.dplyr(.data,"filter", ...)
}
#' Modified version of dplyr's select that uses string arguments
#' @export
s_select = function(.data, ...) {
eval.string.dplyr(.data,"select", ...)
}
#' Modified version of dplyr's arrange that uses string arguments
#' @export
s_arrange = function(.data, ...) {
eval.string.dplyr(.data,"arrange", ...)
}
#' Modified version of dplyr's arrange that uses string arguments
#' @export
s_mutate = function(.data, ...) {
eval.string.dplyr(.data,"mutate", ...)
}
#' Modified version of dplyr's summarise that uses string arguments
#' @export
s_summarise = function(.data, ...) {
eval.string.dplyr(.data,"summarise", ...)
}
#' Modified version of dplyr's group_by that uses string arguments
#' @export
s_group_by = function(.data, ...) {
eval.string.dplyr(.data,"group_by", ...)
}
#' Internal function used by s_filter, s_select etc.
#'
#' Builds a call such as \code{filter(.data, <args>)} from the string
#' arguments and evaluates it.
#' WARNING: uses eval(parse(text = ...)), so the argument strings are
#' executed as arbitrary R code -- never pass untrusted input.
eval.string.dplyr = function(.data, .fun.name, ...) {
args = list(...)
args = unlist(args)
# Splice the string fragments into a single verb call on .data.
code = paste0(.fun.name,"(.data,", paste0(args, collapse=","), ")")
df = eval(parse(text=code,srcfile=NULL))
df
}
# Examples
library(dplyr)
# Original usage of dplyr
mtcars %.%
filter(gear == 3,cyl == 8) %.%
select(mpg, cyl, hp:vs)
# Select user specified cols.
# Note that you can have a vector of strings
# or a single string separated by ',' or a mixture of both
cols = c("mpg","cyl, hp:vs")
mtcars %.%
filter(gear == 3,cyl == 8) %.%
s_select(cols)
# Filter using a string
col = "gear"
mtcars %.%
s_filter(paste0(col,"==3"), "cyl==8" ) %.%
select(mpg, cyl, hp:vs)
# Arrange without using %.%
s_arrange(mtcars, "-mpg, gear, carb")
# group_by and summarise with strings
mtcars %.%
s_group_by("cyl") %.%
s_summarise("mean(disp), max(disp)")
|
5e5523c11d86d3f3b634c1c7e3dc35f4f65469a9
|
e983b9ddb154e040349277335ad18731ec8d97f8
|
/TimeseriesChurnRate.R
|
7261aed55154f83b0e6c647144c1e6e74a821658
|
[
"MIT"
] |
permissive
|
blendo-app/TimeseriesChurnRatePrediction
|
8f8861f6a596fff87ed789974fd35bd916872cf6
|
eb621c6181543acc64d15b8ebb4e54b1aa90b0ff
|
refs/heads/master
| 2021-01-19T12:52:42.507444
| 2017-09-07T09:20:35
| 2017-09-07T09:20:35
| 88,054,793
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,483
|
r
|
TimeseriesChurnRate.R
|
library(sqldf)
library(glmnet)
library(randomForest)
library(ggplot2)
library(caret)
data<-table_1_
data$totalpractions<-0
data$totalactions[is.na(data$totalactions)]<-0
data$avgactions<-0
data$practions<-0
data$days_since<-0
#create email count
data$mailCount<-NA
data$mailCount[1]<-1
for(i in 2:(nrow(data))){
if (data$email_address[i]==data$email_address[i-1]){
data$mailCount[i]<-data$mailCount[i-1]+1
}else{
data$mailCount[i]<-1
}
}
#days of subscription
data$daysSub<-NA
data$daysSub<-as.numeric((data$send_time-data$timestamp_opt)/(60*60*24))
#total previous actions
for (i in 1:(nrow(data)-1)) {
if (data$mailCount[i+1] !=1){
data$totalpractions[i+1]<-data$totalactions[i]
}else{
data$totalpractions[i+1]<-0
}
}
#actions per email until now
for (i in 2:(nrow(data))) {
if (data$email_address[i]==data$email_address[i-1]){
data$practions[i]<-data$practions[i-1]+data$totalpractions[i]
}
}
for (i in 2:(nrow(data))) {
if (data$email_address[i]==data$email_address[i-1]){
data$avgactions[i]<-(data$practions[i-1]+data$totalpractions[i])/data$mail_id[i-1]
}
}
#days since last email was sent
for (i in 1 :nrow(data)) {
if (data$mail_id[i] !=1){
data$days_since[i]<-data$send_time[i]-data$send_time[i-1]
}
}
#status
data$status<-NA
data$status<-1
data$status[is.na(data$timestamp_out)]<-0
#personal-business emails
mailService<-sapply(strsplit(as.character(data$email_address),'@',fixed=TRUE), `[`, 2)
data$mailService<-sapply(strsplit(as.character(mailService),'.',fixed=TRUE), `[`, 1)
data$personalMail<-0
mailProviders<-c("gmail", "zoho", "outlook", "yahoo", "gmx", "yandex", "hushmail", "aol")
for (i in 1:length(data$mailService)) {
if (data$mailService[i] %in% mailProviders){
data$personalMail[i]<-1
i=i+1
}
}
# Impute missing opt-in timestamps with "today".
# NOTE(review): Sys.Date() is a Date; if timestamp_opt is POSIXct this mixes
# classes -- confirm intent.
data$timestamp_opt[is.na(data$timestamp_opt)]<-Sys.Date()
#remove rows with NAs in list and campaign
data<-data[!with(data,is.na(list_id)& is.na(campaign_id)),]
query<-"select distinct campaign_id, send_time from data"
campaigns<-sqldf(query)
#remove campaigns with no info
data<-data[!with(data,is.na(send_time)),]
#keep only columns with no NAs
data<-data[colSums(!is.na(data)) >= dim(data)[1]]
sapply(data,function(x) sum(is.na(x)))
#train and test dataset: row i goes to `test` when the NEXT row starts a new
# subscriber (mailCount == 1), i.e. row i is that subscriber's last email;
# the final row is never assigned (same as the original loop).
# Fixes vs original: `for (i in 1:nrow(data)-1)` parsed as (1:nrow(data))-1
# and iterated from i = 0; rbind-ing one row at a time was O(n^2).
idx<-seq_len(nrow(data)-1)
isLastForSubscriber<-data$mailCount[idx+1]==1
test<-data[idx[isLastForSubscriber],]
train<-data[idx[!isLastForSubscriber],]
#initial look
summary(data)
#mailCount density split by churn status (0 = subscribed, 1 = unsubscribed).
subs.tr<-data[data$status==0,]
unsubs.tr<-data[data$status==1,]
# NOTE(review): passing data.frames inside aes() is unusual; ggplot2
# normally expects bare column names with a data= argument -- confirm these
# plots render as intended.
ggplot(as.data.frame(data$status), aes(as.data.frame(data$mailCount), fill = "steelblue1", colour = "steelblue1")) +
geom_density(alpha = 0.1)
# NOTE(review): par(mfrow=...) only affects base graphics; it does not
# arrange ggplot objects side by side.
par(mfrow=c(1,2))
ggplot(as.data.frame(unsubs.tr$status), aes(as.data.frame(unsubs.tr$mailCount), fill = "steelblue1", colour = "steelblue1")) +
geom_density(alpha = 0.1)
ggplot(as.data.frame(subs.tr$status), aes(as.data.frame(subs.tr$mailCount), fill = "steelblue1", colour = "steelblue1")) +
geom_density(alpha = 0.1)
# Same density on a log scale to spread out the long tail.
ggplot(as.data.frame(data$status), aes(as.data.frame(log(data$mailCount+1)), fill = "steelblue1", colour = "steelblue1")) +
geom_density(alpha = 0.1)
### 1o pick 3 mails, 2o pick 16, after 63rd mail very loyal
#Histogram for mailing lists: churn counts per list_id.
list_sub <- data.frame(table(data$list_id,data$status))
names(list_sub) <- c("List_id","churn","Count")
# Fix: the fill column was renamed to `churn` above; the original
# `fill = status` would fail with "object 'status' not found". The constant
# alpha also belongs in the geom, not in aes().
ggplot(data=list_sub, aes(x=List_id, y=Count, fill=churn)) + geom_bar(stat="identity", alpha=0.1)
#separation numeric-non numeric variables: keep only numeric columns for
# the correlation plot and the lasso below.
train$mail_id<-as.numeric(train$mail_id)
train.n<-sapply(train,class)=='numeric'
trainNum<-train[,train.n]
test$mail_id<-as.numeric(test$mail_id)
test.n<-sapply(test,class)=='numeric'
testNum<-test[,test.n]
#correlation plot of the numeric predictors
corr<-cor(trainNum)
corrplot(corr, type = "lower", tl.pos = "ld")
#lasso: variable selection for the churn flag.
# (Removed unused `df <- data.matrix(trainNum)` -- it was never referenced.)
# NOTE(review): column 9 is assumed to be `status`; a name-based drop such
# as trainNum[, names(trainNum) != "status"] would be safer -- confirm the
# column position before relying on -9.
lasso<-glmnet(as.matrix(trainNum[,-9]),trainNum$status)
plot.glmnet(lasso, label=TRUE)
crossVal<-cv.glmnet(as.matrix(trainNum[,-9]),trainNum$status) # 10-fold cross validation ensures the model is not over fitted
coefs<- coef(crossVal, s="lambda.1se")
coefs
#logistic regression on the hand-picked / lasso-suggested predictors
model0<- glm(as.factor(status)~mailCount+as.factor(list_id)+as.factor(personalMail)+location_latitude, family=binomial(link='logit'), data=train)
summary(model0)
#decision tree
# NOTE(review): column 13 is dropped from the predictors -- confirm which
# column that is; a name-based drop would be more robust.
DataTreeM <- rpart(status~., data=train[,-13])
varImp(DataTreeM)
plot(DataTreeM)
text(DataTreeM)
predictionTreeM <- predict(DataTreeM, test)
|
49a3046ab180d6fdcadb4d1c5bdcc391652523dc
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/soilDB/examples/get_extended_data_from_NASIS.Rd.R
|
034e06a3890f844f7c3f733d702387f5154c13ad
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 381
|
r
|
get_extended_data_from_NASIS.Rd.R
|
# Auto-extracted example script for soilDB::get_extended_data_from_NASIS_db.
# The example body is wrapped in a \dontrun block ("## Not run:" / "##D"
# prefixes) because it requires a connection to a local NASIS database.
library(soilDB)
### Name: get_extended_data_from_NASIS_db
### Title: Extract accessory tables and summaries from a local NASIS
###   Database
### Aliases: get_extended_data_from_NASIS_db
### Keywords: manip
### ** Examples
## Not run:
##D # query extended data
##D e <- get_extended_data_from_NASIS_db()
##D
##D # show contents of extended data
##D str(e)
## End(Not run)
|
ff72e24e25087cffcadcf5fbf98689497eb6f55f
|
9fe822d020259841a5ee28b94232f36b75930a75
|
/run_analysis.R
|
385f53aba816f610bf70e447f60f771f4d34fc87
|
[] |
no_license
|
teykitt/R_Getting_And_Cleaning_Data
|
d0e079a9fba80d2e7f997dd52f258b80b4c37ee9
|
a2882106733241677fdcd1b1eccf9eeb33728e1e
|
refs/heads/master
| 2020-03-18T13:55:59.067729
| 2018-05-25T08:06:45
| 2018-05-25T08:06:45
| 134,818,429
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,434
|
r
|
run_analysis.R
|
# The purpose of this project is to demonstrate your ability to collect, work with, and clean a data set.
#
# Review criteria
# The submitted data set is tidy.
# The Github repo contains the required scripts.
# GitHub contains a code book that modifies and updates the available codebooks with the data to indicate all the variables and summaries calculated, along with units, and any other relevant information.
# The README that explains the analysis files is clear and understandable.
# The work submitted for this project is the work of the student who submitted it.
# Getting and Cleaning Data Course Project
# The purpose of this project is to demonstrate your ability to collect, work with, and clean a data set.
# The goal is to prepare tidy data that can be used for later analysis.
# You will be graded by your peers on a series of yes/no questions related to the project.
# You will be required to submit:
# 1) a tidy data set as described below
# 2) a link to a Github repository with your script for performing the analysis
# 3) a code book that describes the variables, the data, and any transformations or work that you performed
# to clean up the data called CodeBook.md.
# 4) You should also include a README.md in the repo with your scripts.
# This repo explains how all of the scripts work and how they are connected.
#
# One of the most exciting areas in all of data science right now is wearable computing - see for example this article .
# Companies like Fitbit, Nike, and Jawbone Up are racing to develop the most advanced algorithms to attract new users.
# The data linked to from the course website represent data collected from the accelerometers from the Samsung Galaxy S
# smartphone. A full description is available at the site where the data was obtained:
#
# http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
#
# Here are the data for the project:
#
# https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
#
# You should create one R script called run_analysis.R that does the following.
#
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable
# for each activity and each subject. Good luck!
##################################################################################################
# BEGINNING OF ANALYSIS
##################################################################################################
# STEP 1: Merges the training and the test sets to create one data set.
# - Starting with my own pre-steps.
# P1: Download and unzip the data.
# P2: Create script to compile each data set into its own data frame
# P3: Execute P2 script for test and train and merge the data frames together
# Create data directory to store downloaded files
if (!file.exists("data")) {
dir.create("data")
}
# set the working directory path
# NOTE(review): hard-coded, machine-specific path -- will only run on the
# original author's machine.
setwd("~/Desktop/Data_Science_Track/Getting_And_Cleaning_Data/Week_4/Assignment/data")
# # P1. Download and unzip the data
# NOTE(review): the wd above already ends in .../data, so this second
# setwd("data") expects a nested data/data directory -- confirm, likely a
# leftover from an earlier layout.
setwd("data")
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile="dataset.zip", method = "curl")
dateDownloaded <- date()
unzip("dataset.zip")
# P2. Create script to compile each data set into its own data frame
# Script name: compile_uci_df.R
# P3: Execute P2 script for test and train and merge the data frames together
# NOTE(review): compile_uci_df() is defined in a separate script that must
# be source()d before this point.
test_df <- compile_uci_df ("test")
train_df <- compile_uci_df ("train")
final_df <- rbind(test_df, train_df)
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# Extracting the first three columns SubjectId, ActivityID, Activity_Name along with all variables related to mean/std.
# Fix: the original indexed names(test), but no object `test` exists at this
# point -- the combined data frame is `final_df`.
sub_final_df <- final_df[c(1:3, grep ("([mM]ean|[sS]td)", names(final_df)))]
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive variable names.
# Both steps 3-4 were completed as part of step 1
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable
# for each activity and each subject.
# Expected new tidy data: one row per (subject x activity) combination,
# one column per averaged variable.
# create base tidy data frame from the unique SubjectId and Activity_Name
# combinations (columns 1 and 3 of sub_final_df)
tidy_avg_df <- unique.data.frame(sub_final_df[,c(1,3)])
# reset the row names
rownames(tidy_avg_df) <- NULL
# calculate the mean of each variable for each (subject, activity) row of
# tidy_avg_df. sub_final_df has three id columns, hence the -(1:3) on its
# names; the subsetting inside mean() picks the matching raw rows.
for (r in seq_len(nrow(tidy_avg_df))) { # for each (subject, activity) row
for (v in names(sub_final_df)[-(1:3)]) { # for each variable column (except the first 3 id columns)
# calculate and store the mean for each variable
tidy_avg_df[r, v] <- mean(sub_final_df[sub_final_df$SubjectId == tidy_avg_df[r, 1]
& sub_final_df$Activity_Name == tidy_avg_df[r, 2], v])
}
}
# Prefix the variable columns with Avg_ to indicate these are the new calculated averages.
# Fix: tidy_avg_df has only TWO id columns (SubjectId, Activity_Name), so the
# original -(1:3) skipped one column too many and left the first measurement
# column without the Avg_ prefix.
names(tidy_avg_df)[-(1:2)] <- paste0("Avg_", names(tidy_avg_df)[-(1:2)])
# Write out the output of the final tidy data set
# NOTE(review): hard-coded absolute output path -- only valid on the
# original author's machine.
write.table(tidy_avg_df,
file="/Users/janma/Desktop/Data_Science_Track/Getting_And_Cleaning_Data/Week_4/Assignment/data/tidy_avg_df.txt", row.names=FALSE)
|
b87484a49f50396b3a56f3cb91940e7edc231841
|
ac771259d6e3469b75e0fdac251839ab1d070767
|
/man/vtlEvalNodes.Rd
|
6b18581fdbd931eb047f6687e4e9d0a600b5132b
|
[] |
no_license
|
amattioc/RVTL
|
7a4e0259e21d52e8df1efe9a663ca20a7d130b15
|
630a41f27d0f5530d7c3df7266ecfaf25fe4803a
|
refs/heads/main
| 2023-04-27T17:52:39.093386
| 2021-05-14T09:22:24
| 2021-05-14T09:22:24
| 304,639,834
| 0
| 1
| null | 2020-10-19T19:19:41
| 2020-10-16T13:46:02
|
JavaScript
|
UTF-8
|
R
| false
| true
| 1,381
|
rd
|
vtlEvalNodes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vtl.R
\name{vtlEvalNodes}
\alias{vtlEvalNodes}
\title{Evaluate a list of nodes}
\usage{
vtlEvalNodes(sessionID, nodes)
}
\arguments{
\item{sessionID}{The symbolic name of an active VTL session}
\item{nodes}{The nodes to be evaluated}
}
\description{
Calculate and return the values of the nodes of a VTL session
}
\details{
This function is used to evaluate specific nodes of a vtl session. The evaluated nodes will be returned in a list
}
\examples{
\dontrun{
#prepare a VTL compliant dataset in R
r_input <- data.frame(id1 = c('a', 'b', 'c'),
m1 = c(1.23, -2.34, 3.45),
m2 = c(1.23, -2.34, 3.45),
stringsAsFactors = F)
attr(r_input, 'identifiers') <- c('id1')
attr(r_input, 'measures') <- c('m1', 'm2')
vtlAddStatements(session = 'test', restartSession = T,
statements = 'a := r_input;
b := 3;
c := abs(sqrt(14 + a));
d := a + b + c;
e := c + d / a;
f := e - d;
g := -f;
test := a * b + c / a;')
vtlCompile('test')
vtlEvalNodes('test', vtlListNodes('test'))
}
}
|
83d382457ff967393bc0b2c54561660d7a432412
|
96504fbe9d4dcee2e31089d00aabda46dc3950f1
|
/R/predict.snqProfitEst.R
|
7afd7cf204a49faf1bdc840f9f2d0cea22349618
|
[] |
no_license
|
cran/micEconSNQP
|
a326acf97d635454d7ead9571a28766e82a79bfb
|
99c0891b02a42e1ea9546e449f8951ed79326581
|
refs/heads/master
| 2022-07-30T17:18:03.410662
| 2022-06-21T10:30:02
| 2022-06-21T10:30:02
| 17,697,476
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,084
|
r
|
predict.snqProfitEst.R
|
# Predict netput quantities and profit from a fitted SNQ profit function.
#
# Arguments:
#   object    -- a fitted snqProfitEst object (coefficients, data, weights).
#   newdata   -- data to predict for; defaults to the estimation data.
#   se.fit    -- also return standard errors of the fitted values?
#   se.pred   -- also return standard errors of prediction?
#   interval  -- "none", "confidence" or "prediction" interval columns.
#   level     -- coverage level for the intervals.
#   useDfSys  -- use system-wide residual df for the t quantile (TRUE) or
#                the per-equation df (FALSE).
# Returns a data.frame with one column per netput quantity (plus optional
# .se.fit/.se.pred/.lwr/.upr columns) and the predicted profit.
predict.snqProfitEst <- function( object, newdata = object$data,
se.fit = FALSE, se.pred = FALSE, interval = "none", level = 0.95,
useDfSys = TRUE, ... ) {
# Dimensions of the estimated system.
nNetput <- length( object$pMeans )
nFixed <- length( object$fMeans )
nObsOld <- nrow( object$data )
nObsNew <- nrow( newdata )
# Rebuild the model data for newdata with the same scaling/weights as the
# original estimation (.snqProfitModelData is an internal package helper).
modelData <- .snqProfitModelData( data = newdata,
weights = object$weights, priceNames = object$priceNames,
quantNames = object$quantNames, fixNames = object$fixNames,
instNames = object$instNames, form = object$form,
netputScale = object$scalingFactors )
system <- snqProfitSystem( nNetput, nFixed, form = object$form,
profit = TRUE )
# Restriction matrix mapping the linearly independent coefficients to the
# full coefficient vector; each equation uses nCoefPerEq of its rows.
restrict <- snqProfitRestrict( nNetput, nFixed, object$form )
nCoefPerEq <- nrow( restrict ) / nNetput
x <- list()
result <- data.frame( obsNo = 1:nObsNew )
# --- one prediction column (plus optional SE/interval columns) per netput
for( i in 1:nNetput ) {
x[[ i ]] <- model.matrix( system[[ i ]], modelData ) %*%
restrict[ ( ( i - 1 ) * nCoefPerEq + 1 ):( i * nCoefPerEq ), ]
result[[ object$quantNames[ i ] ]] <- c( x[[ i ]] %*%
object$coef$liCoef )
if( se.fit || interval == "confidence" ) {
# SE of the fitted value: sqrt(diag(X V X')).
result[[ paste( object$quantNames[ i ], ".se.fit", sep = "" ) ]] <-
diag( x[[ i ]] %*% object$coef$liCoefCov %*%
t( x[[ i ]] ) )^0.5
}
if( se.pred || interval == "prediction" ) {
# SE of prediction adds the residual variance of equation i.
result[[ paste( object$quantNames[ i ], ".se.pred", sep = "" ) ]] <-
diag( x[[ i ]] %*% object$coef$liCoefCov %*%
t( x[[ i ]] ) + object$est$residCov[ i, i ] )^0.5
}
if( interval != "none" ) {
if( useDfSys ) {
tval <- qt( 1 - ( 1- level )/2, df.residual( object$est ) )
} else {
tval <- qt( 1 - ( 1- level )/2, df.residual( object$est$eq[[i]] ) )
}
if( interval == "confidence" ) {
seName <- paste( object$quantNames[ i ], ".se.fit", sep = "" )
} else if( interval == "prediction" ) {
seName <- paste( object$quantNames[ i ], ".se.pred", sep = "" )
} else {
stop( "argument 'interval' must be either 'none', 'confidence'",
" or 'prediction'" )
}
result[[ paste( object$quantNames[ i ], ".lwr", sep="" ) ]] <-
result[[ object$quantNames[ i ] ]] - ( tval * result[[ seName ]] )
result[[ paste( object$quantNames[ i ], ".upr", sep="" ) ]] <-
result[[ object$quantNames[ i ] ]] + ( tval * result[[ seName ]] )
# Drop the SE column again if the caller only asked for the interval.
if( !se.fit && interval == "confidence" ) result[[ seName ]] <- NULL
if( !se.pred && interval == "prediction" ) result[[ seName ]] <- NULL
}
}
# Drop the helper obsNo column unless a netput is actually named "obsNo".
if( !( "obsNo" %in% object$quantNames ) ) result$obsNo <- NULL
# --- profit equation (last element of the system), using the full
# coefficient vector instead of the restricted one.
i <- nNetput + 1
x[[ i ]] <- model.matrix( system[[ i ]], modelData )
result$profit <- c( x[[ i ]] %*% object$coef$allCoef )
if( se.fit || interval == "confidence" ) {
result[[ "profit.se.fit" ]] <- diag( x[[ i ]] %*%
object$coef$allCoefCov %*% t( x[[ i ]] ) )^0.5
}
if( se.pred || interval == "prediction" ) {
# Residual variance of the profit equation, estimated from the stored
# residuals (divided by the original number of observations).
s2 <- sum( residuals( object )$profit^2 ) / nObsOld
result[[ "profit.se.pred" ]] <- diag( x[[ i ]] %*%
object$coef$allCoefCov %*% t( x[[ i ]] ) +
s2 )^0.5
}
if( interval != "none" ) {
if( useDfSys ) {
tval <- qt( 1 - ( 1- level )/2, df.residual( object$est ) )
} else {
# NOTE(review): falls back to nObsOld as df here (no per-equation df
# for the profit equation) -- confirm this is intended.
tval <- qt( 1 - ( 1- level )/2, nObsOld )
}
if( interval == "confidence" ) {
seName <- "profit.se.fit"
} else if( interval == "prediction" ) {
seName <- "profit.se.pred"
} else {
stop( "argument 'interval' must be either 'none', 'confidence'",
" or 'prediction'" )
}
result[[ "profit.lwr" ]] <- result[[ "profit" ]] -
( tval * result[[ seName ]] )
result[[ "profit.upr" ]] <- result[[ "profit" ]] +
( tval * result[[ seName ]] )
if( !se.fit && interval == "confidence" ) result[[ seName ]] <- NULL
if( !se.pred && interval == "prediction" ) result[[ seName ]] <- NULL
}
return( result )
}
# Predict method for convexity-imposed SNQ profit estimates.
# Standard errors require stored simulation results (object$sim); when they
# are absent, any request for SEs or intervals is downgraded with a warning
# before delegating to the plain-estimate predict method.
predict.snqProfitImposeConvexity <- function( object, newdata = object$data,
se.fit = FALSE, se.pred = FALSE, interval = "none", level = 0.95,
useDfSys = TRUE, ... ) {
if( is.null( object$sim ) ) {
noSeMsg <- " standard errors are not available"
if( se.fit ) {
warning( "setting argument 'se.fit' to 'FALSE' because", noSeMsg )
se.fit <- FALSE
}
if( se.pred ) {
warning( "setting argument 'se.pred' to 'FALSE' because", noSeMsg )
se.pred <- FALSE
}
if( interval != "none" ) {
warning( "setting argument 'interval' to 'none' because", noSeMsg )
interval <- "none"
}
}
# Delegate the actual prediction to the plain-estimate method.
predict.snqProfitEst( object, newdata = newdata, se.fit = se.fit,
se.pred = se.pred, interval = interval, level = level,
useDfSys = useDfSys, ... )
}
|
df2883e135e23e0128ede1ecf0eeb203e87e8cb3
|
98249747ca7a0b15bf34d0fd3fe9389d1bb4052a
|
/man/AnovaTest.Rd
|
c177c3d907a34026b48d175a9c516ce3e86f3f00
|
[] |
no_license
|
suraj-yathish/MCSlibrary
|
a5fe2cdbfd74764fec9996e9c3c294afa18a40ab
|
77e1be2cf25d47b2784c8eb1bb2e3d278fb62ace
|
refs/heads/master
| 2020-03-14T10:51:06.997578
| 2018-06-04T09:13:57
| 2018-06-04T09:13:57
| 131,577,606
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 517
|
rd
|
AnovaTest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AnovaTest.R
\name{AnovaTest}
\alias{AnovaTest}
\title{Function to perform Anova test}
\usage{
AnovaTest(data, VIF1, VIF3, defect1, defect2)
}
\arguments{
\item{data, VIF1, VIF3, defect1, defect2}{The data set and the model inputs (VIF-screened metrics and defect measures) used to fit the logistic-regression models whose terms are tested.}
}
\description{
A function that automates the process of performing Anova type 2 test for the LR models.
}
\examples{
AnovaTest()
}
\keyword{VIF,}
\keyword{data}
\keyword{defect,}
|
d0583fb82eba5f4b315359402c54d9875d3ddf15
|
305209120a483d820dde9f4a790709e2ca21b83f
|
/x.R
|
c772527082c3c4938c941f76c4b508756fc89d6d
|
[] |
no_license
|
misal6/DataProductsSlides
|
cbd5627465c3f86533430505019bc5842ac575df
|
3fbb150f66fc950421a4cd5e4671856d0c92210c
|
refs/heads/master
| 2021-01-25T05:21:48.421253
| 2015-01-24T11:09:14
| 2015-01-24T11:09:14
| 29,774,036
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 340
|
r
|
x.R
|
library(shiny)
library(datasets)
library(forecast)
# Fit an ARIMA model to the FTSE series and project one trading year
# (260 days) ahead, then report the implied return on investment.
ftseSeries <- ts(EuStockMarkets[, "FTSE"])
ftseModel <- auto.arima(ftseSeries)
ftseForecast <- forecast(ftseModel, h = 260)
finalForecastRow <- tail(as.data.frame(ftseForecast), 1)
currentValue <- round(tail(ftseSeries, 1))
forecastValue <- round(finalForecastRow[, 1])
roiPercent <- round(((forecastValue - currentValue) / currentValue) * 100)
print(paste0("Return on Investment : ", roiPercent, "%"))
|
81d1cffee017ea1c64ac580a8fd0c50683c583e9
|
e6acff9db72596867cdbcba70a8b3711cc97a317
|
/methylationPattern.R
|
4f25edcfb77d06e9816447928edcfb39394fb720
|
[] |
no_license
|
johnmous/methylationScripts
|
bb86118e9e4fae6440625803500b137f2832e0c5
|
e401f74f21f522e7fb9752f84c3aff982fdeec0f
|
refs/heads/master
| 2021-08-23T20:51:03.295658
| 2017-12-06T13:46:33
| 2017-12-06T13:46:33
| 113,179,998
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,921
|
r
|
methylationPattern.R
|
## Author: I. Moustakas
## Title: Get the methylation patterns and count them
## Usage: methylationPattern.R bismarkCpG outputDir sampleName
library("reshape2")
library("stringr")
# Expect exactly three CLI arguments: the Bismark CpG file (CpG_OB_*), the
# amplicon table and the output directory.
args = commandArgs(trailingOnly=TRUE)
if (length(args) != 3 ) {
# Fix: typo "direcotry" -> "directory" in the error message.
stop("CpG file from bismark (CpG_OB_*), amplicons table and output directory must be supplied", call.=FALSE)
}
bismarkCpG_OB = args[1]
ampliconsFile = args[2]
outDir = args[3]
dir.create(outDir, showWarnings = FALSE)
# (Removed: an unused per-run "regionReport.txt" was created here and its
# path variable immediately overwritten by the per-sample report below,
# leaving a stray empty file in outDir.)
## Get sample name out of the bismark file name
sampleName = str_match(bismarkCpG_OB, ".+/CpG_OB_(.+)_bismark.+")[1,2]
line <- sprintf("=========\nThis concerns sample: %s\n=========",sampleName )
regionReportFile = paste0(outDir, "/", sampleName, "_regionReport.txt")
file.create(regionReportFile, showWarnings = FALSE)
write(line, file=regionReportFile, append=TRUE)
## Load data. Forward and reverse strand are in two separate files (OB and OT). Combine them in one df
## If file does not exist, create empty DF with the 5-column Bismark layout
## so rbind below still works.
emptyDF <- data.frame(V1 = factor(),
V2 = factor(),
V3 = factor(),
V4 = integer(),
V5 = factor())
if (file.exists(bismarkCpG_OB)) {
methylationOB <- read.table(bismarkCpG_OB, skip = 1, sep="\t")
} else methylationOB <- emptyDF
# The OT (original top strand) file shares the OB path apart from the prefix.
bismarkCpG_OT <- gsub("CpG_OB_", "CpG_OT_", bismarkCpG_OB)
if (file.exists(bismarkCpG_OT)) {
methylationOT <- read.table(bismarkCpG_OT, skip = 1, sep="\t")
}else methylationOT <- emptyDF
methylation <- rbind(methylationOB, methylationOT)
# Columns: read id, methylation call (+/-), chromosome, position, z/Z code.
colnames(methylation) <- c("Read", "MethylStatus", "Chr", "Pos", "Zz")
## Load amplicon positions
amplicons <- read.table(ampliconsFile, header = TRUE)
## For each element of `methCounts` (per-pattern character-count tables),
## return the count stored under `pattern` ("+", "-" or "*"), or 0 when the
## pattern character does not occur.
## Changes: vapply instead of sapply for type stability, and the iteration
## variable no longer shadows base::c.
SumPos <- function(methCounts, pattern){
vapply(methCounts, function(counts){
if (is.na(counts[pattern])) 0 else as.numeric(counts[pattern])
}, numeric(1))
}
## For each value in `sumMethPos`, total the pattern counts of all patterns
## that share that same per-position sum.
## Changes: the inner parameter was named `sum`, shadowing base::sum (R
## still resolved the call, but it was fragile and confusing); vapply gives
## a type-stable numeric result.
SumCountPos <- function(sumMethPos, countPatterns) {
vapply(sumMethPos, function(s){
sum(countPatterns[sumMethPos == s])
}, numeric(1))
}
## Go through the amplicons and extract from the methylation table.
## For each amplicon: keep CpG calls inside its coordinates, build one
## methylation pattern string per read, count identical patterns, and write
## a per-amplicon TSV plus a summary to the region report.
result <- apply(amplicons, 1, function(amplicon) {
name = amplicon["Name"]
chr = amplicon["Chr"]
start = as.integer(amplicon["start"])
end = as.integer(amplicon["end"])
# NOTE(review): `strand` is read but never used below -- confirm intent.
strand = amplicon["strand"]
# Restrict the methylation calls to this amplicon's chromosome and span.
ampliconMethyl <- methylation[methylation$Chr == chr,]
ampliconMethyl <- ampliconMethyl[ampliconMethyl$Pos>=start & ampliconMethyl$Pos<=end, ]
## If there are records, proceed
if (nrow(ampliconMethyl) > 0) {
# Keep Read, Pos, MethylStatus (columns 1, 4, 2 of the Bismark layout).
ampliconMethyl <- ampliconMethyl[ ,c(1,4,2)]
methylPos <- ampliconMethyl$Pos
countPos <- table(methylPos)
# Number of distinct reads covering the amplicon.
sumRecords <- length(unique(ampliconMethyl[,1]))
# Keep only positions covered by more than 11% of the reads.
# NOTE(review): 0.11 is a magic threshold -- document its origin.
highCountPos <- names(countPos[countPos > 0.11*sumRecords])
ampliconMethyl <- ampliconMethyl[ampliconMethyl$Pos %in% highCountPos, ]
# One row per read, one column per CpG position ("+"/"-" calls).
methylPattern <- dcast(ampliconMethyl, Read ~ Pos )
positions <- as.character(paste0(sort(unique(ampliconMethyl$Pos)), ", "))
numberPositions <- length(positions)
line <- sprintf("\nIn Amplicon: %s on %s:%d-%d there were %d CpG positions detected. These are:" , name, chr, start, end, numberPositions)
write(line, file=regionReportFile, append=TRUE)
write(positions, file=regionReportFile, append=TRUE)
# Positions not covered by a read become "*" (unknown).
methylPattern[is.na(methylPattern)] <- "*"
# Drop the Read column; only the per-position calls remain.
methylPattern <- methylPattern[-1]
# Concatenate each read's calls into a pattern string and count duplicates.
countPatterns <- table(do.call(paste0, methylPattern))
listOfPatterns <- strsplit(names(countPatterns), NULL)
patterns <- as.data.frame(do.call(rbind, listOfPatterns))
# Per-pattern character counts ("+", "-", "*").
methCounts<- apply(patterns,1, function(pattern){
table(pattern)
})
colnames(patterns) <- colnames(methylPattern)
patterns$counts <- as.vector(countPatterns)
# Summaries per pattern: number of methylated / unmethylated / unknown
# positions, plus how many reads share that same per-position sum and the
# corresponding fraction of all reads.
sumMethPos<- SumPos(methCounts, "+")
patterns$sumMethPos <- sumMethPos
sumCountsMethPos <- SumCountPos(sumMethPos,countPatterns)
patterns$sumCountsMethPos <- sumCountsMethPos
patterns$pcntSumCountsMethPos <- round(sumCountsMethPos/sumRecords, 3)
sumUnMethPos <- SumPos(methCounts, "-")
patterns$sumUnMethPos <- sumUnMethPos
sumCountsUnMethPos <- SumCountPos(sumUnMethPos,countPatterns)
patterns$sumCountsUnMethPos <- sumCountsUnMethPos
patterns$pcntSumCountsUnMethPos <- round(sumCountsUnMethPos/sumRecords, 3)
sumUnknownPos <- SumPos(methCounts, "*")
patterns$sumUnknownPos <- sumUnknownPos
sumCountsUnknownPos <- SumCountPos(sumUnknownPos,countPatterns)
patterns$sumCountsUnknownPos <- sumCountsUnknownPos
patterns$pcntSumCountsUnknownPos <- round(sumCountsUnknownPos/sumRecords, 3)
# One TSV per (sample, amplicon) pair.
patternFile <- sprintf("%s/%s_%s_methylation.tsv", outDir, sampleName, name)
write.table(patterns, file=patternFile, quote = F, sep = "\t", row.names = F )
} else {
line <- sprintf("\nAmplicon %s Not Found", name)
write(line, file=regionReportFile, append=TRUE)
}
})
|
927c2f0923afb9e0c8a50d10c4ec9cb9b893b76b
|
6cf9a94de51479dc65dad3608a4b315ba289a36f
|
/man/top_rows_overlap-matrix-method.rd
|
a141d72f0478789e21c7b03d9df9fe152e01c161
|
[] |
no_license
|
NagaComBio/cola
|
818c3afdab7e140d549ab9ebf6995a882c967cf5
|
304b3cf771e97ced7f4b20388815b882202cdd84
|
refs/heads/master
| 2021-04-27T08:31:19.184145
| 2018-02-26T10:00:07
| 2018-02-26T10:00:07
| 122,491,685
| 0
| 0
| null | 2018-02-22T14:45:23
| 2018-02-22T14:45:23
| null |
UTF-8
|
R
| false
| false
| 990
|
rd
|
top_rows_overlap-matrix-method.rd
|
\name{top_rows_overlap-matrix-method}
\alias{top_rows_overlap,matrix-method}
\title{
Overlap of top rows from different top methods
}
\description{
Overlap of top rows from different top methods
}
\usage{
\S4method{top_rows_overlap}{matrix}(object, top_method = all_top_value_methods(),
top_n = round(0.25*nrow(object)), type = c("venn", "correspondance"), ...)
}
\arguments{
\item{object}{a numeric matrix}
\item{top_method}{methods defined in \code{\link{all_top_value_methods}}.}
\item{top_n}{number of top rows}
\item{type}{\code{venn}: use Venn Euler diagram; \code{correspondance}: use \code{\link{correspond_between_rankings}}.}
\item{...}{additional arguments passed to \code{\link{venn_euler}} or \code{\link{correspond_between_rankings}}}
}
\value{
No value is returned.
}
\author{
Zuguang Gu <z.gu@dkfz.de>
}
\seealso{
\code{\link{top_rows_overlap,list-method}}
}
\examples{
set.seed(123)
mat = matrix(rnorm(1000), nrow = 100)
top_rows_overlap(mat, top_n = 25)
}
|
749b60e39b146617dcbe44ee4f8d5b02bb9370c3
|
5ca8793fd39a818675306047c861e8c32965022a
|
/website/Old_source/Function/man/pi_wrapper.Rd
|
34d105151ba420aa7baca489d4e6fa865b9d1dfc
|
[] |
no_license
|
SOCR/TCIU
|
a0dfac068670fa63703b8e9a48236883ec167e06
|
85076ae775a32d89676679cfa6050e683da44d1d
|
refs/heads/master
| 2023-03-09T01:28:28.366831
| 2023-02-27T20:27:26
| 2023-02-27T20:27:26
| 188,899,192
| 8
| 5
| null | 2022-11-12T01:56:25
| 2019-05-27T19:33:38
|
R
|
UTF-8
|
R
| false
| true
| 580
|
rd
|
pi_wrapper.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pi_wrapper.R
\name{pi_wrapper}
\alias{pi_wrapper}
\title{pi_wrapper}
\usage{
pi_wrapper(x)
}
\arguments{
\item{x}{A numeric value}
}
\value{
A number that satisfies one of the 4 cases handled by the wrapper
}
\description{
Define a pi-wrapper calculator to ensure phase-arithmetic stays within [-pi : pi)
}
\details{
This function makes sure that phase-arithmetic stays within [-pi, pi)
}
\examples{
pi_wrapper(pi/8 + 1.186667 )
}
\author{
SOCR team <http://socr.umich.edu/people/>
}
|
4e1a9adbb6b88c2c36a421f4a4b139aeb12f8fe3
|
9c6c10769dd5d0d2b0604b5c5d5d43ad3635fd20
|
/R/blblm.R
|
4f0c85c61f42cbaf4607f3582df5b1aaf8247b88
|
[] |
no_license
|
anmeiliu/blblm
|
ec06281c75b7e996d5b0f14b38522ad7a67c1550
|
9960626bc0facc4c2753e81de6fd6aeee4403bb6
|
refs/heads/master
| 2022-11-04T07:31:02.449451
| 2020-06-11T06:02:47
| 2020-06-11T06:02:47
| 268,934,386
| 0
| 0
| null | 2020-06-03T00:03:29
| 2020-06-03T00:03:29
| null |
UTF-8
|
R
| false
| false
| 13,451
|
r
|
blblm.R
|
#' @import purrr
#' @import stats
#' @import utils
#' @import future
#' @importFrom magrittr %>%
#' @details
#' Generalized Linear Models with Bag of Little Bootstraps
"_PACKAGE"
## quiets concerns of R CMD check re: the .'s that appear in pipelines
# from https://github.com/jennybc/googlesheets/blob/master/R/googlesheets.R
utils::globalVariables(c("."))
#' Bag of Little Bootstraps General Linear Model
#'
#' This function implements the bag of little bootstraps for the general linear
#' model.
#'
#' Linear and logistic regression are properly supported, but if your
#' data is appropriately formed, there's no reason you can't use other general
#' linear models with blbglm(). (Corrections to predictions, where necessary,
#' are only made for the logistic case.)
#'
#' Parallelized processing is supported through future::plan(). blbglm()
#' defaults to using future_map(), respecting any plan() the user has set.
#' However, disabling the use of future_map() for sequential processing is
#' usually faster; set use_plan = FALSE for this.
#'
#' blbglm() can also work with filepaths in lieu of a data object. Provide a
#' vector of filepaths and blbglm() will read them for you. (Filepath reading
#' is faster when blbglm() is running in a parallelized mode.) In this case,
#' each file is considered to be one subsample (and m is ignored).
#'
#' By default, blbglm() assigns an equal number of observations to each
#' subsample (ties are broken in favor of earlier numbered subsamples). It is
#' possible to disable this behavior so that blbglm() assigns each observation
#' to a subsample at random, without regard for the number of observation in
#' each subsample. The minimum number of observations in each sample is
#' controlled by min_subsample_size, but it should be at least 2 + the number
#' of dependent variables.
#'
#' @param formula The formula to use for the linear models.
#' @param family The family of the linear models (see glm()). Defaults to
#' gaussian() for linear regression.
#' @param data An object containing data (which can be subsetted and passed to
#' glm()).
#' @param filepaths A vector or list of filepaths to read from.
#' @param read_function The function to use to read files.
#' @param m The number of subsamples to create.
#' @param B The number of bootstraps for each subsample.
#' @param min_subsample_size The minimum size of each subsample. For small
#' numbers of observations per subsample, mitigates weird behaviors from
#' fitting glm() on small data sets.
#' @param even_split Whether to split subsamples so that they are as equally
#' sized as possible.
#' @param use_plan Whether to use the plan set by future::plan(). use_plan =
#' FALSE is faster for sequential processing.
#' @param ... Additional arguments to pass to read_function.
#'
#' @return A blbglm object, containing the estimates from each bootstrap, the
#' formula, and the family.
#'
#' @export
#'
#' @examples
#' blbglm(mpg ~ wt, data = mtcars, B = 100, even_split = TRUE, use_plan = FALSE)
#' blbglm(Species ~ Sepal.Length,
#' family = binomial(), data = iris,
#' m = 3, B = 100, min_subsample_size = 30
#' )
blbglm <- function(formula, family = gaussian(), data = NULL, filepaths = NULL, read_function = read.csv, m = 10, B = 5000, min_subsample_size = NULL, even_split = NULL, use_plan = TRUE, ...) {
  # --- argument validation --------------------------------------------------
  # Fix: scalar `if` conditions now use short-circuiting && instead of the
  # element-wise & (R >= 4.3 errors on length > 1 input to && / ||, and the
  # short-circuit form is the correct idiom for scalar tests).
  if (is.null(data) && is.null(filepaths)) {
    stop("Neither data nor filepaths to data provided")
  }
  if (!is.null(data) && !is.null(filepaths)) {
    warning("Both data and filepaths specified, using data")
  }
  if (!is.null(filepaths) && length(filepaths) != m) {
    warning("Number of filepaths provided is not the same as number of splits, using file-based splits")
  }
  if (!is.null(filepaths) && !is.null(min_subsample_size)) {
    warning("Cannot specify min_subsample_size when using file-based splits")
  }
  if (!is.null(filepaths) && !is.null(even_split)) {
    warning("Cannot specify even_split when using file-based splits")
  }
  if (use_plan && grepl("sequential", deparse(attributes(plan())$call))) {
    warning("Using a sequential plan; this is usually slower than not using a plan (set use_plan = FALSE to use no plan)")
  }
  # --- resolve subsampling options ------------------------------------------
  # check that we have logical subsample options
  if (is.null(filepaths)) {
    if (is.null(min_subsample_size)) {
      if (is.null(even_split)) {
        even_split <- TRUE
      } else if (!even_split) {
        # Smallest glm-fittable subsample: one row per model variable plus one.
        min_subsample_size <- length(all.vars(formula)) + 1
        # Fix: dropped the redundant paste() wrapper around a single string
        # (the original closed paste() before the value, so message() already
        # concatenated the two arguments).
        message("Using minimum subsample size =", min_subsample_size)
      }
    } else {
      if (min_subsample_size * m > nrow(data)) {
        stop("min_subsample_size times m must be less than or equal to number of observations")
      } else if (!is.null(even_split)) {
        if (even_split) {
          warning("Cannot specify min_subsample_size when using even splits; ignoring min_subsample_size")
        }
      } else {
        even_split <- FALSE
      }
    }
  } else {
    even_split <- FALSE
  }
  # --- choose mapping backend -----------------------------------------------
  # check which map function to use: future-aware map respects plan(),
  # otherwise plain purrr::map.
  if (use_plan) {
    active_map <- furrr::future_map
  } else {
    active_map <- map
  }
  # --- run the bag of little bootstraps -------------------------------------
  if (!is.null(data)) {
    data_list <- split_sample(data, m, min_subsample_size, even_split)
    estimates <- active_map(data_list, ~ glm_each_subsample(formula, family, ., nrow(.), B))
  } else {
    # File-based mode: each file is one subsample, read on demand.
    estimates <- active_map(filepaths, function(filepath_split) {
      data <- filepath_split %>% read_function(...) # passes args along to read function
      glm_each_subsample(formula, family, data, nrow(data), B)
    })
  }
  res <- list(estimates = estimates, formula = formula, family = family)
  class(res) <- "blbglm"
  invisible(res)
}
#' Randomly split data into m parts
#'
#' @param data A data object to split.
#' @param m The number of splits to create.
#' @param min_subsample_size The minimum size of each split.
#' @param even_split Whether to create splits so they are as similarly-sized as possible.
#'
#' @return A list containing m subsets of data.
split_sample <- function(data, m, min_subsample_size, even_split) {
  # Randomly partition the rows of `data` into m subsamples.
  if (even_split) {
    # Balanced labels 1..m repeated to cover every row, then permuted, so
    # subsample sizes differ by at most one.
    idx <- sample(rep_len(1:m, nrow(data)))
  } else {
    idx <- sample.int(m, nrow(data), replace = TRUE)
    # Resample until every subsample reaches the minimum size.
    # Fix: table(idx) omits subsamples that drew zero observations, so a
    # completely empty subsample slipped through the original check;
    # tabulate(.., nbins = m) counts all m subsamples, including empty ones.
    while (any(tabulate(idx, nbins = m) < min_subsample_size)) {
      idx <- sample.int(m, nrow(data), replace = TRUE)
    }
  }
  split(data, idx)
}
#' Perform bootstraps with glm()
#'
#' Runs B independent bootstrap replicates on a single subsample and
#' collects the per-replicate estimates in a list.
#'
#' @param formula The formula to use for the linear models.
#' @param family The family of the linear models (see glm()).
#' @param data An object containing data (which can be passed to glm()).
#' @param n The number of observations in the data.
#' @param B The number of bootstraps.
#'
#' @return A list of length B of bootstrap estimates.
glm_each_subsample <- function(formula, family, data, n, B) {
  lapply(seq_len(B), function(rep_idx) glm_each_boot(formula, family, data, n))
}
#' Efficiently compute the estimate of glm() parameters for one bootstrap
#'
#' Draws multinomial frequencies (equivalent to resampling n rows with
#' replacement) and fits the model once with those frequencies as weights.
#'
#' @param formula The formula to use for the linear models.
#' @param family The family of the linear models (see glm()).
#' @param data An object containing data (which can be passed to glm()).
#' @param n The number of observations in the data.
glm_each_boot <- function(formula, family, data, n) {
  boot_freqs <- rmultinom(1, size = n, prob = rep(1, nrow(data)))
  glm1(formula, family, data, boot_freqs)
}
#' Efficiently estimate glm() parameters based on observation frequency
#'
#' Fitting one weighted glm() per bootstrap (with multinomial frequencies as
#' weights) is equivalent to, but much cheaper than, refitting on an
#' explicitly resampled dataset.
#'
#' @param formula The formula to use for the linear models.
#' @param family The family of the linear models (see glm()).
#' @param data An object containing data (which can be passed to glm()).
#' @param freqs The frequency of each observation.
#'
#' @return A list with elements \code{coef} (fitted coefficients) and
#'   \code{sigma} (frequency-weighted residual standard deviation).
glm1 <- function(formula, family, data, freqs) {
  # drop the original closure of formula,
  # otherwise the formula will pick a wrong variable from the global scope.
  # glm() evaluates `weights` in `data` first and then in
  # environment(formula); rebinding the environment here ensures the local
  # `freqs` is found.
  environment(formula) <- environment()
  fit <- glm(formula, family = family, data, weights = freqs)
  list(coef = blbcoef(fit), sigma = blbsigma(fit, freqs))
}
#' Extract the coefficients from a model fit
#'
#' @param fit The model fit to be extracted from.
#'
#' @return Named numeric vector of fitted coefficients.
blbcoef <- function(fit) {
  stats::coef(fit)
}
#' Compute sigma based on a model fit and observation weights
#'
#' The sigma computed here is based on the efficient bootstrap computation
#' used above, i.e. the weights are treated as repeated observations.
#' fit$weights isn't used because it provides misleading results for
#' non-Gaussian GLMs; fit$residuals generalizes between GLM forms.
#'
#' @param fit The model fit to be extracted from.
#' @param weights The weight of each observation in the model.
#'
#' @return The weighted residual standard deviation.
blbsigma <- function(fit, weights) {
  rank_p <- fit$rank
  sq_resid <- fit$residuals^2 # works for all forms of GLM
  total_weight <- sum(weights)
  sqrt(sum(weights * sq_resid) / (total_weight - rank_p))
}
#' Print out a blbglm object
#'
#' Prints the model formula, the pooled coefficients, and the pooled sigma.
#' coef() and sigma() dispatch to the blbglm methods defined in this file.
#'
#' @param x The object to print.
#'
#' @param ... Additional parameters to pass.
#'
#' @export
#' @method print blbglm
print.blbglm <- function(x, ...) {
  cat("blbglm model: ")
  print(x$formula)
  cat("\ncoefficients:\n")
  print(coef(x))
  cat("\nsigma: ")
  cat(sigma(x), "\n")
}
#' Calculate sigma for a blbglm object
#'
#' The point estimate averages the per-bootstrap sigmas within each
#' subsample and then averages those means across subsamples. The optional
#' interval is a percentile interval averaged across subsamples.
#'
#' @param object The object to calculate sigma for.
#'
#' @param confidence Whether to provide a confidence interval.
#' @param level The level to use for the confidence interval.
#' @param ... Additional parameters to pass.
#'
#' @return Bag of little bootstraps sigma.
#'
#' @export
#' @method sigma blbglm
sigma.blbglm <- function(object, confidence = FALSE, level = 0.95, ...) {
  # extract the B bootstrap sigmas from one subsample's estimate list
  boot_sigmas <- function(boots) {
    vapply(boots, function(b) b$sigma, numeric(1))
  }
  sigma <- mean(vapply(object$estimates,
                       function(boots) mean(boot_sigmas(boots)),
                       numeric(1)))
  if (!confidence) {
    return(sigma)
  }
  alpha <- 1 - level
  # quantile limits per subsample, averaged across subsamples
  # (na.rm = TRUE in case of subsamples with sigma = NA)
  limits <- unname(map_mean(
    object$estimates,
    function(boots) quantile(boot_sigmas(boots), c(alpha / 2, 1 - alpha / 2), na.rm = TRUE)
  ))
  c(sigma = sigma, lwr = limits[1], upr = limits[2])
}
#' Calculate coefficients for a blbglm object
#'
#' Bootstrap coefficient vectors are averaged within each subsample, then
#' the subsample means are averaged together.
#'
#' @param object The object to calculate coefficients for.
#'
#' @param ... Additional parameters to pass.
#'
#' @return Bag of little bootstraps coefficients.
#'
#' @export
#' @method coef blbglm
coef.blbglm <- function(object, ...) {
  subsample_mean <- function(boots) {
    rowMeans(map_cbind(boots, "coef"))
  }
  map_mean(object$estimates, subsample_mean)
}
#' Calculate confidence intervals for a blbglm object
#'
#' Percentile bootstrap intervals: within each subsample the coefficient
#' quantiles are taken over the bootstrap replicates, and the limits are
#' then averaged across subsamples (via map_mean).
#'
#' @param object The object to calculate confidence intervals for.
#'
#' @param parm The parameter to calculate confidence intervals for.
#'   Defaults to all formula terms (the intercept is excluded).
#' @param level The level to use for the confidence interval.
#' @param ... Additional parameters to pass.
#'
#' @return Bag of little bootstraps confidence intervals, one row per
#'   parameter.
#'
#' @export
#' @method confint blbglm
confint.blbglm <- function(object, parm = NULL, level = 0.95, ...) {
  if (is.null(parm)) {
    # term.labels excludes the intercept
    parm <- attr(terms(object$formula), "term.labels")
  }
  alpha <- 1 - level
  out <- map_rbind(parm, function(p) {
    # list("coef", p) is a purrr extraction path: replicate$coef[[p]]
    # na.rm = true in case of subsamples with coef = NA
    map_mean(object$estimates, ~ map_dbl(., list("coef", p)) %>% quantile(c(alpha / 2, 1 - alpha / 2), na.rm = TRUE))
  })
  if (is.vector(out)) {
    # a single parameter collapses to a vector; restore the matrix shape
    out <- as.matrix(t(out))
  }
  dimnames(out)[[1]] <- parm
  out
}
#' Predict values from a blbglm object
#'
#' @param object The object to calculate prediction values for.
#'
#' @param new_data The values to predict from.
#' @param confidence Whether to provide a confidence interval.
#' @param level The level to use for the confidence interval.
#' @param inv_link The inverse of the link function, where needed. Logit link functions are detected automatically.
#' @param ... Additional parameters to pass.
#'
#' @export
#' @method predict blbglm
predict.blbglm <- function(object, new_data, confidence = FALSE, level = 0.95, inv_link = NULL, ...) {
  # design matrix for the new observations, built from the model terms
  X <- model.matrix(reformulate(attr(terms.formula(object$formula, data = new_data), "term.labels")), new_data)
  # if no inv_link is provided, try detecting a logit link
  if (is.null(inv_link)) {
    # BUGFIX: this previously inspected object$fit, which is never stored on
    # a blbglm object (the fitted list holds estimates/formula/family), so
    # logit detection always failed. The family may be a generator function
    # (e.g. binomial) or a family object; a character family name is not
    # auto-detected -- pass inv_link explicitly in that case.
    fam <- object$family
    logit <- if (is.function(fam)) {
      identical(formals(fam)$link, "logit")
    } else if (inherits(fam, "family")) {
      identical(fam$link, "logit")
    } else {
      FALSE
    }
    if (logit) {
      # BUGFIX: the hand-rolled exp(x)/(1 + exp(x)) overflows to NaN for
      # large x, and the old is.infinite() guard applied a scalar `if` to a
      # vector (an error in R >= 4.2). plogis() is the numerically stable
      # inverse logit and returns the correct limits (0 and 1) at +/-Inf.
      inv_link <- function(x) {
        stats::plogis(x)
      }
    } else { # otherwise, don't use an inverse link
      inv_link <- identity
    }
  }
  # apply coefficients to X, then apply the inverse link function
  inv_function <- function(x) {
    inv_link(X %*% x$coef)
  }
  if (confidence) {
    map_mean(object$estimates, ~ map_cbind(., inv_function) %>%
      apply(1, mean_lwr_upr, level = level) %>%
      t())
  } else {
    # na.rm = TRUE in case of subsamples producing NA
    map_mean(object$estimates, ~ map_cbind(., inv_function) %>% rowMeans(na.rm = TRUE))
  }
}
# Summarize a numeric vector as c(fit = mean, lwr, upr), where the bounds
# are the percentile interval at the requested level (NAs are dropped in
# case some subsamples produced NA predictions).
mean_lwr_upr <- function(x, level = 0.95) {
  alpha <- 1 - level
  bounds <- quantile(x, c(alpha / 2, 1 - alpha / 2), na.rm = TRUE)
  names(bounds) <- c("lwr", "upr")
  c(fit = mean(x, na.rm = TRUE), bounds)
}
# Apply .f over .x (purrr semantics: functions, formulas, or extractors)
# and return the element-wise mean of the results.
map_mean <- function(.x, .f, ...) {
  totals <- reduce(map(.x, .f, ...), `+`)
  totals / length(.x)
}
# Apply .f over .x and bind the results together as columns of a matrix.
map_cbind <- function(.x, .f, ...) {
  reduce(map(.x, .f, ...), cbind)
}
# Apply .f over .x and bind the results together as rows of a matrix.
map_rbind <- function(.x, .f, ...) {
  reduce(map(.x, .f, ...), rbind)
}
|
0057533961429bdc781a3063bca2a8ec46919914
|
49af06a3dd58afae35d379610bf745fb555d6150
|
/plot3.R
|
8c41508da34c521a95eebe86598cdc1ef6dd72de
|
[] |
no_license
|
RidzuanMo/EDA-Project-2
|
5643897a62efdde600b86fd58771a419f360878b
|
772742b2fb31dbc0cd5014ebb6c165ec1fc43cc8
|
refs/heads/master
| 2021-01-13T13:56:07.272633
| 2016-12-13T15:19:34
| 2016-12-13T15:19:34
| 76,142,070
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 622
|
r
|
plot3.R
|
require(ggplot2)
require(plyr)

# Load the National Emissions Inventory data (expected in the working dir)
nei_data <- readRDS("summarySCC_PM25.rds")

# Keep only Baltimore City records (fips == "24510") and the columns we plot
baltimore_data <- subset(nei_data, fips == "24510", select=c("year", "type", "Emissions"))

# Total emissions per year within each source type
type_totals <- ddply(baltimore_data, .(year, type), summarize, Emissions=sum(Emissions))
type_totals <- transform(type_totals, type=factor(type))

# Faceted scatter of log-emissions over time, one panel per source type
emissions_plot <- ggplot(type_totals, aes(year, log(Emissions))) +
        geom_point() +
        facet_grid(. ~ type) +
        geom_smooth() +
        labs(title="Total Emissions in Baltimore City", x="year", y="log(PM2.5)")

# Write the plot to a 480x480 PNG
png("plot3.png",width=480,height=480,units="px")
print(emissions_plot)
dev.off()
|
fd9c2a7858ef6239c251bfcf372681d28ec5c7a0
|
b838ef0d0389fb8731439ff615abc24da5854448
|
/R/RDSProject/R/poisson_reg_quiz.R
|
d00fa8dea2508b35a4c0bf4ceb8a89956f5994b8
|
[] |
no_license
|
sadiagit/DataScience-R
|
a0bdece78fd0e92053c5f2e6e6e972bd7358619a
|
1e90cdccec5173803b83972b9bf5b2577bcf1778
|
refs/heads/master
| 2023-03-16T07:11:37.499153
| 2021-03-09T00:25:57
| 2021-03-09T00:25:57
| 282,094,726
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,655
|
r
|
poisson_reg_quiz.R
|
# Quiz / scratch script for Bayesian Poisson regression (rjags).
# Part 1: evaluate the Poisson log-link mean for given coefficients.
#log(E(yi)) = log(lamda) = b0+b1*x1_i+b2*x2_i
b0=1.5
b1 = -0.3
b2 = 1
x1_i = 0.8
x2_i=1.2
log_lam = b0+b1*x1_i+b2*x2_i
lam = exp(log_lam)
#2. If t is the amount of time that we observe, and lambda is the rate of
# events per unit of time, then the expected number of events is t*lambda
# and the number of events in this interval is Poisson(t*lambda).
# NOTE(review): the comment below says "15 cust per hour" but 1/15 is hours
# per customer -- confirm the intended rate. ppois(21, 30) evaluates
# P(X <= 21) for a Poisson mean of 30.
lamda = 1/15 #15 cust per hour
ppois(21,30)
# Load the call-center data and explore it (local absolute path).
df=read.csv("C:/Dev/DataScience/R_doc/Bayes/callers.csv", header = TRUE)
plot(df$isgroup2,df$calls/df$days_active)
plot(df$isgroup2,df$calls)
plot(df$isgroup2,df$age)
plot(df$age,df$calls/df$days_active)
library("rjags")
# JAGS model: calls ~ Poisson(days_active * lam), log-link on age and group,
# with weakly-informative normal priors on the coefficients.
mod_string = " model {
for (i in 1:length(calls)) {
calls[i] ~ dpois( days_active[i] * lam[i] )
log(lam[i]) = b0 + b[1]*age[i] + b[2]*isgroup2[i]
}
b0 ~ dnorm(0.0, 1.0/1e2)
for (i in 1:2){
b[i] ~ dnorm(0.0, 1.0/1e2)
}
}"
set.seed(102)
data_jags = as.list(df)
params = c("b0", "b")
# inits1 = function() {
# inits = list("b0"=rnorm(1,0.0,100.0) ,"b"=rnorm(2,0.0,100.0))
# }
# Compile, burn in 1000 iterations, then draw 5000 samples per chain.
mod = jags.model(textConnection(mod_string), data=data_jags, n.chains=3)
update(mod, 1e3)
mod_sim = coda.samples(model=mod,
variable.names=params,
n.iter=5e3)
mod_csim = as.mcmc(do.call(rbind, mod_sim))
## convergence diagnostics
plot(mod_sim)
summary(mod_sim)
head(mod_csim)
# posterior probability that the first monitored coefficient is positive
mean(mod_csim[,1] >0)
# Posterior predictive: a 29-year-old in group 2 observed for 30 days.
# NOTE(review): columns of mod_csim are ordered b[1], b[2], b0; c(2, 1)
# reorders so x1 = (isgroup2, age) -- confirm against the monitor order.
x1 = c(1,29)
loglam1 = mod_csim[,"b0"] + mod_csim[,c(2,1)] %*% x1
lam1 = exp(loglam1)
n_sim = length(lam1)
y1 = rpois(n=n_sim, lambda=lam1*30)
plot(table(factor(y1, levels=0:10))/n_sim, pch=2, ylab="posterior prob.", xlab="calls")
# posterior predictive probability of at least one call
mean(y1 >= 1)
|
8e3f11b3adf9f7868b28afa67f53ee2fa8f2b07e
|
7dda987e5bc0dea30143ad52f190b41bc06a1911
|
/ClusteringCorrelation/segmentation3.R
|
8675b5d3ae770aee035afa21cbd7f01bb7548ab2
|
[] |
no_license
|
bgbutler/R_Scripts
|
1b646bc4e087e8d80b52d62fed8eb20841ed2df7
|
b10e69f8781eb2a19befe5f80f59863a0f838023
|
refs/heads/master
| 2023-01-25T04:08:44.844436
| 2023-01-12T16:00:55
| 2023-01-12T16:00:55
| 28,370,052
| 5
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,774
|
r
|
segmentation3.R
|
# Customer-segmentation exploration: hierarchical clustering, LDA, and
# k-means on survey data with 7 known cluster labels.
#load in the libraries
library(MASS) #for LDA
library(dplyr)
library(ggplot2)
library(FactoMineR)
library(class) #for knn
library(rpart) #other machine learning
library(caret) #machine learning package
library(dendextend)
library(dendextendRcpp)
library(xgboost)
library(Metrics)
library(gmodels)
library(stats)
# Read the training data from a local network drive (non-portable path).
url <- "K:/Sandbox/R/Segmentation/TrainingCoke2.csv"
cokeRaw <- read.csv(url, header = TRUE, sep=",", as.is = FALSE)
cokeRaw$Cluster <- as.factor(cokeRaw$Cluster)
# Normalize gender labels: map the Spanish "Femenino" onto "Female"; all
# remaining records are treated as "Male".
cokeRaw$Gender <- as.character(cokeRaw$Gender)
cokeRaw$Gender <- ifelse(cokeRaw$Gender %in% c("Female", "Femenino"),"Female", "Male")
cokeRaw$Gender <- as.factor(cokeRaw$Gender)
#exploratory plot
p <- ggplot(cokeRaw, aes(x = Country, y = Cluster, color = Cluster)) +
  geom_point(size = 5)
p
# Columns 8:84 hold the numeric survey responses used for clustering.
cokeDist <- cokeRaw[,c(8:84)]
#plot tree diagram for entire matrix
par(mai=c(2.0,1.0, 1.0,1.0))
d <- dist(cokeDist)
hc <- hclust(d)
op = par(bg = "white")
dend <- d %>% hclust %>% as.dendrogram
# NOTE(review): "Hierchical" in the plot title is a typo in the displayed
# string; left as-is here to preserve output.
dend %>% set("labels_col", k = 7) %>%
  set("labels_cex", .75) %>%
  color_branches(k=7) %>%
  set("branches_lwd", 2) %>%
  hang.dendrogram (hang = -1) %>%
  plot(horiz=FALSE, main = "Hierchical Clusters of 7 Segments", axes = FALSE)
axis(side = 2, col = "blue", at = seq(1,25,5), labels = FALSE)
mtext(seq(1,25,5), side = 2, at = seq(1,25,5),
      line = 1, col = "blue", las = 2)
dend %>% rect.dendrogram(k = 7, horiz = FALSE)
#set up for lda
# Keep the Cluster label (column 2) plus the survey columns.
lda.data <- cokeRaw[,c(2,8:84)]
# 90/10 train/test split, seeded for reproducibility.
set.seed(1000)
split <- sample(nrow(lda.data), floor (0.9*nrow(lda.data)))
traindf <- lda.data[split,]
testdf <- lda.data[-split,]
#make the training model
data1.lda <- lda(Cluster ~ ., data = traindf)
# leave-one-out cross-validated LDA on the full dataset
data2.lda <- lda(Cluster ~ ., data = lda.data,CV = TRUE)
data1.lda
# predict the held-out test set (columns 2:78 are the predictors)
data1.lda.p <- predict(data1.lda, newdata = testdf[,c(2:78)])$class
data1.lda.p
plot(data1.lda)
# Confusion table and overall accuracy on the test split.
preVal <- table(data1.lda.p, testdf[,1])
preVal
sum(diag(preVal))/length(testdf$Cluster)
# a better table for evaluating: the cross-validated predictions
preValX <- table(data2.lda$class, lda.data[,1])
sum(diag(preValX))/length(lda.data$Cluster)
prevalX <- CrossTable(x = data1.lda.p, y = testdf[,1], prop.chisq = FALSE)
###### Result = LDA is not a good method for this data set
### k means
set.seed(1000)
kclust = kmeans(traindf[,c(2:78)], centers = 7, nstart = 30)
kclust
# Compare k-means cluster assignments against the known labels.
clustTest <- table(traindf[,1], kclust$cluster)
clustTest
# One-hot encode the factors (caret); fullRank = F keeps all dummy levels.
dataDummy <- dummyVars("~", data = lda.data, fullRank = F)
trsfData <- data.frame(predict(dataDummy, newdata = lda.data))
|
f5e414efd44578ed7e306f6eddb70755fa11fce0
|
9ec240c392225a6b9408a1636c7dc6b7d720fd79
|
/packrat/src/backports/backports/R/import.R
|
de409746cd14b24555ea0a347690522cabf3a6d8
|
[] |
no_license
|
wjhopper/PBS-R-Manual
|
6f7709c8eadc9e4f7a163f1790d0bf8d86baa5bf
|
1a2a7bd15a448652acd79f71e9619e36c57fbe7b
|
refs/heads/master
| 2020-05-30T17:47:46.346001
| 2019-07-01T15:53:23
| 2019-07-01T15:53:23
| 189,883,322
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,360
|
r
|
import.R
|
#' @title Import backported functions into your package
#'
#' @description
#' Imports objects from \pkg{backports} into the namespace of other packages
#' by assigning it during load-time.
#' See examples for a code snippet to copy to your package.
#'
#' @param pkgname [\code{character(1)}]\cr
#'   Name of the package where the backported function should be assigned.
#' @param obj [\code{character}]\cr
#'   Name of objects to assign, as character vector.
#'   If \code{NULL}, all backports which are not provided by R itself are assigned.
#' @param force [\code{logical}]\cr
#'   If \code{obj} is provided and \code{force} is set to \code{FALSE}, only backports
#'   not provided by the base package of the executing R interpreter are imported.
#'   Set to \code{TRUE} to ignore this check and always import the backport into the package's namespace.
#' @export
#' @seealso \code{\link[base]{.onLoad}}
#' @examples
#' \dontrun{
#' # This imports all functions implemented in backports while the package is loaded
#' .onLoad <- function(libname, pkgname) {
#'   backports::import(pkgname)
#' }
#'
#' # This only imports the function "trimws"
#' .onLoad <- function(libname, pkgname) {
#'   backports::import(pkgname, "trimws")
#' }
#'
#' # This imports all backports from base and force-imports "hasName" from utils
#' .onLoad <- function(libname, pkgname) {
#'   backports::import(pkgname)
#'   backports::import(pkgname, "hasName", force = TRUE)
#' }
#' }
import = function(pkgname, obj = NULL, force = FALSE) {
  # Decide which names to copy: every missing backport by default, the
  # caller's full list when force = TRUE, otherwise only the requested
  # names that the running R version does not already provide.
  if (is.null(obj)) {
    targets = get_backports()
  } else if (isTRUE(force)) {
    targets = obj
  } else {
    targets = intersect(obj, get_backports())
  }
  if (length(targets) > 0L) {
    dest_ns = getNamespace(pkgname)
    src_ns = getNamespace("backports")
    for (nm in targets)
      assign(nm, get(nm, envir = src_ns), envir = dest_ns)
  }
  invisible(TRUE)
}
# Return the names of backported functions that base R at version `v` does
# not yet provide.
# NOTE(review): the indexing below assumes a 3.x version series; for
# R >= 4.0, v$minor no longer lines up with the release names in FUNS --
# confirm against the upstream backports package.
get_backports = function(v = getRversion()) {
  v = package_version(v)
  newer_releases = tail(FUNS, -(v$minor + 1L))
  unlist(newer_releases, use.names = FALSE)
}

# Functions introduced in each base R release; everything newer than the
# running interpreter's entry is what backports must supply.
FUNS = list(
  "3.0.0" = character(),
  "3.1.0" = character(),
  "3.2.0" = c("anyNA", "dir.exists", "file.size", "file.mode", "file.mtime", "lengths", "file.info", "URLencode"),
  "3.3.0" = c("startsWith", "endsWith", "strrep", "trimws", "capture.output"),
  "3.4.0" = c("hasName"),
  "3.5.0" = c("...length", "...elt", "isFALSE"),
  "3.6.0" = c("warningCondition", "errorCondition")
)
|
03cb29211af47aa893e15bd14c580932bbd60a71
|
e4c8af552f8801a088ca91a6cffe77689089d5d7
|
/src/Analysis/0-archive/2b-regress-10day-body-unadj.R
|
fbe4c57eaf2ae09e8d5a8d4168e894b1d31fd81f
|
[] |
no_license
|
jadebc/13beaches-coliphage
|
eb6087b957dbfac38211ac531508860f48094c15
|
3d511ffa91a6dd5256d6832162ea239c1dbbad28
|
refs/heads/master
| 2021-06-17T03:38:00.805458
| 2017-04-27T22:50:06
| 2017-04-27T22:50:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,172
|
r
|
2b-regress-10day-body-unadj.R
|
##########################################
# Coliphage analysis - 6 beaches
# v1 by Jade 7/13/15
# This file conducts maximum likelihood regression
# to estimate prevalence ratios
# Results pooled across beaches
# unadjusted analyses
# 10 day gi illness
##########################################
rm(list=ls())
library(foreign)
setwd("~/Dropbox/Coliphage/")
# --------------------------------------
# load the and pre-preprocess the
# analysis dataset
# (refer to the base functions script
# for details on the pre-processing)
# --------------------------------------
beaches13=read.csv("~/Dropbox/13beaches/data/final/13beaches-analysis.csv")
# load base functions
source("Programs/Analysis/0-base-functions.R")
data=preprocess.6beaches(beaches13)
# restrict to 6 beaches with coliphage data
beach.list=c("Avalon","Doheny","Malibu","Mission Bay",
"Fairhope","Goddard")
all=data[data$beach %in% beach.list,]
# drop individuals with no water quality information
all=subset(all,nowq==0)
# subset to non-missing exposure categories
# to make the robust CI calcs work
all=subset(all,all$bodycontact=="Yes")
# --------------------------------------
# Calculate the actual Ns for each cell
# and store them for plotting and tables
# --------------------------------------
# Count observations that are non-missing for both the outcome and the
# exposure category (table() drops NA pairs before the sum).
regN <- function(outcome, exposurecat) {
  cross_counts <- table(outcome, exposurecat)
  sum(cross_counts)
}
# --------------------------------------
# Calculate the analysis Ns (complete outcome/exposure pairs) for each
# coliphage indicator; stored for plotting and tables downstream.
# --------------------------------------
# pooled n's ---------------------------------------
all.n10.fmc1601 = regN(all$gici10[!is.na(all$fmc1601.pres)],
                       all$fmc1601.pres[!is.na(all$fmc1601.pres)])
all.n10.fmc1602 = regN(all$gici10[!is.na(all$fmc1602.pres)],
                       all$fmc1602.pres[!is.na(all$fmc1602.pres)])
all.n10.fpc1601 = regN(all$gici10[!is.na(all$fpc1601.pres)],
                       all$fpc1601.pres[!is.na(all$fpc1601.pres)])
all.n10.fpc1602 = regN(all$gici10[!is.na(all$fpc1602.pres)],
                       all$fpc1602.pres[!is.na(all$fpc1602.pres)])
# pooled n's by risk level---------------------------------------
# "High risk" = groundwater above median flow or berm open; "low risk" is
# the complement. Each indicator is restricted to non-missing values first.
data=all[!is.na(all$fmc1602.pres),]
data.high=subset(data,data$groundwater=="Above median flow" | data$berm=="Open")
all.n10.fmc1602.high = regN(data.high$gici10,data.high$fmc1602.pres)
data.low=subset(data,data$groundwater=="Below median flow" | data$berm=="Closed")
all.n10.fmc1602.low = regN(data.low$gici10,data.low$fmc1602.pres)
data=all[!is.na(all$fpc1601.pres),]
data.high=subset(data,data$groundwater=="Above median flow" | data$berm=="Open")
all.n10.fpc1601.high = regN(data.high$gici10,data.high$fpc1601.pres)
data.low=subset(data,data$groundwater=="Below median flow" | data$berm=="Closed")
all.n10.fpc1601.low = regN(data.low$gici10,data.low$fpc1601.pres)
data=all[!is.na(all$fpc1602.pres),]
data.high=subset(data,data$groundwater=="Above median flow" | data$berm=="Open")
all.n10.fpc1602.high = regN(data.high$gici10,data.high$fpc1602.pres)
data.low=subset(data,data$groundwater=="Below median flow" | data$berm=="Closed")
all.n10.fpc1602.low = regN(data.low$gici10,data.low$fpc1602.pres)
# --------------------------------------
# Estimates pooled across beach
# (can't use the mpreg fn because we
# need the actual glm returned object
# for the LR tests)
# 10-day GI illness
# all beaches ----------------
# Each indicator block fits a log-link Poisson model of 10-day GI illness on
# coliphage presence (coefficients exponentiate to prevalence ratios), then
# recomputes SEs with a household-clustered covariance (cl) via coeftest.
# fmc 1601
all.fit10.fmc1601 <- glm(gici10~fmc1601.pres,family=poisson(link="log"),data=all[!is.na(all$fmc1601.pres),])
# BUGFIX: the data argument to cl() previously read
# all[!is.na(all$fmc1601.pres)] (missing trailing comma), which indexes
# COLUMNS rather than rows; the corrected row subset matches the
# fmc1602/fpc blocks below.
all.VC10.fmc1601 <- cl(all[!is.na(all$fmc1601.pres),],fm=all.fit10.fmc1601,
                       cluster=all$hhid[!is.na(all$fmc1601.pres)])
overall.fit10.fmc1601 <- coeftest(all.fit10.fmc1601, all.VC10.fmc1601)
summary(all.fit10.fmc1601)
overall.fit10.fmc1601
aic.fmc1601=AIC(all.fit10.fmc1601)
# fmc 1602
all.fit10.fmc1602 <- glm(gici10~fmc1602.pres,family=poisson(link="log"),data=all[!is.na(all$fmc1602.pres),])
all.VC10.fmc1602 <- cl(all[!is.na(all$fmc1602.pres),],fm=all.fit10.fmc1602,
                       cluster=all$hhid[!is.na(all$fmc1602.pres)])
overall.fit10.fmc1602 <- coeftest(all.fit10.fmc1602, all.VC10.fmc1602)
summary(all.fit10.fmc1602)
overall.fit10.fmc1602
aic.fmc1602=AIC(all.fit10.fmc1602)
# fpc 1601
all.fit10.fpc1601 <- glm(gici10~fpc1601.pres,family=poisson(link="log"),data=all[!is.na(all$fpc1601.pres),])
all.VC10.fpc1601 <- cl(all[!is.na(all$fpc1601.pres),],fm=all.fit10.fpc1601,
                       cluster=all$hhid[!is.na(all$fpc1601.pres)])
overall.fit10.fpc1601 <- coeftest(all.fit10.fpc1601, all.VC10.fpc1601)
summary(all.fit10.fpc1601)
overall.fit10.fpc1601
aic.fpc1601=AIC(all.fit10.fpc1601)
# fpc 1602
all.fit10.fpc1602 <- glm(gici10~fpc1602.pres,family=poisson(link="log"),data=all[!is.na(all$fpc1602.pres),])
all.VC10.fpc1602 <- cl(all[!is.na(all$fpc1602.pres),],fm=all.fit10.fpc1602,
                       cluster=all$hhid[!is.na(all$fpc1602.pres)])
overall.fit10.fpc1602 <- coeftest(all.fit10.fpc1602, all.VC10.fpc1602)
summary(all.fit10.fpc1602)
overall.fit10.fpc1602
aic.fpc1602=AIC(all.fit10.fpc1602)
# --------------------------------------
# Estimates pooled across beach and stratified by conditions
# (can't use the mpreg fn because we
# need the actual glm returned object
# for the LR tests)
# 10-day GI illness
# all beaches ----------------
# Same Poisson/log-link models as above, but fit separately within
# high-risk (groundwater above median flow or berm open) and low-risk
# strata; SEs are household-clustered via cl()/coeftest.
# fmc 1602 --------
# high risk conditions
data=all[!is.na(all$fmc1602.pres),]
data.high=subset(data,data$groundwater=="Above median flow" | data$berm=="Open")
all.fit10.fmc1602.high <- glm(gici10~fmc1602.pres,family=poisson(link="log"),data=data.high)
all.VC10.fmc1602.high <- cl(data.high,fm=all.fit10.fmc1602.high, cluster=data.high$hhid)
overall.fit10.fmc1602.high <- coeftest(all.fit10.fmc1602.high, all.VC10.fmc1602.high)
summary(all.fit10.fmc1602.high)
overall.fit10.fmc1602.high
aic.fmc1602.high=AIC(all.fit10.fmc1602.high)
# low risk conditions
data.low=subset(data,data$groundwater=="Below median flow" | data$berm=="Closed")
all.fit10.fmc1602.low <- glm(gici10~fmc1602.pres,family=poisson(link="log"),data=data.low)
all.VC10.fmc1602.low <- cl(data.low,fm=all.fit10.fmc1602.low, cluster=data.low$hhid)
overall.fit10.fmc1602.low <- coeftest(all.fit10.fmc1602.low, all.VC10.fmc1602.low)
summary(all.fit10.fmc1602.low)
overall.fit10.fmc1602.low
aic.fmc1602.low=AIC(all.fit10.fmc1602.low)
# fpc 1601 --------
# high risk conditions
data=all[!is.na(all$fpc1601.pres),]
data.high=subset(data,data$groundwater=="Above median flow" | data$berm=="Open")
all.fit10.fpc1601.high <- glm(gici10~fpc1601.pres,family=poisson(link="log"),data=data.high)
all.VC10.fpc1601.high <- cl(data.high,fm=all.fit10.fpc1601.high, cluster=data.high$hhid)
overall.fit10.fpc1601.high <- coeftest(all.fit10.fpc1601.high, all.VC10.fpc1601.high)
summary(all.fit10.fpc1601.high)
overall.fit10.fpc1601.high
aic.fpc1601.high=AIC(all.fit10.fpc1601.high)
# low risk conditions
data.low=subset(data,data$groundwater=="Below median flow" | data$berm=="Closed")
all.fit10.fpc1601.low <- glm(gici10~fpc1601.pres,family=poisson(link="log"),data=data.low)
all.VC10.fpc1601.low <- cl(data.low,fm=all.fit10.fpc1601.low, cluster=data.low$hhid)
overall.fit10.fpc1601.low <- coeftest(all.fit10.fpc1601.low, all.VC10.fpc1601.low)
summary(all.fit10.fpc1601.low)
overall.fit10.fpc1601.low
aic.fpc1601.low=AIC(all.fit10.fpc1601.low)
# fpc 1602 --------
# high risk conditions
data=all[!is.na(all$fpc1602.pres),]
data.high=subset(data,data$groundwater=="Above median flow" | data$berm=="Open")
all.fit10.fpc1602.high <- glm(gici10~fpc1602.pres,family=poisson(link="log"),data=data.high)
all.VC10.fpc1602.high <- cl(data.high,fm=all.fit10.fpc1602.high, cluster=data.high$hhid)
overall.fit10.fpc1602.high <- coeftest(all.fit10.fpc1602.high, all.VC10.fpc1602.high)
summary(all.fit10.fpc1602.high)
overall.fit10.fpc1602.high
aic.fpc1602.high=AIC(all.fit10.fpc1602.high)
# low risk conditions
data.low=subset(data,data$groundwater=="Below median flow" | data$berm=="Closed")
all.fit10.fpc1602.low <- glm(gici10~fpc1602.pres,family=poisson(link="log"),data=data.low)
all.VC10.fpc1602.low <- cl(data.low,fm=all.fit10.fpc1602.low, cluster=data.low$hhid)
overall.fit10.fpc1602.low <- coeftest(all.fit10.fpc1602.low, all.VC10.fpc1602.low)
summary(all.fit10.fpc1602.low)
overall.fit10.fpc1602.low
aic.fpc1602.low=AIC(all.fit10.fpc1602.low)
# --------------------------------------
# save the results
# exclude glm objects and data frames
# (they are really large)
# --------------------------------------
# NOTE(review): the output path uses lowercase "~/dropbox/coliphage" while
# the script's setwd() above uses "~/Dropbox/Coliphage" -- on a
# case-sensitive filesystem these are different directories; confirm.
save(
  all.n10.fmc1601,all.n10.fmc1602,all.n10.fpc1601,all.n10.fpc1602,
  all.n10.fmc1602.high,all.n10.fmc1602.low,all.n10.fpc1601.high,
  all.n10.fpc1601.low,all.n10.fpc1602.high,all.n10.fpc1602.low,
  all.VC10.fmc1601,all.VC10.fmc1602,all.VC10.fpc1601,all.VC10.fpc1602,
  overall.fit10.fmc1601,overall.fit10.fmc1602,overall.fit10.fpc1601,
  overall.fit10.fpc1602,
  all.VC10.fmc1602.high,all.VC10.fpc1601.high,all.VC10.fpc1602.high,
  overall.fit10.fmc1602.high,overall.fit10.fpc1601.high,
  overall.fit10.fpc1602.high,
  all.VC10.fmc1602.low,all.VC10.fpc1601.low,all.VC10.fpc1602.low,
  overall.fit10.fmc1602.low,overall.fit10.fpc1601.low,
  overall.fit10.fpc1602.low,
  aic.fmc1601,aic.fmc1602,aic.fpc1601,aic.fpc1602,
  aic.fmc1602.high,aic.fmc1602.low,
  aic.fpc1601.high,aic.fpc1601.low,aic.fpc1602.high,aic.fpc1602.low,
  file="~/dropbox/coliphage/results/rawoutput/regress-10day-body-unadj.Rdata"
)
|
43fcd9e4cd7b0f3da856c48f0a7437f3395a7801
|
69ed15a883dfbc2d67023d436dbb4cb9742b3970
|
/man/checkIfVarinaceExcist.Rd
|
d158ff894698fc8f2c38dbe7f532e69a7776be86
|
[] |
no_license
|
joh4n/JGTools
|
57f163463b107028509243260e80f7f05f847dd5
|
7418f924665c03791e758da7bc3112cd6b2022d9
|
refs/heads/master
| 2021-06-20T08:26:36.857149
| 2017-06-17T13:35:56
| 2017-06-17T13:35:56
| 77,140,153
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 748
|
rd
|
checkIfVarinaceExcist.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/smalUtilityFunctions.R
\name{checkIfVarinaceExcist}
\alias{checkIfVarinaceExcist}
\title{Checks if variance exists for selected columns in a data frame, and returns the column names where variance exists}
\usage{
checkIfVarinaceExcist(names, df, startDate)
}
\arguments{
\item{names}{names of columns to be checked}
\item{df}{a data frame with a date column}
\item{startDate}{the starting date of where the evaluation should start}
}
\value{
a character vector
}
\description{
This function checks if variance exists for selected columns in a data frame, and returns the column names where variance exists
}
\author{
Johan Gudmundsson, \email{jgu@blackwoodseven.com}
}
|
57275cf28e205647c505e7e0d157bc3e6e9003be
|
f6b8edceb2d8b5344adf8a327f0407c7f3c0246a
|
/tests/testthat/test-compute_sdim.R
|
2ba4f70faa37af746bd1bfde2a40e5775c511622
|
[
"MIT"
] |
permissive
|
bcjaeger/cleanRbp
|
e5c59d7b86c353985fe346cffd919c0704e629ba
|
ec654d7599c4fe5847c77bb28aede3466814e593
|
refs/heads/master
| 2023-05-04T17:01:12.770293
| 2021-05-25T16:35:49
| 2021-05-25T16:35:49
| 275,244,596
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 670
|
r
|
test-compute_sdim.R
|
# Sanity checks for cmp_sdim_coefs() / cmp_sdim_values() on simulated
# blood-pressure means and standard deviations.
test_that("standard inputs work", {
  # NOTE(review): no set.seed(), so the fixture differs between runs; the
  # assertions below are robust to that, but seeding would aid debugging.
  mean_bps = runif(n = 100, min = 80, max = 190)
  sd_bps = runif(n = 100, min = 1, max = 3)
  # inject one missing mean to exercise NA handling
  mean_bps[5] <- NA_real_
  # cmp_sdim_values(mean_bps, sd_bps, method = 'A')
  # cmp_sdim_values(mean_bps, sd_bps, method = 'B')
  # outputs are numeric, with one value per input observation
  expect_is(cmp_sdim_coefs(mean_bps, sd_bps), 'numeric')
  expect_is(cmp_sdim_values(mean_bps, sd_bps), 'numeric')
  expect_equal(length(cmp_sdim_values(mean_bps, sd_bps)), 100)
  # a single observation is rejected; non-numeric input is rejected
  mean_bps = runif(n = 1, min = 80, max = 190)
  sd_bps = runif(n = 1, min = 1, max = 3)
  expect_error(cmp_sdim_coefs(mean_bps, sd_bps), 'at least')
  expect_error(cmp_sdim_coefs(letters[1:5], letters[1:5]), 'numeric')
})
|
1ee888ec4aae81efeaf399510fc8c86a812e5206
|
e04a048b646228814b5154aab3f883136d1dfff3
|
/man/GRSDbinom.regressGENO.Rd
|
2b04fb72367f48f834febf4173fc581417064525
|
[] |
no_license
|
elijahedmondson/HZE
|
be105263a3ec7a8fa5c5c7716d5f8459f2ff21c6
|
7ee3ecb514c43af8c08123472410389efc4491a5
|
refs/heads/master
| 2021-01-15T15:49:39.608472
| 2016-09-05T17:12:26
| 2016-09-05T17:12:26
| 48,752,928
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 444
|
rd
|
GRSDbinom.regressGENO.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GRSDbinom.regressGENO.R
\name{GRSDbinom.regressGENO}
\alias{GRSDbinom.regressGENO}
\title{Association mapping on autosomal chromosomes with binary variable outcomes.}
\usage{
GRSDbinom.regressGENO(obj, pheno, pheno.col, addcovar, tx)
}
\author{
Elijah F Edmondson, \email{elijah.edmondson@gmail.com}
Performs association mapping in multiparent mouse populations.
}
|
5caf88f1d9b21f6de612c2468c1364b9f413965f
|
6d43aae6ce66f18f462cd5e923bf6e3503c336c9
|
/R/vg_calc.R
|
157a9c373363e538f5dfd99e5feb907e81fc6570
|
[] |
no_license
|
jnghiem/bfasttools
|
984ee841e74568af11e57cbd0406e791ed7b8161
|
7d7834d81222193ed37119a4ea9cbd9a3d48fd69
|
refs/heads/master
| 2020-03-23T20:39:46.005392
| 2018-07-27T20:20:14
| 2018-07-27T20:20:14
| 141,771,122
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,188
|
r
|
vg_calc.R
|
#' Perform variogram analysis on \code{bfastSpatial()} shift and slope
#' statistics
#'
#' This function uses the \code{gstat} package to compute variogram statistics
#' for the shift and slope output of \code{bfastSpatial}. This function
#' uses the exponential variogram model.
#'
#' This function returns a data frame with the following fields: \enumerate{
#' \item \code{year}: The year of the observation. \item \code{month}: The month
#' of the observations. \item \code{stat}: The BFAST statistics (\code{"slope"}
#' or \code{"shift"}) \item \code{model}: The variogram model (\code{"Nug"} or
#' \code{"Exp"}) \item \code{psill}: The partial sill. \item \code{range}: The
#' range parameter of the model. Only valid for Exp model. \item
#' \code{effective_range}: The effective range of the model (within 5% of the
#' sill). Only valid for Exp model. \item \code{SSerr}: The sum of squared
#' errors of the model from the sample variogram. \item \code{n}: The number of
#' points in the sample variogram. }
#'
#' If the variogram model is singular or does not converge after 200 iterations,
#' no model is fit and \code{NA}s are returned for the entire row. Since this
#' function tries to fit a large number of models, it may fail to fit a
#' substantial proportion of models if the sample variograms do not match the
#' model well (or require transformation).
#'
#' @param bfast_in A data frame generated by \code{bfastSpatial()}.
#' @param boundary A SpatialPolygons object defining the boundary of the
#' variogram analysis. If this argument is not supplied, all records in
#' \code{bfast_in} will be used.
#' @param template A Raster object with the same resolution, extent, and
#' projection as the raster dataset used to generate \code{bfast_in}.
#' Alternatively, an XML file generated by \code{create_raster_metadata()}.
#' @param cutoff_dist An optional data frame containing the output of
#' \code{pcf_calc()} for the same study area.
#' @param mc.cores A numeric indicating the number of cores to be used in
#' parallel computing.
#' @return A data frame with variogram results.
#' @examples
#' \dontrun{
#' vg_calc(bf_df, boundary=rgdal::readOGR(dsn="C:/Desktop/shapefiles", layer="Mojave"), template="C:/Desktop/raster_metadata.xml", cutoff_dist=pcf_results, mc.cores=6)
#' }
#' @importFrom lubridate year month
#' @importFrom foreach %dopar% foreach
#' @importFrom evaluate evaluate
#' @importFrom dplyr arrange filter
#' @export
vg_calc <- function(bfast_in, boundary=NULL, template, cutoff_dist=NULL, mc.cores=6) {
  # Build (or load from XML) the template raster used to map cell numbers
  # back to coordinates.
  template <- create_template_raster(template)
  # Restrict the analysis to cells inside `boundary` when supplied;
  # otherwise every cell of the template raster is eligible.
  if (!is.null(boundary)) {
    cells <- extract(template, boundary, cellnumbers=TRUE, df=TRUE)[,"cell"]
  } else {
    cells <- 1:ncell(template)
  }
  # Keep only cleaned BFAST records inside the study area that flag a break.
  brks_only <- bfast_in %>%
    clean_bfast() %>%
    filter(no_cell %in% cells, brk==1)
  years <- brks_only[,"year"]
  yr.range <- min(years):max(years)
  # Variograms are fit separately for each break statistic per year-month.
  stat <- c("shift", "slope")
  if (mc.cores>1) {
    # Parallel path: one foreach task per year; per-year data frames are
    # row-bound in arbitrary order and sorted at the end.
    cl <- snow::makeCluster(mc.cores, type="SOCK")
    doSNOW::registerDoSNOW(cl)
    variogram_stat <- foreach(i=yr.range, .combine=rbind, .inorder=FALSE, .packages=c("evaluate", "gstat", "raster", "dplyr"), .export="create_raster") %dopar% {
      add.df <- data.frame()
      for (j in 1:12) {
        # Break pixels for this year/month; skip months with no breaks.
        brk_stat <- filter(brks_only, year==i, month==j)
        if (nrow(brk_stat)==0) next
        for (k in 1:length(stat)) {
          # Rasterise the statistic, then convert to points for gstat.
          stat_pts <- rasterToPoints(create_raster("no_cell", stat[k], brk_stat, template), spatial=TRUE)
          # Variogram cutoff = 80% of the first PCF peak distance, if known.
          # NOTE(review): if no matching cutoff row exists, `cut` has length 0
          # and `length(cut)==0 | is.na(cut)` is logical(0), which errors in
          # if(); a short-circuiting || with the length check first would be
          # safer. Confirm against real cutoff_dist inputs.
          cut <- filter(cutoff_dist, year==i, month==j, type=="first peak")[,"dist"]*0.8
          if (length(cut)==0 | is.na(cut)) {
            vg.samp <- variogram(layer~x+y, data=stat_pts)
          } else {
            vg.samp <- variogram(layer~x+y, data=stat_pts, cutoff=cut)
            if (is.null(vg.samp)) vg.samp <- variogram(layer~x+y, data=stat_pts)
          }
          # Fit an exponential model; evaluate() captures the "No convergence"
          # condition fit.variogram() raises after 200 iterations.
          # NOTE(review): when vg.samp is NULL, test.mod/vg.mod keep values
          # from a previous iteration; only the is.null(vg.samp) term below
          # prevents those stale fits from being reported. Verify intended.
          if (!is.null(vg.samp)) {
            test.mod <- evaluate(fit.variogram(vg.samp, vgm("Exp")))
            vg.mod <- fit.variogram(vg.samp, vgm("Exp"))
          }
          if (any(grepl("No convergence", sapply(test.mod, as.character))) | attr(vg.mod, "singular") | is.null(vg.samp)) {
            # Singular / non-converged fit: emit an all-NA row.
            new_rows <- data.frame(year=i, month=j, stat=NA, model=NA, psill=NA, range=NA, effective_range=NA, SSerr=NA, n=NA)
          } else {
            # One row per model component (Nug + Exp); the effective range and
            # SSerr apply to the Exp component only, hence the leading zeros.
            new_rows <- data.frame(year=i, month=j, stat=stat[k], dplyr::select(as.data.frame(vg.mod), model, psill, range), effective_range=c(0, effective_range(vg.mod)), SSerr=c(0, attr(vg.mod, "SSErr")), n=nrow(vg.samp))
          }
          add.df <- rbind(add.df, new_rows)
        }
      }
      return(add.df)
    }
    snow::stopCluster(cl)
  } else {
    # Serial path: logic identical to the parallel branch above.
    # NOTE(review): the two branches duplicate ~30 lines; factoring the inner
    # loop into a helper would prevent the fixes from diverging.
    variogram_stat <- data.frame()
    for (i in yr.range) {
      for (j in 1:12) {
        brk_stat <- filter(brks_only, year==i, month==j)
        if (nrow(brk_stat)==0) next
        for (k in 1:length(stat)) {
          stat_pts <- rasterToPoints(create_raster("no_cell", stat[k], brk_stat, template), spatial=TRUE)
          cut <- filter(cutoff_dist, year==i, month==j, type=="first peak")[,"dist"]*0.8
          if (length(cut)==0 | is.na(cut)) {
            vg.samp <- variogram(layer~x+y, data=stat_pts)
          } else {
            vg.samp <- variogram(layer~x+y, data=stat_pts, cutoff=cut)
            if (is.null(vg.samp)) vg.samp <- variogram(layer~x+y, data=stat_pts)
          }
          if (!is.null(vg.samp)) {
            test.mod <- evaluate(fit.variogram(vg.samp, vgm("Exp")))
            vg.mod <- fit.variogram(vg.samp, vgm("Exp"))
          }
          if (any(grepl("No convergence", sapply(test.mod, as.character))) | attr(vg.mod, "singular") | is.null(vg.samp)) {
            new_rows <- data.frame(year=i, month=j, stat=NA, model=NA, psill=NA, range=NA, effective_range=NA, SSerr=NA, n=NA)
          } else {
            new_rows <- data.frame(year=i, month=j, stat=stat[k], dplyr::select(as.data.frame(vg.mod), model, psill, range), effective_range=c(0, effective_range(vg.mod)), SSerr=c(0, attr(vg.mod, "SSErr")), n=nrow(vg.samp))
          }
          variogram_stat <- rbind(variogram_stat, new_rows)
        }
      }
    }
  }
  # Sort chronologically before returning.
  return(arrange(variogram_stat, year, month))
}
|
308689b0fb6b3d9198f78dae02f5f2871a630161
|
0d927c92c7f5fd72d588103541241b1fc1ae9ed0
|
/R/Settings.R
|
f6d9a8a85ab45345d928f38305f4a63f323f90b4
|
[] |
no_license
|
ClaudioZandonella/DMGC_Meta
|
33d176a003495683b98e5193d61381584f40cf4f
|
62d86ae789e034a91766704923c553e9827f51ae
|
refs/heads/master
| 2022-11-11T11:57:25.865327
| 2020-07-02T14:22:04
| 2020-07-02T14:22:04
| 261,465,900
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,434
|
r
|
Settings.R
|
################################
#### Project settings ####
################################
#---- R packages ----
# Attach project dependencies. `conflicted` is loaded first so that any
# later function-name clash errors loudly instead of being resolved silently
# by load order.
library("conflicted")
library("tidyverse")
library("metafor")
library("clubSandwich")
library("drake")
library("gridExtra")
library("MAd")
library("visNetwork")
# packages_list <- c("conflicted",
# "tidyverse",
# "metafor",
# "clubSandwich",
# "drake",
# "gridExtra",
# "MAd",
# "visNetwork")
#
#
# # load packages
# lapply(packages_list,require, character.only = TRUE)
#
#---- Procedure to remove packages -----
# ip <- as.data.frame(installed.packages())
# ip <- subset(ip, !grepl("MRO", ip$LibPath))
# ip <- ip[!(ip[,"Priority"] %in% c("base", "recommended")),]
# path.lib <- unique(ip$LibPath)
# pkgs.to.remove <- ip[,1]
#
# sapply(pkgs.to.remove, renv::remove, lib = path.lib)
#---- renv comands ----
# renv::settings$snapshot.type("packrat")
# renv::purge()
# renv::hydrate("formatR")
# sapply(packages_list, renv::hydrate)
# renv::remove()
# renv::install("drake")
# renv::snapshot()
# renv::install
# renv::dependencies()
#---- function conflicts ----
# conflicted::conflict_scout()
# Resolve known name clashes explicitly: always use the dplyr/tidyr versions
# (filter would otherwise clash with stats::filter, gather with other pkgs).
conflicted::conflict_prefer("filter", "dplyr")
conflicted::conflict_prefer("gather", "tidyr")
#----- ggplot2 settings ----
# Project-wide default ggplot theme.
theme_set(theme_bw())
|
4844693d2942318ff5ce76177917f6af04d00bc9
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/EpiBayes/R/summary.ebhistorical.R
|
0fcea43e90aa2e04ea7dbed04cfffa542989d083
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,043
|
r
|
summary.ebhistorical.R
|
#' @title
#' Summary Method for EpiBayes Historical Object
#'
#' @description
#' This function gives summary measurements for posterior distributions of cluster-level
#' prevalences across all time periods considered. It does so by examining the object
#' output by the \code{\link{EpiBayesHistorical}} function of class
#' \code{ebhistorical}.
#'
#' @param object An object of class \code{ebhistorical} (e.g., the output of
#' function \code{\link{EpiBayesHistorical}}).
#' @param sumstat The summary statistic that the user wishes to be output. The three choices are \code{mean}, \code{quantile}, and \code{variance}. Character scalar.
#' @param prob The probability associated with the quantile one wishes to calculate. Only
#' used if \code{sumstat} is chosen to be \code{quantile}. Real scalar.
#' @param time.labels Labels for the time period axis (e.g., a vector of years). Character
#' vector.
#' @param busterapprox Boolean value indicating whether or not the summary statistics
#' should be computed using the raw posterior distribution values computed via MCMC
#' (\code{TRUE}) or using the best beta distribution approximation via moment matching
#' (\code{FALSE}). Boolean.
#' @param burnin Number of MCMC iterations to discard from the beginning of the chain.
#' Integer scalar.
#' @param ... Additional arguments to be passed on to summary.
#'
#' @return
#' The summary statistics are returned in a matrix with rows indexed by time period and
#' columns indexed by subzone. The body of the matrix is filled with the summary
#' statistics requested for by the argument \code{sumstat}.
#'
#' @seealso
#' This is a method for objects of class \code{ebhistorical} returned by the function
#' \code{\link{EpiBayesHistorical}} and creates its own class of object much like the
#' summary method for \code{lm} objects. There is an additional plot method that will
#' plot the summary output from this method, \code{summary.ebhistorical}.
#'
#' @export
#' @name summary.ebhistorical
summary.ebhistorical = function(object, sumstat = "mean", prob = 0.95, time.labels = NULL, busterapprox = FALSE, burnin = NULL, ...){
	# Pull the pieces of the fitted historical object apart.
	raw.post = object$RawPost
	beta.bust = object$BetaBusterEst
	unique.subzones = object$ForOthers$unique.subzones
	# Fall back to the burn-in stored with the fit when none is supplied.
	if (is.null(burnin)){
		burnin = object$ForOthers$burnin
		}
	# Output dimensions: one column per time period, one row per subzone.
	n.periods = length(beta.bust)
	n.subzones = nrow(beta.bust[[1]])
	# Build a scalar summary function, then apply it to every subzone row of
	# every period's matrix; unlist() preserves period-major / subzone-minor
	# order, which matrix() reassembles column by column.
	if(busterapprox){
		# Summaries from the moment-matched beta approximation (shape1, shape2).
		beta.summary = function(shp){
			switch(sumstat,
				"mean" = shp[1]/(shp[1] + shp[2]),
				"quantile" = qbeta(prob, shp[1], shp[2]),
				"variance" = (shp[1] * shp[2])/((shp[1] + shp[2])^2 * (shp[1] + shp[2] + 1))
				)
			}
		per.period = lapply(beta.bust, function(m) apply(m, 1, beta.summary))
	} else{
		# Summaries straight from the raw MCMC draws, after discarding burn-in.
		draw.summary = function(draws){
			switch(sumstat,
				"mean" = mean(draws[-c(1:burnin)]),
				"quantile" = quantile(draws[-c(1:burnin)], prob = prob),
				"variance" = var(draws[-c(1:burnin)])
				)
			}
		per.period = lapply(raw.post, function(m) apply(m, 1, draw.summary))
	}
	stat.matrix = matrix(unlist(per.period), n.subzones, n.periods)
	out = list(
		"sumstat.mat" = stat.matrix,
		"sumstat.call" = match.call(expand.dots = TRUE),
		"ForOthers" = list(
			unique.subzones = unique.subzones,
			time.labels = time.labels
			)
		)
	# Tag with a summary class so the matching plot/print methods dispatch.
	class(out) = "ebhistoricalsummary"
	# Echo to the console, then return invisibly (standard summary() behavior).
	print(out)
	return(invisible(out))
}
|
8bdb52c522b1aaa95e302b245403d3401ceeb017
|
056be6c657db6dd94561ded6782a978e0f5995ed
|
/br/br_robust_sample.R
|
6ca35732a348a8dcd8ecd6015f3c195eff318b29
|
[
"MIT"
] |
permissive
|
lunliu454/infect_place
|
9f4e90ed8900e1376cbe6ebd78c903d313c5e564
|
46a7a2d69e74a55be8cc0c26631fd84cda2d23bb
|
refs/heads/main
| 2023-07-24T14:18:34.144798
| 2021-09-03T09:06:11
| 2021-09-03T09:06:11
| 402,244,291
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,503
|
r
|
br_robust_sample.R
|
library(dplyr)
library(lfe)
# Leave-one-out robustness check: refit the fixed-effects model once per
# region code, each time withholding that region, and compare the venue
# coefficients against the full-sample fit.
input <- read.csv("br/br_input.csv", stringsAsFactors = F)
output <- data.frame(intervention = 1 : 10)
for (i in 1 : length(unique(input$code))){
  # felm formula: outcome ~ interventions | fixed effects (region, date) |
  # no IV | cluster-robust SEs by region code.
  m <- felm(log_win7_Rt_estimate ~ win7_stay_at_home + win7_school_close +
              win7_office_close + win7_shop_close + win7_restaurant_close +
              win7_bar_close + win7_entertainment_close+
              win7_cultural_close + win7_worship_close +
              win7_sports_indoor_close + win7_sports_outdoor_close +
              win7_gathering_outside_10lower + win7_gathering_outside_10over
            | code + as.factor(date) | 0 | code
            , filter(input, code != unique(input$code)[i]))
  # NOTE(review): beta[2:11] skips the stay_at_home coefficient and keeps the
  # ten venue-closure coefficients — confirm the index range matches the
  # regressor order above.
  output[ , ncol(output) + 1] <- m$beta[2 : 11]
  print(i)
}
# Human-readable labels for the ten retained coefficients.
output$intervention <- c('School+Childcare', 'Office', 'Retail', 'Restaurant',
                         'Bar', 'Entertainment', 'Culture', 'Religion', 'Sports indoor',
                         'Sports outdoor')
# Baseline: same model on the full sample, stored in the "default" column.
m <- felm(log_win7_Rt_estimate ~ win7_stay_at_home + win7_school_close +
            win7_office_close + win7_shop_close + win7_restaurant_close +
            win7_bar_close + win7_entertainment_close+
            win7_cultural_close + win7_worship_close +
            win7_sports_indoor_close + win7_sports_outdoor_close +
            win7_gathering_outside_10lower + win7_gathering_outside_10over
          | code + as.factor(date) | 0 | code, input)
output$default <- m$beta[2 : 11]
write.csv(output, 'plot/sensi_withhold_bra.csv')
|
e06f80e8bbd7946278e69f42b63cf2a33d046aba
|
7fc82291996c34238ce232548f88ee2dd761df49
|
/R/foreigners.R
|
27d636e6ef00a941e2877317de314b4f12991eb3
|
[
"MIT"
] |
permissive
|
ZajacT/ASIA1
|
125a9c1f6413458cf5dfaf9a9ae8a82742aae006
|
f1ba2a637a71603a2adfee6589414d0dac6eda34
|
refs/heads/master
| 2021-06-30T04:13:42.998667
| 2018-12-20T14:16:50
| 2018-12-20T14:16:50
| 134,391,334
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,346
|
r
|
foreigners.R
|
#' @title Lists foreigners and their admissions status
#' @description Function lists foreigners and their admissions status and writes
#' results to a file
#' @param registrations optionally path to the file with data on registrations
#' @param scores optionally path to the file with data on recruitment scores
#' @param output optionally path to the file in which results will be saved;
#' if \code{NA} is given as a value of this parameter, results won't be saved
#' to a file
#' @details
#' Location of files contaninig the data to be checked and location of file
#' in which results will be saved can be described noninteractively with
#' function arguments described above or - if any of this arguments is omitted -
#' interactively with a system file-selection dialog.
#' @return Data frame (tibble) with the list of foreign applicants - the same as
#' written to a file described with the \code{output} parameter (data frame is
#' returned invisibly).
#' @importFrom dplyr filter select
#' @importFrom rlang ensym
#' @importFrom utils write.csv2
#' @export
foreigners <- function(registrations = NULL, scores = NULL, output = NULL) {
  # Resolve the registrations file interactively when no path was given.
  if (is.null(registrations)) {
    registrations <- choose_file(" z danymi o rekrutacjach")
  }
  check_input_path(registrations, "registrations")
  registrations <- read_file(registrations)
  # Same for the recruitment-scores file.
  if (is.null(scores)) {
    scores <- choose_file(" z danymi o punktach rekrutacyjnych")
  }
  check_input_path(scores, "scores")
  scores <- read_file(scores)
  cat("--------------------\n",
      "Łączenie pliku z danymi o rekrutacjach z danymi o punktach rekrutacyjnymi.\n",
      sep = "")
  # Merge registrations with scores; join_with_check reports join problems.
  registrations <- join_with_check(registrations, scores,
                                   "danych o rekrutacjach",
                                   "danych o punktach rekrutacyjnych")
  cat("--------------------\n",
      "Wyszukiwanie obserwacji.\n",
      sep = "")
  #-----------------------------------------------------------------------------
  #|-> Here starts filtering data
  #-----------------------------------------------------------------------------
  # Keep admissions-status columns and retain only non-Polish citizenships.
  results <- registrations %>%
    select(pesel, studia, sciezka, imie, imie2, nazwisko, obywatelstwo,
           czy_oplacony, zakwalifikowany, przyjety, wynik) %>%
    filter(!(obywatelstwo %in% "PL"))
  #-----------------------------------------------------------------------------
  #|-> Here ends filtering data
  #-----------------------------------------------------------------------------
  cat("--------------------\n",
      "Zapisywanie listy\n",
      sep = "")
  # Resolve output path interactively; NA means "do not save".
  if (is.null(output)) {
    output <- choose_file(", w którym ma zostać zapisana lista obcokrajowców (plik zostanie zapisany w formacie CSV ze średnikiem jako separatorem pola)",
                          errorOnCancel = FALSE)
  }
  if (!is.na(output)) {
    # Normalise the extension to exactly one trailing ".csv".
    output <- sub("[.]csv$", "", output) %>% paste0(".csv")
    if (!(check_output_path(output, "output") %in% TRUE)) {
      output <- NA
    }
  }
  if (is.na(output)) {
    warning("Lista nie zostanie zapisana do pliku, ponieważ nie podano jego nazwy.",
            call. = FALSE, immediate. = TRUE)
  } else {
    # Semicolon-separated CSV in UTF-8 (Polish Excel convention).
    write.csv2(results, output, row.names = FALSE, na = "",
               fileEncoding = "UTF-8")
    cat("Zapisano listę do pliku '", output, "'.\n", sep = "")
  }
  # Return the result invisibly (side-effect function).
  invisible(results)
}
|
07f190927c0b1458ac192b556a3518df283accd3
|
b75e75cffe78ac9b8e7067cac738408ae2d7d634
|
/Utils.R
|
82d485a726c9306ecabcc044d081330b12b73064
|
[] |
no_license
|
redsnic/SteamAnalysis
|
29e16f58a38111f5721441153c4d26b0a440d14c
|
2786a5c2381689fc7fdbdfef83d17caf0e058154
|
refs/heads/master
| 2022-12-08T11:13:06.438605
| 2020-08-29T18:03:34
| 2020-08-29T18:03:34
| 286,234,354
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,105
|
r
|
Utils.R
|
# Normalise a text column for joining: strip characters matching the regular
# expression `er`, lower-case the text, and collapse runs of whitespace to a
# single space.
#
# Fix: the original hard-coded the column `name` inside both map_chr() calls,
# so any `colname` other than `name` was silently ignored; all three steps now
# operate on the requested column via tidy evaluation.
clean.text <- function(df,colname,er){
  colname <- enquo(colname)
  df %>% mutate( !!colname := map_chr(!!colname, ~ gsub(er, "", . ))) %>%
    mutate( !!colname := tolower(!!colname)) %>%
    mutate( !!colname := map_chr(!!colname, ~ gsub("\\s+", " ", . )))
}
# Number of distinct groups in `df` under the grouping columns given in `...`.
number.of.classes <- function(df, ...) {
  nrow(count(group_by(df, ...)))
}
# Count the rows of `df` that satisfy the filter conditions in `...`.
count.selected.lines <- function(df, ...){
  kept <- filter(df, ...)
  nrow(kept)
}
# Bin a (numeric-coercible) column into factor levels defined by `breaks`.
factorise <- function( df, colname, breaks ){
  target <- enquo(colname)
  mutate(df, !!target := cut(as.numeric(!!target), breaks = breaks))
}
# Keep rows whose `colname` value matches the regular expression `er`.
string.query <- function(df, colname, er){
  target <- enquo(colname)
  filter(df, grepl(er, !!target))
}
# Data frame of the unique values found in a single column of `df`.
get.unique <- function(df, colname){
  wanted <- enquo(colname)
  unique(select(df, !!wanted))
}
# Filter on a list-column of tags: keep rows whose tag vector contains AT
# LEAST ONE of the tags supplied in `...` (OR semantics).
# NOTE(review): `...` is spliced directly into `%in%`, so this only works when
# a single vector of tags is passed; several separate tag arguments would
# become extra arguments to `%in%` and fail. Confirm the intended call style.
filter.by.tag.or <- function(df, colname, ...){
  colname <- enquo(colname)
  # Tag each row with a synthetic id, unnest the tag column, and collect the
  # ids of rows that contain at least one of the requested tags.
  df2 <- df %>% mutate(id.. = seq(1,nrow(df))) %>%
    unnest(!!colname) %>% filter(!!colname %in% ...) %>%
    select(id..) %>% unique()
  # Keep (semi_join) the original rows whose id matched, then drop the id.
  semi_join( df %>% mutate(id.. = seq(1,nrow(df))),
             df2, by=c("id..")) %>%
    select(-id..)
}
# Complement of filter.by.tag.or: keep rows whose tag vector contains NONE of
# the tags supplied in `...`.
# NOTE(review): as in filter.by.tag.or, `...` is spliced into `%in%`, so a
# single vector of tags is expected.
filter.by.tag.negated.or <- function(df, colname, ...){
  colname <- enquo(colname)
  # Collect the ids of rows matching at least one tag, exactly as in
  # filter.by.tag.or ...
  df2 <- df %>% mutate(id.. = seq(1,nrow(df))) %>%
    unnest(!!colname) %>% filter((!!colname %in% ...)) %>%
    select(id..) %>% unique()
  # ... then anti_join to keep only the rows that did NOT match.
  anti_join( df %>% mutate(id.. = seq(1,nrow(df))),
             df2, by=c("id..")) %>%
    select(-id..)
}
# Like filter.by.tag.or, but keeps only rows whose tag list-column contains
# EVERY tag supplied in `...` (AND semantics).
#
# Fix: the original `for(x in ...) filter(df, ...)` discarded each filter()
# result and returned the loop's NULL value, so the function never filtered
# anything. The data frame is now narrowed tag by tag and returned.
filter.by.tag.and <- function(df, colname, ...){
  colname <- enquo(colname)
  for (tag in c(...)) {
    # keep only rows whose tag vector contains the current tag
    df <- filter(df, map_lgl(!!colname, ~ tag %in% .x))
  }
  df
}
# Check whether a graph is "regularizable": solve a linear program for edge
# weights (>= 1) that make every node's weighted degree equal. Returns the
# weights and the common degree, or NULL when the LP is infeasible.
# (from lecture slides)
regularify = function (g) {
  n = vcount(g)
  m = ecount(g)
  E = get.edges(g, E(g))
  B = matrix(0, nrow = n, ncol = m)
  # build incidence matrix (node x edge; 1 when the edge touches the node)
  for (i in 1:m) {
    B[E[i,1], i] = 1
    B[E[i,2], i] = 1
  }
  # objective function (all zeros: we only need feasibility, not optimality)
  obj = rep(0, m + 1)
  # constraint matrix: edge-weight surplus minus the common degree variable
  con = cbind(B, rep(-1, n))
  # direction of constraints
  dir = rep("=", n)
  # right hand side terms: weights are modelled as (1 + surplus), so the
  # unweighted degree moves to the RHS with a negative sign
  rhs = -degree(g)
  # solve the LP problem (lpSolve::lp)
  sol = lp("max", obj, con, dir, rhs)
  # get solution (status 0 = feasible)
  if (sol$status == 0) {
    s = sol$solution
    # weights: add back the implicit 1 baseline
    w = s[1:m] + 1
    # weighted degree shared by all nodes
    d = s[m+1]
  }
  # return the solution, or NULL when no regularizing weights exist
  if (sol$status == 0) return(list(weights = w, degree = d)) else return(NULL)
}
# Compute power x = (1/x) A
# Fixed-point iteration for the "power" vector satisfying x = (1/x) A,
# alternating over two sub-sequences until their difference is below 10^-t.
# INPUT
#  A = graph adjacency matrix
#  t = precision (number of decimal digits)
# OUTPUT
#  A list with:
#  vector = power vector
#  iter = number of iterations
# (from lecture slides)
power_utils = function(A, t) {
  n = dim(A)[1];
  # x_2k
  x0 = rep(0, n);
  # x_2k+1
  x1 = rep(1, n);
  # x_2k+2
  x2 = rep(1, n);
  diff = 1
  eps = 1/10^t;
  iter = 0;
  while (diff > eps) {
    # shift the window: compare x_2k+2 against x_2k to dampen oscillation
    x0 = x1;
    x1 = x2;
    x2 = (1/x2) %*% A;
    diff = sum(abs(x2 - x0));
    iter = iter + 1;
  }
  # it holds now: alpha x2 = (1/x2) A
  # NOTE(review): the rescaling below divides by x2[1] and so assumes the
  # first component is nonzero — confirm for the graphs used.
  alpha = ((1/x2) %*% A[,1]) / x2[1];
  # hence sqrt(alpha) * x2 = (1/(sqrt(alpha) * x2)) A
  x2 = sqrt(alpha) %*% x2;
  return(list(vector = as.vector(x2), iter = iter))
}
# Similarity matrix between columns (or rows, with mode = "row") of a graph's
# adjacency matrix. type = "cosine" normalises each column to unit length
# before taking inner products; type = "pearson" uses plain correlation.
# (from lecture slides)
similarity = function(g, type = "cosine", mode = "col" ) {
  adj <- as_adjacency_matrix(g, sparse = FALSE)
  if (mode == "row") {
    adj <- t(adj)
  }
  if (type == "cosine") {
    # column Euclidean norms, then S = D t(A) A D = cosine of column pairs
    col_norm <- apply(adj, 2, function(v) {sqrt(v %*% v)})
    D <- diag(1 / col_norm)
    S <- D %*% t(adj) %*% adj %*% D
  }
  if (type == "pearson") {
    S <- cor(adj)
  }
  return(S)
}
# One sweep of reachability expansion over adjacency matrix A: scan the
# indicator vector left to right, OR-ing in the neighbor row of every active
# node. Because `current` is updated inside the scan, nodes activated earlier
# in the same sweep can expand again later in it (this is deliberately NOT a
# strict single BFS level).
BFS.reachable <- function(current, A){
  for (node in seq_along(current)) {
    if (current[node] != 0) {
      current <- as.numeric(current | A[node, ])
    }
  }
  current
}
# Iterative version of the tag-similarity propagation: starting from the node
# `selected`, expand outwards with BFS.reachable for `max_iter` sweeps and
# record, for every newly reached node j, the bitset compatibility
# COV[selected, j] attenuated by 2^(iter-1). SIM_M is updated symmetrically
# and returned.
exp.corr.similarity.iter <- function(A, selected, SIM_M, max_iter, current, COV){
  # `done` remembers which nodes were already reached in an earlier sweep so
  # each node receives its (strongest, i.e. earliest) contribution only once.
  done <- rep(0, times=length(current))
  SIM_M[selected,selected] <- 1
  for(iter in 1:max_iter){
    if(iter != 1)
      done <- current
    current <- BFS.reachable(current,A)
    for(j in 1:length(current)){
      # newly reached in this sweep: take the max with any value previously
      # propagated from other selected nodes
      if( (current[j] >= 1) && (done[j] == 0)){
        SIM_M[selected,j] <- max(SIM_M[selected,j], COV[selected,j]/(2^(iter-1)))
        SIM_M[j,selected] <- SIM_M[selected,j]
      }
    }
  }
  return(SIM_M)
}
# Recursive (slower) version of the tag-similarity propagation; same idea as
# exp.corr.similarity.iter but expressed as tail recursion over `iter`.
# NOTE(review): unlike the iterative version there is no `done` bookkeeping,
# so already-reached nodes are re-updated on later (weaker) sweeps; max()
# keeps the result consistent, but confirm the two versions are meant to be
# interchangeable. The `bit` parameter is unused.
exp.corr.similarity <- function(A, bit, selected, SIM_M, iter, max_iter, current, COV){
  # iter == 0: seed the diagonal, then start the recursion proper.
  if(iter == 0){
    SIM_M[selected,selected] <- 1
    return(exp.corr.similarity(A, bit, selected, SIM_M, iter+1, max_iter, current, COV))
  }
  if(iter <= max_iter){
    current <- BFS.reachable(current,A)
    for(j in 1:length(current)){
      if( (current[j] >= 1)){
        # attenuate the bitset compatibility by distance (halved per sweep)
        SIM_M[selected,j] <- max(SIM_M[selected,j], COV[selected,j]/(2^(iter-1)))
        SIM_M[j,selected] <- SIM_M[selected,j]
      }
    }
    return(exp.corr.similarity(A, bit, selected, SIM_M, iter+1, max_iter, current, COV))
  }else{
    return(SIM_M)
  }
}
# Pairwise agreement between the rows of a 0/1 matrix: the fraction of bit
# positions on which two rows coincide (1 - normalised Hamming distance).
# Returns a symmetric n x n matrix with ones on the diagonal.
compute.bitcor <- function(bitv){
  n_rows <- nrow(bitv)
  width <- ncol(bitv)
  agreement <- matrix(0, nrow = n_rows, ncol = n_rows)
  # only the upper triangle is computed; the lower is mirrored
  for (a in seq_len(n_rows)) {
    for (b in a:n_rows) {
      shared <- (width - sum(xor(bitv[a, ], bitv[b, ]))) / width
      agreement[a, b] <- shared
      agreement[b, a] <- shared
    }
  }
  agreement
}
# Row means of M where the divisor is the count of strictly positive entries
# in the row (clamped to 1 so an all-non-positive row divides by 1, not 0).
#
# Improvement: the original apply() loop plus per-row counting closure is
# replaced by the equivalent vectorised rowSums/pmax computation.
mean.of.positives <- function(M){
  total <- rowSums(M)
  # number of entries > 0 per row, never less than 1 (avoids division by zero)
  positives <- pmax(rowSums(M > 0), 1)
  total / positives
}
# Mean affinity of every node of the graph with respect to a selection of
# nodes, obtained by propagating bitset compatibility along the graph.
#
# g     : graph G(V,E)
# sel   : nodes of V to scan (the selection)
# COV   : precomputed matrix of bitset compatibilities (see compute.bitcor)
# depth : BFS depth used to propagate the affinity from each selected node
#
get.affinity <- function(g, sel, COV, depth){
  A <- as_adjacency_matrix(g, sparse = FALSE)
  Sim_matrix <- matrix(rep(0, times=nrow(A)^2), ncol=nrow(A))
  for(i in sel){
    # one-hot start vector for the propagation rooted at node i
    ini <- rep(0,times=ncol(A))
    ini[i] <- 1
    Sim_matrix <- exp.corr.similarity.iter(A, i, Sim_matrix, depth, ini, COV)
  }
  # average each node's affinities over the selected nodes that contributed
  # a positive value (see mean.of.positives)
  mean.of.positives(Sim_matrix)
}
#### shared Userbase
# Adjacency list from a 0/1 adjacency matrix: element r holds the column
# indices of the 1-entries in row r.
to.adj.list <- function(A){
  lapply(seq_len(nrow(A)), function(r) which(A[r, ] == 1))
}
# Shared-userbase similarity (R version): for every selected game/node, run
# the flow propagation in shared.userbase.similarity and sum the resulting
# per-node flows over the selection.
#
# Fix: the original assigned each flow vector with `Sim_matrix[x] <- ...`,
# a single-cell (linear) index, so only the first flow value survived per
# selected node; the whole row is now assigned with `Sim_matrix[x, ] <- ...`.
get.shared.userbase.similarity <- function(g, sel, depth, attr){
  A <- as_adjacency_matrix(g, sparse = FALSE)
  AL <- to.adj.list(A)
  # one row per selected node, one column per graph node
  Sim_matrix <- matrix(0, nrow = length(sel), ncol = nrow(A))
  for (x in seq_along(sel)) {
    i <- sel[x]
    ini <- set(i)
    Sim_matrix[x, ] <- shared.userbase.similarity(A, AL, i, depth, ini, g, attr)
  }
  # total flow each node receives across the whole selection
  colSums(Sim_matrix)
}
# Helper: propagate "shared userbase" flow outwards from a single game/node.
# Q is a `sets` package set used as a work queue; each popped node pushes to
# its unvisited neighbours a flow equal to the edge attribute `attr` plus
# half of its own accumulated flow (geometric decay with distance).
# NOTE(review): the `i` and `depth` parameters are unused here — depth
# limiting appears to be unimplemented; confirm whether that is intentional.
# NOTE(review): the print() below looks like leftover debug output.
shared.userbase.similarity <- function(A,AL, i, depth, Q, g, attr){
  flow <- rep(0,times=ncol(A))
  done <- rep(FALSE,times=ncol(A))
  while(!set_is_empty(Q)){
    # pop one element from the queue (sets have no pop: iterate and break)
    for(v in Q){
      print(as.list(v)[[1]])
      Q <- Q-v
      break
    }
    # push flow to unvisited neighbours and enqueue them
    for(d in AL[[v]]){
      if(!done[d]){
        w <- get.edge.attribute(g,attr,
                                get.edge.ids(g,c(v,d)))
        flow[d] <- w + flow[v]/2
        Q<-Q+d
      }
    }
    done[v]<-TRUE
  }
  return(flow)
}
# Mean cosine similarity between each node and a set of nodes `sel`, read
# from a precomputed (square) cosine similarity matrix: entry i is the mean
# of cosine.similarity.matrix[i, sel].
#
# Improvement: the per-element loop with map_dbl() is replaced by a single
# vectorised rowSums over the selected columns (same values, no purrr use).
cosine.set.similarity <- function(sel, cosine.similarity.matrix){
  # the original sized the result by ncol(), so only the first ncol rows are
  # summarised (the matrix is square in practice)
  n <- ncol(cosine.similarity.matrix)
  unname(rowSums(cosine.similarity.matrix[seq_len(n), sel, drop = FALSE])) / length(sel)
}
# Competition ("1224") ranking with ties: rows sharing the same score get the
# same rank, and the rank after a tie block skips ahead by the block size.
# REQUIRES df to already be sorted by score_col (in some order); two NAs in a
# row also count as a tie.
# NOTE(review): `mutate(order_col = 1:nrow(df))` creates a literal column
# named "order_col" (not the column named by the `order_col` argument); the
# requested column is only ever written via df[i, quo_text(order_col)] below.
# Confirm whether the extra "order_col" column is intended.
# NOTE(review): when nrow(df) == 1 the loop index `2:nrow(df)` evaluates to
# c(2, 1), writing a phantom second row — a nrow >= 2 guard is needed.
rank.by.column <- function(df, score_col, order_col){
  score_col <- enquo(score_col)
  order_col <- enquo(order_col)
  df <- df %>% mutate(order_col = 1:nrow(df))
  # score vector in row order (assumes base-data.frame column extraction)
  scores <- (df %>% select(!!score_col))[,1]
  if(nrow(df)>0){
    n <- 1
    block <- 0
    df[1,quo_text(order_col)] <- n
    for(i in 2:nrow(df)){
      # tie: same score as the previous row (or both NA) -> same rank
      if( (is.na(scores[i]) && is.na(scores[i-1])) ||
          (!is.na(scores[i]) && !is.na(scores[i-1]) && scores[i] == scores[i-1])){
        df[i,quo_text(order_col)] <- n
        block <- block + 1
      }else{
        # end of a tie block: advance the rank past all tied rows
        n <- n+block
        block <- 0
        n <- n+1
        df[i,quo_text(order_col)] <- n
      }
    }
  }
  return(df)
}
|
6ba9cb10ef93cb123ff4d90c9da1129568ed9368
|
cd5ba89c012f5bc3c14bc6f74a457d0305dc6e24
|
/POLYCHORIC_R.R
|
14ad354e54d94225c52a8ac6d3ac173b95eda2d8
|
[] |
no_license
|
soy2padre/misspatt
|
16f6be068b570ede1b8e61e8d707c4c74d602f59
|
78c68d9f4f1b0c93232f767c97c7f3f23181a678
|
refs/heads/master
| 2021-04-05T11:14:47.780461
| 2020-10-21T12:53:49
| 2020-10-21T12:53:49
| 248,550,410
| 0
| 1
| null | 2020-03-25T01:23:28
| 2020-03-19T16:22:31
|
HTML
|
UTF-8
|
R
| false
| false
| 3,445
|
r
|
POLYCHORIC_R.R
|
# Brian P. O'Connor
# https://github.com/bpoconnor
# Compute a matrix of polychoric correlations for ordinal (whole-number) data.
#
# method = 'Revelle' uses psych::polychoric (only valid for <= 8 response
# categories); method = 'Fox' -- or any data with more than 8 categories --
# falls back to polycor::hetcor. The matrix is printed when verbose = TRUE
# and always returned invisibly.
#
# Fix: the scalar if() conditions now use the short-circuiting && / ||
# operators. The original elementwise & / | produced a zero-length condition
# (an error inside if()) whenever method = NULL was passed explicitly,
# because NULL == 'Revelle' evaluates to logical(0) and | does not
# short-circuit past the is.null() guard.
POLYCHORIC_R <- function (data, method='Revelle', verbose=TRUE){
  # Sanity check: polychoric correlations assume integer-coded categories.
  if (is.integer(data) == FALSE) {
    if (all((data - trunc(data)) == 0) == FALSE) {
      cat("\nThe data matrix does not appear to consist of whole numbers and is therefore not appropriate
for the computation of polychoric correlations.")
      cat("\nConsider stopping the program.\n\n")
    }
  }
  # Listwise-delete cases with missing values (required by both back ends).
  if (anyNA(data) == TRUE) {
    data <- na.omit(data)
    cat('\n\nCases with missing values were found and removed from the data matrix.\n\n')
  }
  # Maximum observed category code = number of response levels
  # (max() does not work directly on factors, hence apply + as.numeric).
  nvalues <- apply(data, MARGIN = 2, function(x) max(x, na.rm=TRUE))
  nvalues <- max(as.numeric(nvalues))
  # Default back end: psych::polychoric, valid for up to 8 categories.
  if (nvalues < 9 && (is.null(method) || method=='Revelle')) {
    rpolysR <- suppressWarnings(psych::polychoric(data, smooth=TRUE))
    rpolys <- rpolysR$rho
    if (verbose == TRUE) {
      cat('\n\nPolychoric correlations:\n\n')
      print(rpolys)
    }
  }
  if (max(nvalues) > 8)
    {cat('\n\nUsing the Fox polycor package because the maximum number of item categories is > 8\n\n')}
  # Fallback back end: polycor::hetcor (handles > 8 categories and mixed
  # variable types). The !is.null() guard keeps the condition length-1 even
  # when method = NULL.
  if ((!is.null(method) && method=='Fox') || max(nvalues) > 8) {
    data <- as.data.frame(data) # the data for hetcor must be a dataframe
    rpolysF <- polycor::hetcor(data)
    rpolys <- rpolysF$correlations
    if (verbose == TRUE) {
      cat('\n\nTypes of correlations computed by hetcor:\n\n')
      rtypes <- rpolysF$type
      colnames(rtypes) <- rownames(rtypes) <- colnames(data)
      print(rtypes)
      cat('\n\nPolychoric correlations:\n\n')
      print(rpolys)
    }
  }
  return(invisible(rpolys))
}
# using the polychor function instead of hetcor
# cnoms <- colnames(data) # get colnames
# rpolys <- matrix(-9999,ncol(data),ncol(data))
# for (i in 1:(ncol(data)-1)) {
# for (j in (i+1):ncol(data)) {
# rpolys[i,j] <- polychor(data[,i], data[,j], ML=FALSE, std.err=FALSE, .9999)
# rpolys[j,i] <- rpolys[i,j]
# }}
# diag(rpolys) <- 1
# if (min(eigen(rpolys) $values) < 0) {
# cat("\nOne or more negative eigenvalues exist in the matrix of")
# cat("\npolychoric correlations. The matrix was therefore smoothed")
# cat("\nby adding a ridge to the diagonal (see Werner & Wothke, 1993, p. 261).\n\n")
# # ridge approach = adding a constant to the diagonal so that
# # the smallest eval is > 0; Wothke 1993 p 261, and SAS Proc CALIS p 269
# constant = .25
# increment = .25
# for (lupe in 1:1000) {
# rpolys2 = rpolys + diag(constant*diag(cbind(rpolys)))
# if ((min(eigen(rpolys2) $value)) > 0 & (min(eigen(rpolys2) $value)) < .001) {break}
# if ((min(eigen(rpolys2) $value)) <= 0) { constant = constant + increment}
# if ((min(eigen(rpolys2) $value)) > 0) { increment = increment / 2; constant = constant - increment}
# }
# rpolys <- rpolys2
# return(rpolys)
# }
# colnames(rpolys) <- cnoms
# rownames(rpolys) <- cnoms
# return(invisible(rpolys))
# }}
|
e6fc8e6ad5f58db3fcc4625c4ec3c7c845c3c418
|
6059195ce6e1a4cdee3346acb689c26aa42d8c7d
|
/R/data.R
|
682ce496eae63a05512443a7084359193b1e65c7
|
[] |
no_license
|
jleluyer/signet
|
2e7d3534241ccd591305b8590398283f681280ed
|
b488d6d1fb3c0e223e815a75e3374621decb452f
|
refs/heads/master
| 2021-01-21T20:25:49.861599
| 2017-04-18T20:37:42
| 2017-04-18T20:37:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 245
|
r
|
data.R
|
#' Dataset of gene scores.
#'
#' A dataset of gene scores
#'
#' @format A data frame with 17918 rows and 2 variables:
#' \describe{
#' \item{gene}{gene identifier}
#' \item{score}{gene score}
#' ...
#' }
#' @source Daub et al., 2013, MBE.
"scores"
|
6be091ca7fe99be6856ce761b5e2b3cdb597000d
|
a2c983c2ef6f9d3ff4aaf1c7ad8cf59e4c9b7fe4
|
/stock.R
|
7496b3a62039b2c67e9fdd2eb98f26b867646f00
|
[] |
no_license
|
isaac-altair/Multivariate-Analysis
|
d426402d2b1d40a5b5d4f1718bc6fe484a7fc21a
|
d6739481504fc41e9d6c74d81d8ba36e86fdbebd
|
refs/heads/master
| 2020-03-10T20:57:58.269688
| 2018-04-15T06:44:15
| 2018-04-15T06:44:15
| 129,581,833
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 488
|
r
|
stock.R
|
# Exploratory PCA of daily stock prices.
# NOTE(review): `T` shadows the base shorthand for TRUE for the rest of the
# session — renaming it would be safer.
T = read.csv("YahooStock.csv", header=TRUE)
names(T)
head(T)
# Price columns only (column 1 is assumed to be the date/index).
P = T[ , 2:11]
names(P)
plot(P[,1],type='l',ylim=c(0,100))
# plot several time series
points(P[,3],type='l',col=3)
points(P[,4],type='l',col=4)
points(P[,5],type='l',col=5)
# PCA on standardised prices (scale=TRUE -> correlation-based PCA).
p= prcomp(P,scale=TRUE)
summary(p)
p$rotation
# Scree plot of the component variances.
plot(p, type="l")
# Loadings check: correlation of the raw series with the first two scores.
cor(P, p$x[,1:2])
p1 = p$x[ ,1]
p2 = p$x[ ,2]
# Marginal distributions and normality of the first two scores.
hist(p1)
hist(p2)
qqnorm(p1)
qqnorm(p2)
# NOTE(review): the hard-coded 1:5000 assumes exactly 5000 rows in the CSV;
# seq_len(nrow(P)) would be robust. Confirm the file size.
plot(1:5000, p1)
points(1:5000, p2, col="blue")
plot(p1, p2)
|
8b0ae8f489c922b79279e140dac0c9b205f72fbc
|
f2ccc4dd8363a7279365524c1b15b895e3e9166c
|
/man/plotAsso.Rd
|
ff01388cad6a0f0646354c26872fd610c9232b28
|
[] |
no_license
|
cran/IntegratedJM
|
79406b2b18f459e09534659691a794e762d7f296
|
ec82f6ce916fdc8305b2e09a317b097a27c0f754
|
refs/heads/master
| 2021-05-04T11:22:59.262537
| 2017-08-03T21:37:38
| 2017-08-03T21:37:38
| 48,081,992
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 727
|
rd
|
plotAsso.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/Functions.R
\name{plotAsso}
\alias{plotAsso}
\title{plotAsso}
\usage{
plotAsso(jointModelResult, type)
}
\arguments{
\item{jointModelResult}{Data frame, containing the results from the fitJM function.}
\item{type}{Character string, specifying the type of association - Pearson or Spearman.}
}
\value{
Creates a plot
}
\description{
The plotAsso function is used to plot the unadjusted association vs the adjusted association for all the genes.
}
\details{
Plots the unadjusted association vs the adjusted association for all the genes.
}
\examples{
\dontrun{
plotAsso(jointModelResult=jmRes,type="Pearson")
}
}
|
71c15b84a5364d47ff06d2eec77ba0d5a2481bda
|
d60af7d5b9ff9f1f5455039b5523e32fe6e17b8c
|
/MachineLearningHW2.R
|
7fa4c16bf803f4a3a52ad34303202520d93925ff
|
[] |
no_license
|
TanalpSengun/MachineLearning
|
3f11c36c44938ddff31294f37b30a4a2d08a602a
|
527cbcde0b96fb510d29647a19bb915c9d7fcac2
|
refs/heads/master
| 2022-12-02T10:59:55.890178
| 2020-08-07T12:36:10
| 2020-08-07T12:36:10
| 285,816,096
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,665
|
r
|
MachineLearningHW2.R
|
# Numerically safe natural log: offsets the argument by a tiny epsilon so
# that an input of 0 cannot produce -Inf.
safe_log <- function(x1) {
  log(x1 + 1e-100)
}
# Read the image data and class labels into memory.
# header = FALSE so the first data row is not consumed as column names.
data_set <- read.csv("hw02_data_set_images.csv", header = FALSE)
true_y <- read.csv("hw02_data_set_labels.csv", header = FALSE)
# Convert the data frames to matrices for linear-algebra operations.
X1 =as.matrix(data_set)
Y1 =as.matrix(true_y)
# Train/test split per class: each of the 5 classes (A-E) occupies 39
# consecutive rows; the first 25 per class are used for training.
trainA = X1[1:25,]
trainB = X1[40:64,]
trainC = X1[79:103,]
trainD = X1[118:142,]
trainE = X1[157:181,]
# Stack all per-class training matrices into a single design matrix.
trainTotal= rbind(trainA,trainB,trainC,trainD,trainE)
# The remaining 14 rows per class form the test set.
testA = X1[26:39,]
testB = X1[65:78,]
testC= X1[104:117,]
testD = X1[143:156,]
testE = X1[182:195,]
# Stack all per-class test matrices.
testTotal= rbind(testA,testB,testC,testD,testE);
# Training labels: overwrite the letter codes with class numbers 1..5 and
# coerce from character to numeric.
Y_trainA = Y1[1:25,]
Y_trainA [1:25] = 1;
Y_trainA = as.numeric(Y_trainA);
Y_trainB = Y1[40:64,]
Y_trainB [1:25] = 2;
Y_trainB = as.numeric(Y_trainB);
Y_trainC = Y1[79:103,]
Y_trainC [1:25] = 3;
Y_trainC = as.numeric(Y_trainC);
Y_trainD = Y1[118:142,];
Y_trainD [1:25] = 4;
Y_trainD= as.numeric(Y_trainD);
Y_trainE = Y1[157:181,]
Y_trainE [1:25] = 5;
Y_trainE = as.numeric(Y_trainE);
# Assemble the 125x1 training label vector in class order.
Y_trainTotal = matrix(1:125, nrow = 125, ncol = 1)
Y_trainTotal[1:25]=Y_trainA;
Y_trainTotal[26:50]=Y_trainB;
Y_trainTotal[51:75]=Y_trainC;
Y_trainTotal[76:100]=Y_trainD;
Y_trainTotal[101:125]=Y_trainE;
# Test labels: same letter-to-number conversion for the 14 test rows per class.
Y_testA = Y1[26:39,]
Y_testA [1:14] = 1;
Y_testA = as.numeric(Y_testA);
Y_testB = Y1[65:78,]
Y_testB [1:14] = 2;
Y_testB = as.numeric(Y_testB);
Y_testC= Y1[104:117,]
Y_testC [1:14] = 3;
Y_testC = as.numeric(Y_testC);
Y_testD = Y1[143:156,]
Y_testD [1:14] = 4;
Y_testD = as.numeric(Y_testD);
Y_testE = Y1[182:195,]
Y_testE [1:14] = 5;
Y_testE = as.numeric(Y_testE);
# Assemble the 70x1 test label vector in class order.
Y_testTotal = matrix(1:70, nrow = 70, ncol = 1)
Y_testTotal[1:14]=Y_testA;
Y_testTotal[15:28]=Y_testB;
Y_testTotal[29:42]=Y_testC;
Y_testTotal[43:56]=Y_testD;
Y_testTotal[57:70]=Y_testE;
# get number of classes and number of samples
classNum <- 5 # number of classes
N <- length(Y_trainTotal) #number of samples
# one-of-K encoding: N x K indicator matrix with a 1 in the column of each
# sample's class (cbind builds the (row, class) index pairs).
R_t <- matrix(0, N, classNum)
R_t[cbind(1:N, Y_trainTotal)] <- 1
# Sigmoid (logistic) discriminant scores for every sample/class pair.
# trainTotal: N x d data matrix; W: d x K weight matrix; w0: length-K bias.
# Returns an N x K matrix of values in (0, 1).
sigmoid <- function(trainTotal, W, w0) {
  # Augment the data with a constant 1 column so the bias row of the
  # stacked weight matrix is applied in the same matrix product.
  augmented_data <- cbind(trainTotal, 1)
  stacked_weights <- rbind(W, w0)
  linear_scores <- augmented_data %*% stacked_weights
  1 / (1 + exp(-linear_scores))
}
# Gradient of the squared-error objective with respect to the weight
# matrix W (Alpaydin eq. 10.44):  dE/dW = -X^T [(R - Y) * Y * (1 - Y)].
#
# trainTotal:  N x d data matrix.
# R_t:         N x K one-of-K target matrix.
# Y_predicted: N x K sigmoid outputs.
# Returns a d x K matrix.
#
# The original looped over classes with sapply() and replicated the per-class
# residual into a full matrix for every class; the same result is one
# elementwise product followed by a single crossprod (t(X) %*% V).
gradient_W <- function(trainTotal, R_t, Y_predicted) {
  residual_term <- (R_t - Y_predicted) * (1 - Y_predicted) * Y_predicted
  -crossprod(trainTotal, residual_term)
}
# Gradient of the squared-error objective with respect to the bias vector w0
# (Alpaydin eq. 10.45). Returns a length-K numeric vector.
gradient_w0 <- function(R_t, Y_predicted) {
  residual <- R_t - Y_predicted
  sigmoid_slope <- (1 - Y_predicted) * Y_predicted
  -colSums(residual * sigmoid_slope)
}
# ---------------------------------------------------------------------------
# Gradient-descent training of the sigmoid classifier, followed by
# error-curve plotting and confusion matrices on train and test data.
# ---------------------------------------------------------------------------
# set learning parameters ( those are given in the lecture)
eta <- 0.01
epsilon <- 1e-3
# randomly initalize W and w0
#wth the following functions I assigned the initial weights to W and W0 and those will be change
# with time by the iterations.
set.seed(521)
W <- matrix(runif(ncol(trainTotal) * classNum, min = -0.01, max = +0.01), ncol(trainTotal), classNum)
w0 <- runif(classNum, min = -0.001, max = 0.001)
# learn W and w0 using gradient descent
# the error function is again from the book 10.45
iteration <- 1
obj_val <- c()
while (1) {
Y_predicted <- sigmoid(trainTotal, W, w0)
# Track the squared-error objective at each iteration (for the plot below).
obj_val <- c(obj_val, (1/2)*sum((R_t -Y_predicted )^2))
W_past <- W
w0_past <- w0
W <- W - eta * gradient_W(trainTotal, R_t, Y_predicted)
w0 <- w0 - eta * gradient_w0(R_t, Y_predicted)
# Stop when the L2 norm of the full parameter update drops below epsilon.
if (sqrt(sum((w0 - w0_past)^2) + sum((W - W_past)^2)) < epsilon) {
break
}
iteration <- iteration +1
}
#print(W)
#print(w0)
# plot objective function during iterations
plot(1:iteration, obj_val,
type = "l", lwd = 2, las = 1,
xlab = "Iteration", ylab = "Error")
# calculate confusion matrix for the train data
# Predicted class = column with the highest sigmoid score per row.
y_predicted <- apply(Y_predicted, 1, which.max)
confusion_matrix <- table(y_predicted, Y_trainTotal)
print(confusion_matrix)
# calculate confusion matrix for the test data
y_predictedTest <- sigmoid (testTotal, W, w0)
y_predicted2 <- apply(y_predictedTest, 1, which.max)
confusion_matrix <- table(y_predicted2, Y_testTotal)
print(confusion_matrix)
|
1ee3bfcd2934945fcb6d96f51797a644a2c336a8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/quanteda/examples/dfm.Rd.R
|
04e966501c8ec50877a3483b2d4ca816c7ae52b7
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,817
|
r
|
dfm.Rd.R
|
# Auto-extracted example code for quanteda::dfm() (document-feature matrix).
# NOTE(review): this script only demonstrates API usage; it produces no
# persistent output and requires the quanteda package and its built-in
# corpora (data_corpus_inaugural, data_corpus_irishbudget2010).
library(quanteda)
### Name: dfm
### Title: Create a document-feature matrix
### Aliases: dfm
### Keywords: dfm
### ** Examples
## for a corpus
corpus_post80inaug <- corpus_subset(data_corpus_inaugural, Year > 1980)
dfm(corpus_post80inaug)
dfm(corpus_post80inaug, tolower = FALSE)
# grouping documents by docvars in a corpus
dfm(corpus_post80inaug, groups = "President", verbose = TRUE)
# with English stopwords and stemming
dfm(corpus_post80inaug, remove = stopwords("english"), stem = TRUE, verbose = TRUE)
# works for both words in ngrams too
dfm("Banking industry", stem = TRUE, ngrams = 2, verbose = FALSE)
# with dictionaries
corpus_post1900inaug <- corpus_subset(data_corpus_inaugural, Year > 1900)
mydict <- dictionary(list(christmas = c("Christmas", "Santa", "holiday"),
opposition = c("Opposition", "reject", "notincorpus"),
taxing = "taxing",
taxation = "taxation",
taxregex = "tax*",
country = "states"))
dfm(corpus_post1900inaug, dictionary = mydict)
# removing stopwords
test_text <- "The quick brown fox named Seamus jumps over the lazy dog also named Seamus, with
the newspaper from a boy named Seamus, in his mouth."
test_corpus <- corpus(test_text)
# note: "also" is not in the default stopwords("english")
featnames(dfm(test_corpus, select = stopwords("english")))
# for ngrams
featnames(dfm(test_corpus, ngrams = 2, select = stopwords("english"), remove_punct = TRUE))
featnames(dfm(test_corpus, ngrams = 1:2, select = stopwords("english"), remove_punct = TRUE))
# removing stopwords before constructing ngrams
tokens_all <- tokens(char_tolower(test_text), remove_punct = TRUE)
tokens_no_stopwords <- tokens_remove(tokens_all, stopwords("english"))
tokens_ngrams_no_stopwords <- tokens_ngrams(tokens_no_stopwords, 2)
featnames(dfm(tokens_ngrams_no_stopwords, verbose = FALSE))
# keep only certain words
dfm(test_corpus, select = "*s", verbose = FALSE) # keep only words ending in "s"
dfm(test_corpus, select = "s$", valuetype = "regex", verbose = FALSE)
# testing Twitter functions
test_tweets <- c("My homie @justinbieber #justinbieber shopping in #LA yesterday #beliebers",
"2all the ha8ers including my bro #justinbieber #emabiggestfansjustinbieber",
"Justin Bieber #justinbieber #belieber #fetusjustin #EMABiggestFansJustinBieber")
dfm(test_tweets, select = "#*", remove_twitter = FALSE) # keep only hashtags
dfm(test_tweets, select = "^#.*$", valuetype = "regex", remove_twitter = FALSE)
# for a dfm
dfm1 <- dfm(data_corpus_irishbudget2010)
dfm2 <- dfm(dfm1,
groups = ifelse(docvars(data_corpus_irishbudget2010, "party") %in% c("FF", "Green"),
"Govt", "Opposition"),
tolower = FALSE, verbose = TRUE)
|
12f8d54acdcfb2007a5a9da687671d3a7f4cca13
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/TimeProjection/R/projection.R
|
b577310db2c55e14ad14ab011a3c773fedfcc1dc
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,364
|
r
|
projection.R
|
#' Time Projection
#'
#' Project dates to lower dimensional subspace.
#' Extracts components year, month, yday, mday, hour, minute, weekday,
#' bizday and season from a date object
#'
#' @param dates date or datetime objects
#' @param size either "narrow" or "wide". If narrow, returns a data frame
#'    containing the projections as column variables using factors.
#'    If wide, returns a sparse matrix containing the projections as column
#'    variables using 0-1 variables
#' @param holidays argument to determine which days are considered holidays,
#'    affecting the business day projection. By default uses holidayNYSE()
#'    provided by the timeDate package, or can be specified as a vector of
#'    strings representing dates in the yyyy-mm-dd format
#' @param as.numeric logical only used when size = "narrow". Returns the
#'    columns as numeric values instead of factors
#' @param drop logical. If true, drop any column that only has 1 level or only
#'    1 unique element in it
#' @examples
#' dates = timeSequence(from = "2001-01-01", to = "2004-01-01", by = "day")
#' projectDate(as.Date(dates))
#' @export
projectDate = function(dates, size = c("narrow", "wide"),
                       holidays = holidayNYSE(year = unique(year(dates))),
                       as.numeric = FALSE, drop = TRUE) {
    size = match.arg(size)
    # Numeric calendar/clock components of each date.
    year = year(dates)
    month = month(dates)
    yday = yday(dates)
    mday = mday(dates)
    yweek = floor((yday - 1) / 7) + 1  # 1-based 7-day block within the year
    mweek = floor((mday - 1) / 7) + 1  # 1-based 7-day block within the month
    hour = hour(dates)
    minute = minute(dates)
    # The wide projection always needs factors (they become 0-1 indicator
    # columns); the narrow one only when numeric output was not requested.
    # Scalar condition, so use the short-circuiting operator.
    if (!as.numeric || size == "wide") {
        year = factor(year, ordered = TRUE)
        month = factor(month, levels = 1:12, ordered = TRUE)
        yday = factor(yday, levels = 1:366, ordered = TRUE)
        mday = factor(mday, levels = 1:31, ordered = TRUE)
        yweek = factor(yweek, levels = 1:53, ordered = TRUE)
        mweek = factor(mweek, levels = 1:5, ordered = TRUE)
        hour = factor(hour, levels = 0:23, ordered = TRUE)
        minute = factor(minute, levels = 0:59, ordered = TRUE)
    }
    weekday = factor(weekdays(dates), levels = c("Sunday", "Monday", "Tuesday", "Wednesday",
                                                 "Thursday", "Friday", "Saturday"),
                     ordered = TRUE)
    bizday = factor(is.Bizday(dates, holidays))
    season = factor(getSeason(dates), levels = c("Winter", "Spring", "Summer", "Fall"),
                    ordered = TRUE)
    raw = data.frame(year = year,
                     month = month,
                     yweek = yweek,
                     mweek = mweek,
                     yday = yday,
                     mday = mday,
                     hour = hour,
                     minute = minute,
                     weekday = weekday,
                     bizday = bizday,
                     season = season)
    if (drop) {
        # A column is redundant when every row holds the same value.
        redundantCols = rep(FALSE, ncol(raw))
        for (i in seq_len(ncol(raw))) {
            if (all(diff(as.numeric(raw[, i])) == 0)) redundantCols[i] = TRUE
        }
        # drop = FALSE keeps `raw` a data.frame even when only one column
        # survives; the wide branch below relies on a data.frame input.
        raw = raw[, !redundantCols, drop = FALSE]
    }
    if (size == "narrow") return (raw)
    if (size == "wide") {
        return (sparse.model.matrix(~ . - 1, raw))
    }
}
# Source: http://stackoverflow.com/questions/9500114/find-which-season-a-particular-date-belongs-to
# Classify each date into "Winter", "Spring", "Summer" or "Fall" using fixed
# solstice/equinox cutoffs (the 15th of Dec/Mar/Jun/Sep).
getSeason <- function(dates) {
  # Project every date onto the reference year 2012 (a leap year) so only
  # the month and day decide the season.
  ref_day <- as.Date(strftime(dates, format = "2012-%m-%d"))
  winter_start <- as.Date("2012-12-15", format = "%Y-%m-%d")
  spring_start <- as.Date("2012-3-15", format = "%Y-%m-%d")
  summer_start <- as.Date("2012-6-15", format = "%Y-%m-%d")
  fall_start <- as.Date("2012-9-15", format = "%Y-%m-%d")
  # Nested vectorized ifelse: each branch only sees dates not yet classified.
  ifelse(ref_day >= winter_start | ref_day < spring_start, "Winter",
         ifelse(ref_day < summer_start, "Spring",
                ifelse(ref_day < fall_start, "Summer", "Fall")))
}
# TRUE for dates that fall on a weekday (Mon-Fri per isWeekday's wday = 1:5)
# and are not listed in `holidays`. Returns an unnamed logical vector.
is.Bizday = function(x, holidays)
{
  # Compare only the yyyy-mm-dd prefix so datetimes match date-only holidays.
  day_x = substr(as.character(x), 1, 10)
  day_h = substr(as.character(holidays), 1, 10)
  # unname() mirrors the original as.integer() conversion, which stripped
  # any names that isWeekday may attach.
  on_weekday = unname(isWeekday(x, wday = 1:5))
  not_holiday = !(day_x %in% day_h)
  return (on_weekday & not_holiday)
}
#' @method weekdays timeDate
#' @S3method weekdays timeDate
# S3 method: convert a timeDate object to Date first, then delegate to the
# base weekdays() implementation.
weekdays.timeDate = function(x, abbreviate=FALSE) {
  as_plain_date <- as.Date(x)
  weekdays(as_plain_date, abbreviate)
}
|
51672775b945f86aecc88faed2f9f806470f8fc1
|
786f434f2f65fc35339283f64ad86cbc2f363300
|
/Library/Mode.Sample.R
|
446610f15066d86564793217f8fd58281eaa30db
|
[] |
no_license
|
amberjaycocks/HIVProbPrEP
|
e483036089b5a8f281c9a66a11a729ca7b0911aa
|
b90676f5c4498076c997cc37f83f7273517dc3ae
|
refs/heads/master
| 2020-05-18T11:46:00.081053
| 2014-06-17T20:40:39
| 2014-06-17T20:40:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 105
|
r
|
Mode.Sample.R
|
# Extract the `Mode` column of `param` as an array whose element names are
# the row names of `param`.
Mode.Sample <- function(param){
  modes <- as.array(param$Mode)
  names(modes) <- rownames(param)
  modes
}
|
780ced967a4e0a8a3b11bdec7446671e21a3f87f
|
08df3ba8636658bfb564264d578ec5d700571d33
|
/using_refnet_troubleshooting/mapping_addresses.R
|
3978249b224a137025be09f4795713d7aa063eef
|
[] |
no_license
|
aurielfournier/refnet_materials
|
cbc1c5163051e977478d2887ccdd5dd745303987
|
e0ce97910caec345a3894d1088d7b487dcfd9d23
|
refs/heads/master
| 2020-03-27T02:17:38.835246
| 2018-11-01T15:44:54
| 2018-11-01T15:44:54
| 145,779,762
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 387
|
r
|
mapping_addresses.R
|
# ---------------------------------------------------------------------------
# Geocode author addresses from a refined Web of Science dataset and draw
# point/country maps plus co-authorship network plots (refnet package).
# Requires ./output/eb_refined.Rdata to exist and, for geocoding, network
# access via ggmap.
# ---------------------------------------------------------------------------
library(ggmap)
library(tidyverse)
library(stringr)
library(stringi)
library(refnet)
load("./output/eb_refined.Rdata")
# NOTE(review): `world` is never used later in this visible script.
world <- map_data("world")
# Geocode the addresses (adds latitude/longitude).
zz <- address_lat_long(data=eb_refined)
plot_addresses_points(data=zz)
plot_addresses_country(data=zz)
s <- net_plot_coauthor(data=zz)
s$data
q <- net_plot_coauthor_country(data=zz)
q$plot
p <- net_plot_coauthor_address(data=zz)
p$plot
|
1e2ec94390c0cd33d2a13b1866d0eddb5d049bc3
|
5a9956727d7a12f0bf2c697c486a49c2f37ed8c3
|
/man/RFcd.Rd
|
d371456e2f2e44775a206db3577669694d772cc0
|
[] |
no_license
|
mlondschien/hdcd
|
fb1bfcf9315bcda74632e86c415d22357193f5d9
|
c6dd90aceb7921b25d18f9395771415080a6b530
|
refs/heads/master
| 2023-02-04T08:14:54.664927
| 2020-12-29T18:48:35
| 2020-12-29T18:48:35
| 280,871,404
| 2
| 0
| null | 2020-10-22T16:41:27
| 2020-07-19T13:29:06
|
R
|
UTF-8
|
R
| false
| true
| 519
|
rd
|
RFcd.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hdcd.R
\name{RFcd}
\alias{RFcd}
\title{Random Forest change point detection}
\usage{
RFcd(x, delta = 0.1, control = hdcd_control())
}
\arguments{
\item{x}{A matrix with observations in rows}
\item{delta}{Minimal relative segment length, defaults to 0.1}
\item{control}{Control parameter as returned by \link{hdcd_control}}
}
\description{
Find (non-parametric) breaks in the distribution of a time series using Random Forest Classifiers
}
|
cc8be77b94d03598b78c824af24524cd61935995
|
f5a491f6cb11aca7d592ddebe2a75e6bbd28055d
|
/run_analysis.R
|
7cd83d8e45f90a1a433337c8605d4deedcaabe8f
|
[] |
no_license
|
Lunoj/Coursera_Data_Cleaning_Project
|
6ccf973cda00953f131cc185ecd7ee631f6a08c3
|
45dd9f1c5565c3315d0745c1e6a4af9bf84e53ea
|
refs/heads/master
| 2021-01-25T09:32:34.380153
| 2017-06-09T13:33:18
| 2017-06-09T13:33:18
| 93,854,022
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,159
|
r
|
run_analysis.R
|
# ---------------------------------------------------------------------------
# Getting & Cleaning Data course project: download the UCI HAR dataset,
# merge the train/test splits, label activities, extract mean()/std()
# features and write a tidy per-subject/per-activity summary.
# ---------------------------------------------------------------------------
# load required library
library(plyr)
library(dplyr)
library(data.table)
# download file
if(!file.exists("./data")){dir.create("./data")}
fileurl <- "http://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileurl,destfile = "./data/Dataset.zip")
#Unzip file
unzip("./data/Dataset.zip")
#Set Datafile pat
# NOTE(review): setwd() to a hard-coded user path after unzip() means the
# extracted files may not be where `filepath` expects them; also "./Data"
# below differs in case from the "./data" directory created above --
# confirm on a case-sensitive filesystem.
setwd("C:/Users/Patil/Data_Science")
filepath <- file.path("./Data","UCI HAR Dataset")
filelist <- list.files(filepath,recursive = TRUE)
# Read Result Data files
testactivityresult <- read.table(file.path(filepath,"test","X_test.txt"),header = FALSE)
trainactivityresult <- read.table(file.path(filepath,"train","X_train.txt"),header = FALSE)
# Read activity file
testactivity <- read.table(file.path(filepath,"test","Y_test.txt"),header = FALSE)
trainactivity <- read.table(file.path(filepath,"train","Y_train.txt"),header = FALSE)
# Read volunteers data
testvolunteersdata <- read.table(file.path(filepath,"test","subject_test.txt"),header = FALSE)
trainvolunteersdata <- read.table(file.path(filepath,"train","subject_train.txt"),header = FALSE)
# Mergeing data horizontal
activityresult<- rbind(testactivityresult,trainactivityresult)
activity <- rbind(testactivity,trainactivity)
volunteersdata <- rbind(testvolunteersdata,trainvolunteersdata)
# Give header
#colnames(activity) <- "activity"
#colnames(volunteersdata) <- "subject"
features <- read.table(file.path(filepath,"features.txt"),header = FALSE)
combinedata <- cbind(volunteersdata,activity)
combinedata <- cbind(combinedata,activityresult)
# Tranpose features and table only header
# Use the second row of the transposed features table (the feature names)
# as column headers, prefixed by the subject and activity columns.
tranposefeatures <- t(features)
tranposefeatures <- tranposefeatures[2,]
tranposefeatures <- c(c("subject","activity"),tranposefeatures)
combinedata <- setNames(combinedata,tranposefeatures)
# read activities
activitylable <- read.table(file.path(filepath,"activity_labels.txt"),header = FALSE)
colnames(activitylable) <- c("activity","activities")
combinedata <- join(activitylable, combinedata,by="activity")
combinedata$activity <- NULL
combinedata$activities <- as.factor(combinedata$activities)
# Change names to Appropriately activity names and subject names
names(combinedata) <- gsub("^f","frequence",names(combinedata))
names(combinedata) <- gsub("^t","time",names(combinedata))
names(combinedata) <- gsub("Acc","Accelerometer",names(combinedata))
names(combinedata) <- gsub("Gyro","Gyroscope",names(combinedata))
names(combinedata) <- gsub("Mag","Magnitude",names(combinedata))
combinedata$subject <- as.character(combinedata$subject)
combinedata$subject <- paste("subject", combinedata$subject)
# Extract only Mean and Std
# NOTE(review): Meandatset and Stddatset are computed but not used again in
# this visible script; the tidy summary below averages ALL columns instead.
meanrange <- grep("mean()", names(combinedata), value = FALSE, fixed = TRUE)
Meandatset <- combinedata[meanrange]
stdrange <- grep("std()", names(combinedata), value = FALSE, fixed = TRUE)
Stddatset <- combinedata[stdrange]
# Create Tidy Data Set
# Average every measurement column per (subject, activity) pair.
TidyData <- data.table(combinedata)
TidyData <- TidyData[,lapply(.SD,mean),by = "subject,activities"]
write.table(TidyData, file = "TidyDataSet.txt", row.names = FALSE)
|
23971f6b8db163df2ee20b728791a6d279256321
|
94dcbff4ef2072f5a5ecbb95af1f259f31ad3b20
|
/man/int.est.rm.Rd
|
c73a85ba0d554059eabaeed964576f13d54fb4bf
|
[] |
no_license
|
DistanceDevelopment/WiSP
|
bf51406076ded020098f4973003eafc05a45d437
|
e0e2665d6b3b49ba634944b4bb7303be41620e5a
|
refs/heads/master
| 2021-06-05T02:54:08.957306
| 2020-09-14T20:03:59
| 2020-09-14T20:03:59
| 9,773,511
| 0
| 1
| null | 2020-09-14T09:30:06
| 2013-04-30T15:05:50
|
R
|
ISO-8859-1
|
R
| false
| false
| 3,238
|
rd
|
int.est.rm.Rd
|
\name{int.est.rm}
\alias{int.est.rm}
\title{Removal Method Abundance Estimation: Interval Estimate}
\description{
This function estimates the animal population size for the current survey sample of the simple removal method.
}
\usage{
int.est.rm(samp, ci.type = "boot.nonpar", nboot = 999, vlevels = c(0.025,0.975), numerical = TRUE, plot = FALSE, seed = NULL, ...)
}
\arguments{
\item{samp}{object of class `sample.rm´.}
\item{ci.type}{method for constructing the confidence interval. Possible methods are
* `boot.par´ for parametric bootstrap CI,
* `boot.nonpar´ for nonparametric bootstrap CI, or
* `profile´ to get a profile likelihood CI.}
\item{nboot}{number of bootstrap replications.}
\item{vlevels}{vector of percentage levels for confidence intervals.}
\item{numerical}{if TRUE, estimation is by numerical maximisation of the log likelihood function. Otherwise, estimation is by analytical methods}
\item{plot}{if true the distribution of the estimator of N is plotted (bootstrap methods only).}
\item{seed}{the number passed to set.seed() to initialise random number generator}
\item{...}{other plot parameters}
}
\value{
An object of class `int.est.rm´ containing the following items:
\item{levels}{percentage levels for confidence interval}
\item{ci}{the confidence interval}
\item{boot.mean}{mean of bootstrap estimates}
\item{boot.dbn}{full set of nboot bootstrap estimates.}
\item{se}{standard error}
\item{cv}{coefficient of variation}
\item{ci.type}{Equal to the object 'ci.type' passed to the function}
\item{proflik}{(not currently used)}
\item{numerical}{Equal to the object 'numerical' passed to the function}
\item{parents}{Details of WiSP objects passed to function}
\item{created}{Creation date and time}
\item{seed}{Equal to the argument 'seed' passed to the function}
}
\note{
The warning `NA/Inf replaced by maximum positive value' may occur; this seems usually to be because the numerical optimization routine tried searching in an infeasible region of the paramter space.
}
\seealso{
\code{\link{setpars.survey.rm}}, \code{\link{generate.sample.rm}}
\code{\link{point.est.rm}}, \code{\link{set.seed}}
}
\examples{
rm.reg<-generate.region(x.length=100, y.width=50)
rm.dens <- generate.density(rm.reg)
rm.poppars<-setpars.population(density.pop = rm.dens, number.groups = 100, size.method = "poisson", size.min = 1, size.max = 5, size.mean = 1, exposure.method = "beta", exposure.min = 2, exposure.max = 10, exposure.mean = 3, exposure.shape = 0.5, type.values=c("Male","Female"), type.prob=c(0.48,0.52))
rm.pop<-generate.population(rm.poppars)
rm.des<-generate.design.rm(rm.reg, n.occ = 5, effort=c(1,2,3,2,1))
rm.survpars<-setpars.survey.rm(pop=rm.pop, des=rm.des, pmin=0.03, pmax=0.95, improvement=0.05)
rm.samp<-generate.sample.rm(rm.survpars)
rm.ci<-int.est.rm(rm.samp)
summary(rm.ci)
plot(rm.ci, est="Nhat.grp")
plot(rm.ci, est="Es", type="hist")
plot(rm.ci, est="Nhat.ind", type="box")
}
\keyword{}
|
aeeaeaddaec912f6ab3879dc7399147d64f2e889
|
c005a2c57e87a330a2412997bfd38fd85c917f4e
|
/code/script_Feb2018.r
|
68971bd1870ed84d9f7228b93b6c26e22e1bf0cd
|
[] |
no_license
|
richardli/RandomizedVAstudy
|
a383ef89b631a764f52bf97d526de20067c48ae4
|
4f2f4059f86db2d025704e3f5033935097f588d2
|
refs/heads/master
| 2021-01-17T14:57:00.721793
| 2018-05-16T18:04:00
| 2018-05-16T18:04:00
| 51,721,626
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,660
|
r
|
script_Feb2018.r
|
###################################################################################
## This script perform the experiments 7 and 8 for the new data
## from 3 sites in India
###################################################################################
remove(list = ls())
library(openVA)
sites <- c("Amravati", "Anand", "Mansa")
counter <- 1
# NOTE(review): result containers are sized for 9 fits, which assumes 3 CSV
# files per site -- confirm against ../Data_Feb2018 contents.
fitnames <- rep(NA, 9)
fit <- out1 <- out2 <- out3 <- vector("list", 9)
# Number of MCMC iterations for each InSilicoVA fit.
Nitr <- 10000
###################################################################################
## Experiment 1: adult PHMRC-India ~ [Amravati, Anand, Mansa]
for(site in sites){
	files <- list.files(paste0("../Data_Feb2018/", site ))
	for(file in files){
		filename = gsub(".csv", "", file)
		raw <- read.csv(paste0("../Data_Feb2018/", site, "/", file))
		# Convert the raw symptom coding into the openVA WHO input format.
		data <- ConvertData(raw, yesLabel = "yes", noLabel = "no", missLabel = "missing")
		fit[[counter]] <- codeVA(data = data,
					data.type = "WHO",
					model = "InSilicoVA",
					Nsim=Nitr, auto.length = FALSE,
					jump.scale = 0.001)
		# Population-level cause-specific mortality fractions and the mean of
		# the individual cause probabilities.
		csmf <- getCSMF(fit[[counter]])[,1]
		prob <- apply(fit[[counter]]$indiv.prob, 2, mean)
		out1[[counter]] <- getTopCOD(fit[[counter]])
		out2[[counter]] <- cbind(Population = csmf,
								 Sample = prob)
		out3[[counter]] <- getIndivProb(fit[[counter]])
		write.csv(out2[[counter]],
			file = paste0("../Data_Feb2018/InSilico_results/", site, "/", filename, "_InSilicoVA_format2.csv"), row.names = TRUE)
		fitnames[[counter]] <- paste0(site, "_experiment7")
		names(fit)[counter] <- names(out1)[counter] <- names(out2)[counter] <- names(out3)[counter] <- fitnames[[counter]]
		counter <- counter + 1
	}
}
|
d7c7bb5f0b20d60ddd6e431096725e8eae388002
|
04cf2ed7a926c177b264a7663115d3c7d76bacb1
|
/week2/corr.R
|
87a9fb883cea01f6b5b3cb7ba2e799b4d8e72907
|
[] |
no_license
|
GiovanniCassani/R_programmingAssignments
|
9ecca208a87b9e63de6c4f26cd6ce7ec259dfae8
|
01173f61eea0d3de445bd0e7abfdadb3e619bbcd
|
refs/heads/master
| 2021-01-10T19:05:30.586608
| 2015-01-15T15:48:06
| 2015-01-15T15:48:06
| 29,242,581
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 438
|
r
|
corr.R
|
# Correlation between sulfate and nitrate readings for every monitor file in
# `directory` whose number of complete (NA-free) rows meets `threshold`.
#
# Args:
#   directory: path to a folder of CSV files, each with at least `sulfate`
#              and `nitrate` columns.
#   threshold: minimum number of complete rows a file must have for its
#              correlation to be included (default 0 = include every file).
#
# Returns: an unnamed numeric vector of correlations, possibly empty.
corr <- function(directory, threshold = 0) {
  files <- list.files(directory, full.names = TRUE)
  per_file <- lapply(files, function(path) {
    complete_rows <- na.omit(read.csv(path))
    if (nrow(complete_rows) >= threshold) {
      # cor() is symmetric, so argument order matches the original exactly.
      cor(complete_rows$nitrate, complete_rows$sulfate)
    } else {
      NULL  # silently dropped by unlist() below
    }
  })
  # as.numeric(NULL) -> numeric(0) keeps the empty result type stable.
  as.numeric(unlist(per_file))
}
|
c1bbf629ccf4af6eadc8dc7a369631c3c3a13e2b
|
7fee2033e776446ef9990582811b956f70176874
|
/R/klein.R
|
73c043262c4ba97a4119cbee179a0cef1673893f
|
[] |
no_license
|
tallulandrews/scRNA.seq.datasets
|
4930fd1a45ec3cf24b2a43253ce627df86692470
|
43e4957a3cb4d46f566133955242b2919dd7eadc
|
refs/heads/master
| 2021-01-21T20:47:39.616331
| 2017-05-23T21:32:46
| 2017-05-23T21:32:46
| 92,283,314
| 0
| 0
| null | 2017-05-24T11:07:10
| 2017-05-24T11:07:10
| null |
UTF-8
|
R
| false
| false
| 1,035
|
r
|
klein.R
|
# ---------------------------------------------------------------------------
# Build a scater SCESet for the Klein et al. mouse ES-cell dataset (GSE65525)
# by combining the day-0/2/4/7 count CSVs and labelling cells by timepoint.
# ---------------------------------------------------------------------------
library(scater)
# load data
d0 <- read.csv("klein/GSM1599494_ES_d0_main.csv", header = FALSE)
d2 <- read.csv("klein/GSM1599497_ES_d2_LIFminus.csv", header = FALSE)
d4 <- read.csv("klein/GSM1599498_ES_d4_LIFminus.csv", header = FALSE)
d7 <- read.csv("klein/GSM1599499_ES_d7_LIFminus.csv", header = FALSE)
# Column 1 of each file holds gene names; keep it once (from d0) and bind
# only the count columns of the other timepoints.
d <- cbind(d0, d2[,2:ncol(d2)], d4[,2:ncol(d4)], d7[,2:ncol(d7)])
rownames(d) <- d[,1]
d <- d[,2:ncol(d)]
colnames(d) <- paste0("cell", 1:ncol(d))
# cell annotations
# Label each cell with its timepoint; `- 1` accounts for the gene-name
# column that is not a cell.
ann <- data.frame(
    cell_type1 = c(rep("d0", ncol(d0) - 1),
               rep("d2", ncol(d2) - 1),
               rep("d4", ncol(d4) - 1),
               rep("d7", ncol(d7) - 1)))
rownames(ann) <- colnames(d)
pd <- new("AnnotatedDataFrame", data = ann)
# create scater object
sceset <- newSCESet(countData = as.matrix(d), phenoData = pd, logExprsOffset = 1)
sceset <- calculateQCMetrics(sceset)
# use gene names as feature symbols
sceset@featureData@data$feature_symbol <- featureNames(sceset)
# save data
saveRDS(sceset, "klein.rds")
|
88a648b476700deb2d94c7af625943adf134a286
|
40a6affe413e20f6b76b0fa6626cbddc0a568b3b
|
/vignettes/amigaDiskFiles.R
|
ffdb5ba6f5417fe397145bb6dd9a9ee7c19109ef
|
[] |
no_license
|
zbarutcu/adfExplorer
|
7e3f79a5b8b32754ef174ac9ccd2da8217a61739
|
5fb85322fa42e8f9e0a2067b413302d8e4539ef1
|
refs/heads/master
| 2023-07-25T22:42:38.574018
| 2021-09-05T11:28:10
| 2021-09-05T11:28:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,197
|
r
|
amigaDiskFiles.R
|
# Code chunks purled from the adfExplorer "amigaDiskFiles" vignette:
# demonstrates creating, reading and modifying Amiga Disk File (ADF) images.
## -----------------------------------------------------------------------------
library(adfExplorer)
blank.disk <- new("amigaDisk")
## -----------------------------------------------------------------------------
## Create with constructor:
blank.block <- new("amigaBlock")
## Extract the first block from an amigaDisk object:
blank.block <- amigaBlock(blank.disk, block = 0)
## -----------------------------------------------------------------------------
# Map a physical disk location (sector/side/cylinder) to a block index.
get.blockID(disktype = "DD", sector = 4, side = 0, cylinder = 35)
## -----------------------------------------------------------------------------
# ...and the inverse mapping, from block index back to disk location.
get.diskLocation(disktype = "DD", block = 1231)
## -----------------------------------------------------------------------------
blank.disk <- blank.amigaDOSDisk(diskname = "empty")
print(blank.disk)
## -----------------------------------------------------------------------------
data("adf.example")
## ----eval=FALSE---------------------------------------------------------------
#  amigaBlock(adf.example, 880)
## ----eval=FALSE---------------------------------------------------------------
#  amigaBlock(adf.example, 881)
## -----------------------------------------------------------------------------
list.adf.files(adf.example)
## -----------------------------------------------------------------------------
## get the file from the amigaDisk object:
startup <- get.adf.file(adf.example, "df0:s/Startup-Sequence")
## the file content is returned as raw data.
## let's convert it to text:
startup <- rawToChar(startup)
## let's show it
cat(startup)
## -----------------------------------------------------------------------------
## first get the file as raw data.
mod.raw <- get.adf.file(adf.example, "df0:mods/mod.intro")
## For the rest of the example we need
## the ProTrackR package
if (requireNamespace("ProTrackR", quietly = TRUE)) {
# Read the raw bytes as a ProTracker module via an in-memory connection.
con <- rawConnection(mod.raw, "rb")
## and read it as a ProTracker module
mod <- ProTrackR::read.module(con)
close(con)
## plot the first sample from the module:
par(mar = c(5, 4, 0, 0) + 0.1)
plot(ProTrackR::waveform(ProTrackR::PTSample(mod, 1)),
type = "l", ylab = "Amplitude")
## and to play it, uncomment the following line:
## ProTrackR::playMod(mod)
}
## -----------------------------------------------------------------------------
adf.example <- dir.create.adf(adf.example, "temp")
## -----------------------------------------------------------------------------
# Copy a file from the host filesystem onto the virtual disk.
adf.example <- put.adf.file(adf.example,
system.file("DESCRIPTION", package = "adfExplorer"),
"DF0:temp")
## -----------------------------------------------------------------------------
# Raw vectors can be written directly as file content.
adf.example <- put.adf.file(adf.example,
charToRaw("This is just some text to create some content"),
"DF0:temp/example.txt")
## -----------------------------------------------------------------------------
list.adf.files(adf.example, "DF0:temp/")
## -----------------------------------------------------------------------------
adf.example <-
adf.file.remove(adf.example, "DF0:temp")
list.adf.files(adf.example, "DF0:")
|
b5e95110aa1fad4a4cfeeace3580696f123f93d2
|
6acd86b9f9e76bb0eb3c08c650e16354d32eee77
|
/traclus/partitioning.R
|
3b62076b4daa7489242b84b1f47381b32e1f8553
|
[] |
no_license
|
yuen26/hntaxicab-shiny
|
4fab3117f965240546fe275f1ff2b57cec2c9794
|
211d99995ee07152a7ab0d2489d501b70c44c222
|
refs/heads/master
| 2021-08-27T23:31:13.422521
| 2017-12-10T19:27:18
| 2017-12-10T19:27:18
| 99,350,497
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,664
|
r
|
partitioning.R
|
# ===================== MDL functions ======================

# Description cost L(H): the cost of representing the sub-trajectory
# between `startIndex` and `endIndex` by the single straight segment
# joining those two points, measured as log2 of the segment's Euclidean
# length. Lengths below 1 are clamped to 1 so the cost is never negative.
descriptionCost <- function(trajectory, startIndex, endIndex) {
  seg_len <- measureEuclidDistance(trajectory[[startIndex]],
                                   trajectory[[endIndex]])
  log2(max(seg_len, 1))
}
# Encoding cost L(D|H): how well the single segment from `startIndex`
# to `endIndex` approximates the original segments it replaces. For
# every original segment in the range we add log2 of its perpendicular
# distance and log2 of its angular distance to the approximating
# segment, each clamped below at 1 so no individual term is negative.
#
# Returns 0 for an empty range (endIndex <= startIndex): the original
# `startIndex:(endIndex - 1)` loop ran BACKWARDS in that case and would
# have indexed segments outside the requested range.
encodingCost <- function(trajectory, startIndex, endIndex) {
  if (endIndex <= startIndex) {
    return(0)
  }
  startComponent <- trajectory[[startIndex]]
  endComponent <- trajectory[[endIndex]]
  total <- 0  # renamed from `sum`, which shadowed base::sum
  for (i in startIndex:(endIndex - 1)) {
    startSegment <- trajectory[[i]]
    endSegment <- trajectory[[i + 1]]
    perpendicularDistance <- measurePerpendicularDistance(
      startComponent, endComponent, startSegment, endSegment
    )
    angleDistance <- measureAngleDistance(
      startComponent, endComponent, startSegment, endSegment
    )
    total <- total +
      log2(max(perpendicularDistance, 1)) +
      log2(max(angleDistance, 1))
  }
  total
}
# MDL cost WITH partitioning at `endIndex`: hypothesis cost (the
# approximating segment) plus the cost of encoding the data given it.
MDLPar <- function(trajectory, startIndex, endIndex) {
  cost_hypothesis <- descriptionCost(trajectory, startIndex, endIndex)
  cost_data_given_h <- encodingCost(trajectory, startIndex, endIndex)
  cost_hypothesis + cost_data_given_h
}
# MDL cost WITHOUT partitioning: keeping the original points encodes the
# data exactly, so only the description cost term remains.
MDLNoPar <- function(trajectory, startIndex, endIndex) {
  return(descriptionCost(trajectory, startIndex, endIndex))
}
# =========== Approximate Trajectory Partitioning ============

# Greedy MDL-based partitioning of one trajectory (a list of points,
# each a vector whose first two entries are assumed to be x and y) into
# a list of line segments, each stored as c(start_point, end_point).
#
# Fixes over the original:
#  * the local variable `length` shadowed base::length — renamed;
#  * `1:(n - 1)`-style loops ran backwards (and indexed out of bounds)
#    for degenerate inputs — now guarded with seq_len();
#  * the redundant `flags[i] == 1` re-check in the stop-point pass is
#    gone (no earlier iteration could have cleared flag i).
partitioning <- function(trajectory) {
  n_points <- length(trajectory)

  # Pass 1: pick characteristic points. Extend the current segment one
  # point at a time; as soon as partitioning at the current point has a
  # larger MDL cost than not partitioning, cut at the previous point.
  cp <- list(trajectory[[1]])
  startIndex <- 1
  run_length <- 1
  while (startIndex + run_length <= n_points) {
    currIndex <- startIndex + run_length
    costPar <- MDLPar(trajectory, startIndex, currIndex)
    costNoPar <- MDLNoPar(trajectory, startIndex, currIndex)
    if (costPar > costNoPar) {
      # partition at the previous point and restart the scan from it
      cp[[length(cp) + 1]] <- trajectory[[currIndex - 1]]
      startIndex <- currIndex - 1
      run_length <- 1
    } else {
      run_length <- run_length + 1
    }
  }
  cp[[length(cp) + 1]] <- trajectory[[n_points]]

  # Pass 2: drop "stop points" — a characteristic point whose (x, y)
  # equals its successor's would produce a zero-length segment. The
  # last point is always kept.
  keep <- rep(TRUE, length(cp))
  for (i in seq_len(length(cp) - 1)) {
    if (cp[[i]][1] == cp[[i + 1]][1] && cp[[i]][2] == cp[[i + 1]][2]) {
      keep[i] <- FALSE
    }
  }
  newCp <- cp[keep]

  # Pass 3: one line segment per consecutive pair of surviving points.
  # seq_len() makes this a no-op (empty result) when fewer than two
  # characteristic points remain, where the original
  # `1:(length(newCp) - 1)` would have indexed out of bounds.
  lineSegments <- vector("list", max(length(newCp) - 1, 0))
  for (i in seq_len(max(length(newCp) - 1, 0))) {
    lineSegments[[i]] <- c(newCp[[i]], newCp[[i + 1]])
  }
  return(lineSegments)
}
|
9c3e4a078e6aa933992b89420c11a9b9a474edc7
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googledfareportingv27.auto/man/TagSetting.Rd
|
168b62e9139f520c5680f111f17e7bf76c1cef5b
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 806
|
rd
|
TagSetting.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dfareporting_objects.R
\name{TagSetting}
\alias{TagSetting}
\title{TagSetting Object}
\usage{
TagSetting(additionalKeyValues = NULL, includeClickThroughUrls = NULL,
includeClickTracking = NULL, keywordOption = NULL)
}
\arguments{
\item{additionalKeyValues}{Additional key-values to be included in tags}
\item{includeClickThroughUrls}{Whether static landing page URLs should be included in the tags}
\item{includeClickTracking}{Whether click-tracking string should be included in the tags}
\item{keywordOption}{Option specifying how keywords are embedded in ad tags}
}
\value{
TagSetting object
}
\description{
TagSetting Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Tag Settings
}
|
a2ee5f148a770b42656407949409dfd3e41e5948
|
69a62f8dab62e35a0fcb2f23bfd35bc4f401324f
|
/tests/testthat.R
|
413a30f4492fce3ade4f63b15b652ea27e806b0c
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
kleinschmidt/daver
|
d86f880374094bcfe96ea7683a04e85c96ae30e1
|
501c8dfbf77af49ff512beae7d09e582dd8adc94
|
refs/heads/master
| 2021-01-19T04:37:35.600284
| 2018-03-28T19:32:21
| 2018-03-28T19:32:21
| 46,999,898
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 54
|
r
|
testthat.R
|
# Standard testthat entry point: `R CMD check` sources this file, and
# test_check() discovers and runs every test file under tests/testthat/.
library(testthat)
# Load the package under test so its exports are visible to the tests.
library(daver)
test_check("daver")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.