blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b913af87b88101d1668dde1f553ad1aa0dc6f65e | ebbe08d58a57ae2e9d308a12df500e1e0ef8d098 | /scRef/codes/Benchmark_bak/fig_cross_species.R | fb1e88aec855a3e5e24faf76aa5903dfdc4e79ca | [
"MIT"
] | permissive | Drizzle-Zhang/bioinformatics | a20b8b01e3c6807a9b6b605394b400daf1a848a3 | 9a24fc1107d42ac4e2bc37b1c866324b766c4a86 | refs/heads/master | 2022-02-19T15:57:43.723344 | 2022-02-14T02:32:47 | 2022-02-14T02:32:47 | 171,384,799 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,292 | r | fig_cross_species.R | library(ggplot2)
library(RColorBrewer)
# data: reference -> target dataset pairs, matched by position across the
# vectors below (BaronM/MCA are the references; panc8/pbmc* the targets)
ref.dataset <- c('BaronM', 'BaronM', 'MCA', 'MCA')
sc.dataset <- c('panc8_celseq2', 'panc8_smartseq2', 'pbmc3k', 'pbmcsca_10Xv2')
# human-readable column labels ("reference(platform) -> target(platform), tissue")
sc.legends <- c('Baron(inDrop) -> Muraro(CEL-seq2), Pancreas',
                'Baron(inDrop) -> Segerstolpe(SMART-seq2), Pancreas',
                'Han(Microwell-seq) -> Butler(10X v1), PBMC',
                'Han(Microwell-seq) -> Ding(10X v2), PBMC')
# input result tables and output figure directories
path.res <- '/home/zy/scRef/Benchmark/cross_species/'
path.fig <- '/home/zy/scRef/figure/cross_species/'
# classification methods, in the fixed row order used throughout the figure
methods <- c("scMAGIC", "singleR", "scmap-cell", "scmap-cluster", "CHETAH", "scPred",
             "sciBet", "singleCellNet", "scID", "scClassify")
# Read one evaluation metric ("Accuracy" or "Macro F1") for every
# reference/target dataset pair and return a methods x datasets data.frame.
# Replaces two near-identical copy-pasted loops; also uses seq_along()
# instead of 1:length().
read_metric <- function(term) {
  cols <- lapply(seq_along(sc.dataset), function(i) {
    file.res <- paste0(path.res, 'results_', ref.dataset[i], '_', sc.dataset[i], '.txt')
    sub.res <- read.delim(file = file.res, row.names = 1, stringsAsFactors = FALSE)
    sub.res <- sub.res[sub.res$term == term, ]
    rownames(sub.res) <- sub.res$method
    # index by the fixed `methods` vector so rows line up across datasets
    sub.plot <- data.frame(sub.res[methods, 'value'], row.names = methods)
    names(sub.plot) <- sc.legends[i]
    sub.plot
  })
  do.call(cbind, cols)
}
# Accuracy
df.plot.acc <- read_metric('Accuracy')
df.plot.acc$`Mean Accuracy` <- rowMeans(df.plot.acc)
# Macro F1
df.plot.macrof1 <- read_metric('Macro F1')
# fixed label: this is the mean *Macro* F1 (original column was mislabeled
# "Mean Micro F1" although it averaged the Macro F1 values)
df.plot.macrof1$`Mean Macro F1` <- rowMeans(df.plot.macrof1)
# combine: columns 1-5 accuracy (+mean), columns 6-10 macro F1 (+mean),
# rows sorted by mean accuracy, best first
df.plot <- cbind(df.plot.acc, df.plot.macrof1)
df.plot <- df.plot[order(df.plot$`Mean Accuracy`, decreasing = TRUE), ]
# Heatmap of all metrics: rows = methods (pre-sorted by mean accuracy),
# columns = 4 accuracy datasets + mean, then 4 macro-F1 datasets + mean.
# T/F shorthand replaced with TRUE/FALSE (T and F are reassignable).
pheatmap::pheatmap(df.plot,
                   color = colorRampPalette(rev(brewer.pal(n = 9, name = "RdYlBu"))[1:9])(100),
                   cluster_rows = FALSE, cluster_cols = FALSE, scale = "none",
                   display_numbers = TRUE, number_format = "%.2f", fontsize_number = 15, number_color = 'black',
                   show_rownames = TRUE, show_colnames = TRUE, fontsize_row = 18, fontsize_col = 15,
                   legend = TRUE, drop_levels = FALSE,
                   # gaps after columns 5 and 10 separate the Accuracy and Macro F1 panels
                   gaps_col = c(5, 10), angle_col = '315',
                   filename = paste0(path.fig, 'heatmap_CS.png'), width = 11, height = 12
)
# boxplot of per-dataset scores by method and metric
# Reshape a methods x datasets score matrix to long format. Replaces the
# original grow-by-rbind double loop, which re-copied df.box on every row
# (O(n^2)); output row order differs but the plot is identical.
melt_scores <- function(mat, type) {
  m <- as.matrix(mat)
  data.frame(
    Accuracy = as.vector(t(m)),                    # row-major: method1/ds1, method1/ds2, ...
    Method = rep(rownames(m), each = ncol(m)),
    Dataset = rep(colnames(m), times = nrow(m)),
    Type = type,
    stringsAsFactors = FALSE
  )
}
df.box <- rbind(
  melt_scores(df.plot[, 1:4], 'Accuracy'),   # per-dataset accuracies (col 5 is the mean, excluded)
  melt_scores(df.plot[, 6:9], 'Macro F1')    # per-dataset macro F1 (col 10 is the mean, excluded)
)
# order methods by overall performance; df.plot rows are already sorted
df.box$Method <- factor(df.box$Method, levels = rev(rownames(df.plot)))
plot.box <-
  ggplot(df.box, aes(x = Method, y = Accuracy, color = Type)) +
  geom_boxplot() +
  theme_classic() + coord_flip() +
  labs(title = "", y = '', x = '', fill = 'Metrics') +
  # method names already shown by the adjacent heatmap, so hide them here
  theme(axis.text.y = element_blank())
ggsave(filename = 'boxplot_CS.png',
       path = path.fig, plot = plot.box,
       units = 'cm', height = 11, width = 14)
|
589146b330ab688cef783830a7d246e81bfb69b0 | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed_and_cleaned/10485_2/rinput.R | b63b4c2d760a28a05ad91551b86f320d016cc1dd | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 137 | r | rinput.R | library(ape)
# Read the Newick tree, then remove its root (ape::unroot) before it is
# written back out on the following line.
testtree <- read.tree("10485_2.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="10485_2_unrooted.txt") |
6d45c29fa0ba451c023a0f068ac8910cbec89edd | 2b08c32c1b91f99414f808803d2fff4dc43e5ff1 | /R/buildparams.r | fa145171882f354a77057563d7b8ab0c730b462e | [] | no_license | AustralianAntarcticScience/ptrackr | 598d5bab337acc111c9d0cd7f92fd3869bd42f03 | 86d8440500b70b8e11641dc43a432fafbed3d728 | refs/heads/master | 2021-01-11T04:27:37.793303 | 2016-09-02T01:13:00 | 2016-09-02T01:13:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,072 | r | buildparams.r | #' buildparams
#'
#' Function to set up parameters for the sedimentation equation used in the trackit_2D function
#'
#' @param speed sinking speed
#' @param r particle radius
#'
#' @return list(p0 = p0, p1 = p1, cosO = cosO, g = g, K = K, E = E, r = r, Wd = Wd, Ucsq = Ucsq, SedFunct = SedFunct)
#' @export
buildparams <- function(speed,
                        ## from Jenkins & Bombosch (1995)
                        # Defaults below are lazily evaluated in the call frame,
                        # so Ucsq, Wd and SedFunct may reference the preceding
                        # arguments (p0, p1, g, E, r, K, speed).
                        p0 =1030, #kg/m^3 seawater density
                        p1 =1100, #kg/m^3 diatom density (quick look-up average from Ierland & Peperzak (1984))
                        cosO =1, # NOTE(review): labelled "1 for 90 degrees", but cos(90 deg) = 0 -- confirm intent
                        g =9.81, #acceleration due to gravity
                        K =0.0025, #drag coefficient
                        E =1, #aspect ratio of settling flocks (spherical = 1 ??)
                        r =0.00016, #particle-radius
                        Ucsq =-(0.05*(p0-p1)*g*2*(1.5*E)^(1/3)*r)/(p0*K), #squared critical velocity term
                        Wd =speed,#/24/3600,
                        SedFunct =function(U_div,dens) 1800*-(p1*(dens)*Wd*cos(90)*(U_div)*(U_div))/p0){ # NOTE(review): cos(90) is 90 *radians* (~ -0.448), not 90 degrees; the cosO argument is unused here -- confirm
  # Returns all sedimentation parameters (and the sedimentation function,
  # a closure over this frame) as a named list for use by trackit_2D.
  list(p0 = p0, p1 = p1, cosO = cosO, g = g, K = K, E = E, r = r, Wd = Wd, Ucsq = Ucsq, SedFunct = SedFunct)
}
|
121eaed97b496fd11347c0cef5e8211df69988e4 | 10fa702aecca039f7221eb63b1e86a2c4fda526e | /plot2.R | 0f2ceea554917df8575a44f4052c0163d60afbf2 | [] | no_license | huckdn/Exploratory-Data-Analysis---Project1 | d787c864efe7e3de0f7c626500b364b6fb803ff3 | 2899c351ba6edc92d41f8d2cbb015f5087c9fdf9 | refs/heads/master | 2021-01-19T22:10:07.550144 | 2015-05-10T23:03:30 | 2015-05-10T23:03:30 | 35,390,744 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 665 | r | plot2.R | rawData <- read.table("household_power_consumption.txt",header=T,sep=";", na.strings="?",colClasses=c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"))
# Build a proper timestamp column, subset to 2007-02-01..2007-02-02, and plot.
rawData$DateTime <- paste(rawData$Date, rawData$Time)
# as.POSIXct(): strptime() returns POSIXlt, a list-like class that misbehaves
# as a data.frame column; store the compact POSIXct representation instead
rawData$DateTimeActual <- as.POSIXct(strptime(rawData$DateTime, format = "%d/%m/%Y %H:%M:%S"))
tidy <- subset(rawData, rawData$DateTimeActual >= as.POSIXct("2007-02-01 00:00:00") & rawData$DateTimeActual <= as.POSIXct("2007-02-02 23:59:00"))
# second plot: Global Active Power over time (device closed by dev.off() below)
png(file = "plot2.png", width = 480, height = 480)
plot(tidy$Global_active_power, x = tidy$DateTimeActual,
     ylab = "Global Active Power (kilowatts)", xlab = "", type = 'l')
dev.off() |
f5cc42db6ffe5f778d4facc6c04cda7cc42fc7f7 | 382624a9e4b24389735e5490f8333b112feb103d | /Simulation Functions.R | 625a3aa091f0b04aa11fb852eee14bf792e5f42a | [] | no_license | joshcullen/method-comparison | 283f92c2dd0d2af0555261fdf151a06b56a70e99 | d46399b569e5b30d172f50524bf809c20c0669fb | refs/heads/master | 2020-11-25T14:05:12.536407 | 2020-02-05T15:51:55 | 2020-02-05T15:51:55 | 228,704,768 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,333 | r | Simulation Functions.R | ### CRW model
CRW.sim <- function(n, behav, SL.params, TA.params, Z0) {
  # Simulate a correlated random walk with behavior-specific movement
  # parameters (gamma step lengths, wrapped-Cauchy turning angles).
  #   n         - number of steps drawn for each behavioral phase
  #   behav     - vector of behavioral states per phase: 1 = Rest,
  #               2 = Exploratory, anything else = Transit; parameter rows
  #               must be supplied in that order
  #   SL.params - data.frame of gamma (shape, scale), one row per state
  #   TA.params - data.frame of wrapped-Cauchy (mean, concentration), one row per state
  #   Z0        - initial (x, y) location
  # Returns a data.frame with x, y, SL, TA, true.behav; the first row has NA
  # step metrics because a step requires two locations.

  # map a behavioral state to its parameter row (anything not 1 or 2 -> Transit,
  # preserving the original fall-through behavior)
  state.row <- function(b) if (b == 1) 1 else if (b == 2) 2 else 3

  # step lengths: one block of n gamma draws per behavioral phase
  SL <- vector("list", length(behav))
  for (i in seq_along(behav)) {
    j <- state.row(behav[i])
    SL[[i]] <- rgamma(n, shape = SL.params[j, 1], scale = SL.params[j, 2])
  }
  SL <- unlist(SL)

  # turning angles: wrapped-Cauchy draws re-wrapped from [0, 2*pi) to (-pi, pi]
  # (magrittr pipe removed; plain temporaries keep this base-R + circular only)
  TA <- vector("list", length(behav))
  for (i in seq_along(behav)) {
    j <- state.row(behav[i])
    ang <- rwrappedcauchy(n, mu = circular(TA.params[j, 1]), rho = TA.params[j, 2])
    TA[[i]] <- ifelse(ang > pi, ang - (2 * pi), ang)
  }
  TA <- unlist(TA)

  # cumulative heading, then per-step displacement components
  Phi <- cumsum(TA)
  dX <- SL * cos(Phi)
  dY <- SL * sin(Phi)
  # absolute X-Y values, starting from Z0
  X <- c(Z0[1], Z0[1] + cumsum(dX))
  Y <- c(Z0[2], Z0[2] + cumsum(dY))
  track <- data.frame(x = X, y = Y, SL = c(NA, SL), TA = c(NA, TA),
                      true.behav = as.factor(c(NA, rep(behav, each = n))))
  track
}
#----------------------------------------------
### BRW model (single phase)
BRW.sim=function (n = 50, a, b, Z.center = c(0,0), Z0 = c(3,4), rho) {
  # Simulate a biased random walk of n locations attracted toward Z.center.
  #   n        - number of locations (the loop runs i = 2:n; assumes n >= 2)
  #   a, b     - gamma parameters for the step-length distribution
  #   Z.center - attraction center (AC) coordinates
  #   Z0       - starting location
  #   rho      - wrapped-Cauchy concentration around the (noisy) AC bearing
  # Returns a data.frame of the simulated x, y coordinates.
  #based on BCRW model described in Bailey et al. 2018 "Navigational efficiency in a biased and correlated random walk model of individual animal movement". Ecology. 99(1): 217-223.
  Z <- matrix(c(Z0, rep(NA, n*2 - 2)), n, 2, byrow = T) #matrix of simulated locations
  ref.pt<- c(1,0) #for reference vector
  #function to calc angle between 2 vectors (unsigned, in [0, pi])
  angle <- function(x,y){
    dot.prod <- x%*%y
    norm.x <- norm(x,type="2")
    norm.y <- norm(y,type="2")
    theta <- acos(dot.prod / (norm.x * norm.y))
    as.numeric(theta)
  }
  for (i in 2:n) {
    u<- as.matrix(c(Z.center-Z[i-1,])) #vector formed by location and AC
    v<- as.matrix(ref.pt) #reference vector for positive x-axis (representing 0 rad)
    #angle between loc-AC vector and the positive x-axis multiplied by the sign of the vector (x-component)
    # NOTE(review): the quadrant labels in the branch comments below look
    # inconsistent with the conditions; the net effect is ang.sign = -1 when
    # u[2] < 0 (with nonzero u[1]) and +1 otherwise, and any exact-zero
    # component falls to the else branch -- confirm intended.
    if (u[1] < 0 & u[2] < 0) {
      ang.sign<- -1 #1st quadrant
    } else if (u[1] > 0 & u[2] < 0) {
      ang.sign<- -1 #2nd quadrant
    } else if (u[1] > 0 & u[2] > 0) {
      ang.sign<- 1 #3rd quadrant
    } else {
      ang.sign<- 1 #4th quadrant
    }
    #angle directed towards AC from location
    omega<- angle(t(u), v) * ang.sign
    # angle is NaN/NA when the walker sits exactly on the AC (zero-norm u)
    if (is.na(omega))
      omega<- 0
    #generate navigation error (-pi < theta < pi) so that mean of cos(theta) is in [0,1]
    phi <- rnorm(1, 0, 0.5)
    #angle towards AC + error
    mu <- omega + phi
    # shift a negative mean angle into [0, 2*pi) for circular()
    if (mu < 0)
      mu<- 2 * pi + (omega + phi)
    theta <- rwrappedcauchy(1, mu = circular(mu), rho = rho) %>% as.numeric()
    # new step
    dist<- rgamma(1, a, b) #step length
    dx <- dist * cos(theta)
    dy <- dist * sin(theta)
    # actual X-Y values
    x <- Z[i-1, 1] + dx
    y <- Z[i-1, 2] + dy
    Z[i,] <- c(x,y)
  }
  track <- data.frame(x = Z[,1], y = Z[,2])
  return(track)
}
#----------------------------------------------
### multiBRW model (multi-phase)
multiBRW.sim=function (Z0, Z.centers, n, nphases, a, b, rho, ...) {
  # Simulate a multi-phase biased random walk: nphases consecutive BRW.sim()
  # tracks of n locations each, each phase attracted to one sampled row of
  # Z.centers, with each phase starting where the previous one ended.
  # Returns the stacked track with a true.ac column giving the AC index
  # used for each location.
  Z.list <- list()
  Z.centers<- as.matrix(Z.centers)
  #for visiting each Z.center (AC) once depending on nphases
  # Sampling scheme: the first nrow(Z.centers) phases visit every AC exactly
  # once (sample without replacement); any remaining phases reuse ACs drawn
  # with replacement.
  # NOTE(review): rep(sample(...), fold-1) repeats ONE with-replacement draw
  # rather than drawing fresh samples for each fold -- confirm intended.
  if (nphases > nrow(Z.centers) & nphases %% nrow(Z.centers) == 0) {
    fold<- ceiling(nphases / nrow(Z.centers)) #number of times to sample
    ind<- c(sample(x = nrow(Z.centers), size = nrow(Z.centers), replace = FALSE),
            rep(sample(x = nrow(Z.centers), size = nrow(Z.centers), replace = TRUE), fold-1))
  } else if (nphases > nrow(Z.centers) & nphases %% nrow(Z.centers) != 0) {
    fold<- ceiling(nphases / nrow(Z.centers)) #number of times to sample
    tmp<- nphases %% nrow(Z.centers) #remainder
    ind<- c(sample(x = nrow(Z.centers), size = nrow(Z.centers), replace = FALSE),
            rep(sample(x = nrow(Z.centers), size = nrow(Z.centers), replace = TRUE), fold-2),
            sample(x = nrow(Z.centers), size = tmp, replace = TRUE))
  } else {
    ind<- sample(x = nrow(Z.centers), size = nphases, replace = FALSE)
  }
  for (i in 1:nphases) {
    # each phase starts at the caller's Z0, then at the last location of the
    # previous phase
    if (i == 1) {
      Z0 = Z0
    } else {
      Z0 = Z.list[[i - 1]][n,] %>% as.numeric()
    }
    Z.list[[i]] <- BRW.sim(n = n, a = a, b = b,
                           Z.center = Z.centers[ind[i],],
                           Z0 = Z0, rho = rho)
  }
  track<- do.call(rbind.data.frame, Z.list)
  track$true.ac<- rep(ind, each = n)
  return(track)
}
07b933965c363a3ec0e812e9c8b9610f8eeaf40c | c4b86ad27a16524b978bcad04d3920b98bd77aba | /langaage.R | 6df05417a54b2443b56f333a67db2e2834277bac | [] | no_license | nripeshtrivedi/Conversion_analysis | 5ec900a97ba685720356cba1b7e1f7111a867b90 | d9c9ac2c7a4a2e4e46492c82f530036b256120de | refs/heads/master | 2020-12-24T07:04:03.717117 | 2016-11-20T07:26:29 | 2016-11-20T07:26:29 | 73,382,456 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,918 | r | langaage.R | library(rworldmap)
ddf = read.table(text="
country Number_of_Languages
ATG 1
AFG 3
ALB 11
DZA 2
AND 3
AGO 1
ARG 5
ARM 2
ASM 2
ABW 3
AUS 3
AUT 4
AZE 3
BHS 1
BHR 3
BGD 2
BRB 1
BLR 2
BEL 3
BLZ 3
BEN 1
BMU 2
BTN 1
BOL 2
BIH 3
BWA 1
BRA 4
VGB 1
BRN 3
BGR 3
BFA 1
MMR 1
BDI 3
KHM 2
CMR 2
CAN 2
CPV 1
CYM 1
CAF 1
TCD 3
CHL 1
CHN 4
CXR 3
CCK 2
COL 1
COM 2
COD 2
COG 1
COK 1
CRI 1
HRV 7
CUB 1
CYP 3
CZE 1
CIV 1
DNK 2
DJI 3
DMA 2
DOM 1
TLS 3
ECU 2
EGY 3
SLV 1
GNQ 2
ERI 1
EST 2
ETH 4
FLK 1
FJI 2
FIN 3
GUF 1
PYF 1
GAB 1
GMB 2
GEO 3
DEU 1
GHA 1
GIB 4
GRC 3
GRL 1
GRD 2
GLP 2
GUM 1
GTM 1
GGY 3
GIN 1
GUY 5
VAT 3
HND 2
HKG 2
HUN 1
ISL 3
IND 15
IDN 4
IRN 5
IRQ 3
IRL 3
ISR 3
ITA 3
JAM 2
JPN 1
JES 2
JOR                         2
KAZ 2
KEN 1
KIR 1
KOR 1
PRK 2
KWT 2
KGZ 1
LAO 2
LVA                         3
LBN 4
LSO 3
LBR 1
LBY 3
LIE 2
LTU 3
LUX 2
MAC 3
MKD 5
MDG 2
MYS 8
MDV 1
MLI 1
MLT 2
IMN 2
MTQ 2
MRT 1
MUS 3
MYT 1
MEX 1
FSM 1
MDA 3
MCO 3
MNG 1
MSR 1
MAR 2
MOZ 1
NAM 3
NRU 1
NPL 3
NLD 2
ANT 4
NCL 1
NZL 1
NIC 1
NER 2
NGA 4
NFK 1
MNP 2
NOR 3
OMN 17
PAK 5
PLW 5
PAN 2
PNG 1
PRY 1
PER 2
PHL 5
POL 1
PRT 1
PRI 2
QAT 1
ROU 3
RUS 5
RWA 3
REU 2
SHN 1
KNA 1
LCA 1
SPM 1
VCT 1
ESP 2
WSM 1
SMR 1
SAU 1
SEN 1
SYC 2
SLE 1
SGP 6
SVK 4
SVN 2
SLB 1
SOM 3
ZAF 1
LKA 1
SDN 2
FRA 2
SUR 4
SJM 2
SWZ 1
SWE 2
CHE 8
SYR 4
ZWE 2
ZMB 1
YEM 1
ESH 2
WLF 1
VGB 4
VIR 4
VNM 5
VEN 1
VUT 3
UZA 3
UZB 2
URY 2
USA 34
UK 3
ARE 5
UKR 5
UGA 3
TUV 1
TCA 1
TKM 2
TUR 5
TUN 2
TTO 5
TON 1
TKL 1
TGO 1
THA 2
TZA 3
TJK 1
TWN 2
STP 1
KGZ 1
", header=T)
# create a map-shaped window
mapDevice('x11')
# join the table to a coarse-resolution world map.
# The `country` column holds ISO3 codes (ATG, AFG, DZA, ...), so join on
# "ISO3" -- the original joinCode="NAME" matches full country names and
# would leave essentially every row unmatched.
spdf <- joinCountryData2Map(ddf, joinCode = "ISO3", nameJoinColumn = "country")
mapParams <- mapCountryData(spdf, nameColumnToPlot = "Number_of_Languages",
                            catMethod = "categorical", mapTitle = "",
                            colourPalette = "terrain", addLegend = FALSE)
# custom categorical legend (the default one was suppressed above)
do.call(addMapLegendBoxes,
        c(mapParams,
          cex = 0.8,
          ncol = 2,
          x = 'bottomleft',
          title = 'Count of languages'))
|
cb42ca087077badb96138c96ddaec4d07f0c4302 | 4c48d5612451f1d35fd3c263bd2002d82af91653 | /ML/boosting.R | 1e114f92402daa50584b78e217ca767979bdb841 | [] | no_license | EvanOman/RProjects | d64a153f654fc268a82d774f5ec5f91b10a46ded | 9b05d3c9ff6a401e6f40d1b8a7f9069c280b7b29 | refs/heads/master | 2021-06-24T14:03:33.619988 | 2017-02-09T22:13:15 | 2017-02-09T22:13:15 | 34,800,987 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 962 | r | boosting.R | # Basic idea of boosting:
# 1. Take lots of possibly weak predictors
# 2. Weight them and add them up (kind of like bagging)
# 3. Get a stronger predictor
# More detail:
# 1. Start with a set of classifiers h_1,...,h_k
# 2. Create a classifier that combines classification functions: f(x) = sgn(sum_{t=1}^T alpha_t h_t (x))
# - Goal is to minimize the error
# - Iterative, slect one h at each step
# - Calculate weights based on errors
# - Upweight missed classifications and select next h
# Loading the wage data
# NOTE(review): rm(list = ls()) wipes the entire workspace of whoever runs
# or sources this script; discouraged outside throwaway interactive use.
rm(list = ls())
library(ISLR)
data(Wage)
library(ggplot2)
library(caret)
# drop logwage -- presumably because it is a deterministic transform of the
# response variable wage (target leakage); confirm
Wage <- subset(Wage, select = -c(logwage))
# 70/30 train/test split.
# NOTE(review): no set.seed() beforehand, so the split (and the fitted
# model) is not reproducible across runs.
inTrain <- createDataPartition(y=Wage$wage,p=.7,list=FALSE)
training <- Wage[inTrain,]
testing <- Wage[-inTrain,]
# Fitting the model: stochastic gradient boosting ("gbm") via caret::train
modFit <- train(wage ~ ., method="gbm", data=training, verbose=FALSE)
print(modFit)
# Plot the results: predicted vs. observed wage on the held-out set.
# NOTE(review): if this file is executed via source(), the qplot object is
# not auto-printed and the PDF will be empty; wrap in print() -- confirm
# how the script is run.
pdf("boostingPlot.pdf")
qplot(predict(modFit,testing),wage,data=testing)
dev.off() |
fd35bddfc4cf9b2b22f4b7d618800996d2a6ceb4 | 3a41adda8d1e10543cd2d72cb9a0048f79d81baf | /PJI/DATA/best/best.R | f2ec18e6aa4c5287f1c2d5e59c71bc4594f39890 | [] | no_license | AntoineCanda/M1_S2 | 5a91dffcc591cbaf3b988bde7e26e895c59c9865 | ea592675667d9a1fe441c9cd287f2826cf4bc31a | refs/heads/master | 2021-01-22T03:49:29.103441 | 2017-05-25T13:33:21 | 2017-05-25T13:33:21 | 92,406,364 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 640 | r | best.R | args <- commandArgs(trailingOnly = TRUE)
fileName <- args[1]
# Command-line args arrive as character strings; coerce so the IQR
# comparison inside conv() is numeric. The original compared
# numeric < character, which R resolves by coercing both to character and
# comparing lexicographically -- wrong results.
threshold <- as.numeric(args[2])

# Two-column whitespace-separated input: column 2 holds the series of values.
data <- read.table(fileName, header = FALSE, sep = " ")
size <- nrow(data)
windowSize <- size / 10   # sliding-window width = 10% of the series length

# Scan forward for the first window whose interquartile range of column 2
# drops below `threshold`; return that window's start index, or the series
# length when no window converges.
conv <- function(){
	for (i in 2:(size - windowSize)){
		## cat(i, ":", i+windowSize, " = ")
		if(IQR(data.matrix(data[i:(i+windowSize), 2])) < threshold){
			return(i)
		}
	}
	return(size)
}

# Format x with exactly k decimal places (returns character).
specify_decimal <- function(x, k) format(round(x, k), nsmall=k)

beginConvergence <- conv()
# mean of the 10 largest values in column 2, formatted to 5 decimals
accuracy <- specify_decimal(mean(sort(data.matrix(data[, 2]), TRUE)[1:10]), 5)

# single semicolon-separated summary line: file;convergence-start;accuracy
cat(fileName,";",beginConvergence,";",accuracy,"\n", sep="")
|
843598b20631365dd0c6ed705f46a3b00e08d701 | 021498dd1ed1eb755575e7dfbc8b8f9fae927831 | /man/ISOHierarchyLevel.Rd | 56fb1ac65c8a749c8c5b47b30807422e9256a861 | [] | no_license | 65MO/geometa | f75fb2903a4f3633a5fcdd4259fd99f903189459 | c49579eb5b2b994c234d19c3a30c5dad9bb25303 | refs/heads/master | 2020-04-08T12:22:44.690962 | 2018-11-22T22:51:57 | 2018-11-22T22:51:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 886 | rd | ISOHierarchyLevel.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ISOHierarchyLevel.R
\docType{class}
\name{ISOHierarchyLevel}
\alias{ISOHierarchyLevel}
\title{ISOHierarchyLevel}
\format{\code{\link{R6Class}} object.}
\usage{
ISOHierarchyLevel
}
\value{
Object of \code{\link{R6Class}} for modelling an ISO HierarchyLevel
}
\description{
ISOHierarchyLevel
}
\section{Fields}{
\describe{
\item{\code{value}}{}
}}
\section{Methods}{
\describe{
\item{\code{new(xml,value, description)}}{
This method is used to instantiate an ISOHierarchyLevel
}
}
}
\examples{
#possible values
values <- ISOHierarchyLevel$values(labels = TRUE)
#dataset scope
ds <- ISOHierarchyLevel$new(value = "dataset")
}
\references{
ISO 19115:2003 - Geographic information -- Metadata
}
\author{
Emmanuel Blondel <emmanuel.blondel1@gmail.com>
}
\keyword{ISO}
\keyword{hierarchyLevel}
|
29b1743b2ff68345d203df1fe6f6667561630785 | 83f721d3114f5cda645be927e1dd228fe843e84f | /Session_22/path_plot.R | 8bda395676bf0394fe33b3b4730010fbb59702d0 | [] | no_license | ahdvnd/cs112-swirl-courses | 41ad58a968dfdae235d480ef70d25f07658e59c3 | 0f155e921a8eee3af32c4e515e1b0526bb7c24fa | refs/heads/master | 2021-11-25T17:42:48.460660 | 2021-11-05T14:38:26 | 2021-11-05T14:38:26 | 249,019,001 | 1 | 6 | null | null | null | null | UTF-8 | R | false | false | 334 | r | path_plot.R | # path plot
par(mar=c(1,1,1,1))
path.plot(synth.res = synth.out,
dataprep.res = dataprep.out,
Ylab = c("real per-capita GDP (1986 USD, thousand)"),
Xlab = c("year"),
Ylim = c(0,13),
Legend = c("Basque country","synthetic Basque country"),
abline(v = 1969, col = "red")
)
|
2e4526bf7a3c2b19e58cdd911cf0c78de4b2fb01 | 96ee862a4c92ed6a97787c1521d64934794cae32 | /man/evall.Rd | 6822757dcdc4c1e6f8ab48f84c5e8153fa86059e | [] | no_license | cran/senstrat | 708a8881ec49580d03e11c7ee64b351ede60e3da | d7cecc6ddc58d4263c0b8dc789524398adf937b6 | refs/heads/master | 2021-01-01T06:20:01.588080 | 2017-07-16T20:01:17 | 2017-07-16T20:01:17 | 97,411,096 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,966 | rd | evall.Rd | \name{evall}
\alias{evall}
\title{
Compute expectations and variances for one stratum.
}
\description{
Of limited interest to most users, the evall() function plays an internal role in 2-sample and stratified sensitivity analyses. The expectation and variance returned by the evall() function are defined in the third paragraph of section 4, page 495, of Rosenbaum and Krieger (1990). The function evall() calls the function ev() to determine the expectation and variance of the test statistic for an unobserved covariate u with length(z)-m 0's followed by m 1's, doing this for m=1,...,length(z)-1.
}
\usage{
evall(sc, z, g, method)
}
\arguments{
\item{sc}{
A vector of scored outcomes for one stratum. For instance, for Wilcoxon's rank sum test,
these would be the ranks of the outcomes in the current stratum.
}
\item{z}{
Treatment indicators, with length(z)=length(sc). Here, z[i]=1 if i is treated and z[i]=0 if i is control.
}
\item{g}{
The sensitivity parameter \eqn{\Gamma}, where \eqn{\Gamma \ge 1}.
}
\item{method}{
If method="RK" or if method="BU", exact expectations and variances are used in a large sample approximation. Methods "RK" and "BU" should give the same answer, but "RK" uses formulas from Rosenbaum and Krieger (1990), while "BU" obtains exact moments for the extended hypergeometric distribution using the BiasedUrn package and then applies Proposition 20, page 155, section 4.7.4 of Rosenbaum (2002). In contrast, method="LS" does not use exact expectations and variances, but rather uses the large sample approximations in section 4.6.4 of Rosenbaum (2002). Finally, method="AD" uses method="LS" for large strata and method="BU" for smaller strata.
}
}
\details{
The evall() function is called by the sen2sample() function and the senstrat() function.
}
\value{
A data.frame with length(z)-1 rows and three columns. The first column, m, gives the number of 1's in the unobserved covariate vector, u. The second column, expect, and the third column, var, give the expectation and variance of the test statistic for this u.
}
\references{
Rosenbaum, P. R. and Krieger, A. M. (1990). Sensitivity of two-sample permutation inferences in observational studies. Journal of the American Statistical Association, 85, 493-498.
Rosenbaum, P. R. (2002). Observational Studies (2nd edition). New York: Springer. Section 4.6.
}
\author{
Paul R. Rosenbaum
}
\note{
The example is from Table 1, page 497, of Rosenbaum and Krieger (1990). The example is also Table 4.15, page 146, in Rosenbaum (2002). The example refers to Cu cells. The data are originally from Skerfving et al. (1974).
}
\examples{
z<-c(rep(0,16),rep(1,23))
CuCells<-c(2.7, .5, 0, 0, 5, 0, 0, 1.3, 0, 1.8, 0, 0, 1.0, 1.8,
0, 3.1, .7, 4.6, 0, 1.7, 5.2, 0, 5, 9.5, 2, 3, 1, 3.5,
2, 5, 5.5, 2, 3, 4, 0, 2, 2.2, 0, 2)
evall(rank(CuCells),z,2,"RK")
}
\keyword{ htest }
\keyword{ robust }
|
96bea89f458af289bbad472ea8fe05bc9ba892a2 | 7bbbb8a9ad9e725cf1d49b5137da16e4e64ad512 | /man/mdt.feedback.no_score.Rd | 9422479b50ec7dddcd35281b06fe5c128dc2db8f | [
"MIT"
] | permissive | pmcharrison/mdt | 56c5d5c75516faf88a608cf12f2349259d6ae6dc | f86659c00c776e87bfc7e4d7302b796160d30bb8 | refs/heads/master | 2023-08-07T10:18:17.518197 | 2023-07-26T21:26:37 | 2023-07-26T21:26:37 | 138,870,343 | 0 | 6 | NOASSERTION | 2021-12-13T11:45:19 | 2018-06-27T11:07:07 | R | UTF-8 | R | false | true | 469 | rd | mdt.feedback.no_score.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/feedback.R
\name{mdt.feedback.no_score}
\alias{mdt.feedback.no_score}
\title{MDT feedback (no score)}
\usage{
mdt.feedback.no_score(dict = mdt::mdt_dict)
}
\arguments{
\item{dict}{The psychTestR dictionary used for internationalisation.}
}
\description{
Here the participant is given no feedback at the end of the test.
}
\examples{
\dontrun{
demo_mdt(feedback = mdt.feedback.no_score())}
}
|
ef9f0571d4a026340481eae76092ae374ea53832 | 8d34a5846b55474e1db54cc3595ac725b1a96404 | /R/lst.R | 99de8af541b70e9044eb84a7b556334bd20e13b9 | [] | permissive | federman/poorman | 6f1f76ea1c262a430dbd80b840897d4f1b603b76 | 3cc0a9920b1eb559dd166f548561244189586b3a | refs/heads/master | 2023-05-14T12:33:25.016104 | 2022-12-29T18:35:59 | 2022-12-29T18:35:59 | 243,791,745 | 0 | 0 | MIT | 2020-02-28T15:19:14 | 2020-02-28T15:19:13 | null | UTF-8 | R | false | false | 2,733 | r | lst.R | #' Build a list
#'
#' @description
#' `lst()` constructs a list, similar to [base::list()], but where components
#' are built sequentially. When defining a component, you can refer to components
#' created earlier in the call. `lst()` also generates missing names
#' automatically.
#'
#' @param ... Named or unnamed elements of a list. If the element is unnamed, its
#' expression will be used as its name.
#'
#' @return A named list.
#' @export
#' @examples
#' # the value of n can be used immediately in the definition of x
#' lst(n = 5, x = runif(n))
#'
#' # missing names are constructed from user's input
#' lst(1:3, z = letters[4:6], runif(3))
#'
#' a <- 1:3
#' b <- letters[4:6]
#' lst(a, b)
lst <- function(...) {
  # Capture the unevaluated arguments so each component can be evaluated in
  # sequence, with access to the components defined earlier in the call.
  fn_call <- match.call()
  list_to_eval <- as.list(fn_call)[-1]
  out <- vector(mode = "list", length = length(list_to_eval))
  names(out) <- names(list_to_eval)
  # deparsed argument expressions serve as fallback names for unnamed elements
  exprs <- lapply(substitute(list(...)), deparse)[-1]
  for (element in seq_along(list_to_eval)) {
    value <- list_to_eval[[element]]
    # only unevaluated expressions need eval(); literal values pass through
    if (is.language(value)) {
      # need to update the environment in which the values are obtained
      # ex: lst(a = 1, a = a + 1, b = a), 'b' needs the updated value of 'a',
      # not its initial value.
      value <- eval(
        value,
        envir = if (length(out) == 0) {
          list_to_eval
        } else {
          # restrict the environment to the previous elements of the list (and
          # to the last value for each name if there are duplicated names)
          drop_dup_list(out[1:(element - 1)])
        }
      )
    }
    if (is.null(value)) {
      # out[[element]] <- NULL would *remove* the element; this form keeps a
      # NULL entry in place
      out[element] <- list(NULL)
    } else {
      out[[element]] <- value
    }
    # this naming part needs to happen at the end of the loop to avoid error
    # with lst(NULL)
    invalid_name <- is.null(names(out)[element]) ||
      is.na(names(out)[element]) ||
      names(out)[element] == ""
    if (invalid_name) {
      if (exprs[[element]] != "NULL" || (exprs[[element]] == "NULL" && is.null(out[[element]]))) {
        names(out)[element] <- exprs[[element]]
      }
    }
  }
  out
}
#' Drop List Duplicated
#' If several elements of a list have the same name, only keep the last one with this name.
#' @examples
#' list(a = 1, a = 2, b = 1)
#' # list(a = 2, b = 1)
#' @noRd
drop_dup_list <- function(x) {
  # Keep only the final element for each duplicated name, leaving
  # uniquely-named elements in place (NULL-valued uniques are compacted).
  nms <- names(x)
  if (identical(nms, unique(nms))) {
    return(x)
  }
  tally <- table(nms)
  repeated <- names(tally[tally > 1])
  singles <- names(tally[tally == 1])
  # for every repeated name, mark all but its last occurrence for removal
  drop_idx <- unlist(lapply(repeated, function(nm) {
    hits <- which(nms == nm)
    hits[-length(hits)]
  }))
  x[singles] <- Filter(Negate(is.null), x[singles])
  x[-drop_idx]
}
|
8aaf48822f0592bba0accb996a7d415bc21df561 | a26cce1c6ab0347cc7fb7baa3d8150a918223fde | /script.R | b1050a02a4c82020ae09145085313a6fcc63e93d | [] | no_license | salvadorENR/Primer_Parcial_R | 437bd9ba506992ec7c6ed066a4742d064edeb984 | 39d92efac645c8b29bf75b6e7c5240d26852fe9d | refs/heads/master | 2016-09-06T09:24:52.176428 | 2015-09-14T13:24:33 | 2015-09-14T13:24:33 | 42,383,016 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 609 | r | script.R | #solución de parcial
# Exam solution script (original header: "solucion de parcial").
# Cleaned of console-paste artifacts: a stray "+" continuation prompt inside
# a call and a hard line break pasted into the middle of a URL literal.
require(XML)
# the URL must be one unbroken string
theURL <- "http://www.jaredlander.com/2012/02/another-kind-of-super-bowl-pool/"
bowlPool <- readHTMLTable(theURL, which = 1, header = FALSE,
                          stringsAsFactors = FALSE)
bowlPool
theUrl <- "http://www.jaredlander.com/data/Tomato%20First.csv"
tomato <- read.table(file = theUrl, header = TRUE, sep = ",")
tomato
library(Hmisc)
# read SPSS file, converting labelled values to factors
mydataframe <- spss.get("empresas.sav", use.value.labels = TRUE)
mydataframe
summary(mydataframe$AGR)
obj1 <- mydataframe$AGR
obj1
coches <- read.table(file = "coches.csv", header = TRUE, sep = ",")
coches
|
86e9224df443dbf9798ac105f8baaa28098a7eaf | 2270c222eeec55ff1d7e1b2a476c39e624f2140a | /Code/Fig7_table.R | 9438e8711d4ceca563479242d2d92d103109f30d | [] | no_license | halleybrantley/detrendify | 3fe3babb539f5994d500fcfd29d9833797b401f8 | 33ca04ad2d4ce7b011a7285701f40ed3318937a7 | refs/heads/master | 2020-08-24T11:00:42.542598 | 2019-11-20T22:20:14 | 2019-11-20T22:20:14 | 216,812,726 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,171 | r | Fig7_table.R | ################################################################################
# RMSEs from Simulation 1
################################################################################
library(tidyverse)
library(Cairo)
library(grid)
library(Hmisc)
# NOTE(review): rm(list = ls()) clears the whole workspace of whoever runs
# this script; discouraged in shared code
rm(list=ls())
source("sim_generating_functions.R")
colPal <- c('#006d2c', '#2ca25f', '#66c2a4',
            "#c2a5cf", "#9970ab", "#762a83")
text_size <- 14
# quantile levels evaluated in the simulation
tau <- c(0.01, 0.05, 0.25, 0.5, .75, 0.95, 0.99)
nSim <- 100
simDesigns <- c( "mixednorm", "shapebeta", "gaus")
methods <- c("detrend_eBIC", "detrend_SIC", "detrend_valid", "rqss", "npqw", "qsreg")
# loads the MSEs data.frame produced by the simulation driver
load("../SimResults/Sim1_MSE.RData")
# wide (one column per tau) -> long; RMSE = sqrt(MSE)
# NOTE(review): gather()/spread() are superseded by pivot_longer()/pivot_wider()
MSEs_long <- MSEs %>% gather("tau", "MSE", -c("Design", "Sim", "Method", "n"))
MSEs_long$RMSE <- sqrt(MSEs_long$MSE)
# mean/SE and median/MAD of RMSE per method x tau x design x n
summary_stats <-
  MSEs_long %>% group_by(Method, tau, Design, n) %>%
  summarise(
    mean_mse = mean(RMSE),
    sd_mse = sd(RMSE)/sqrt(nSim),
    median_mse = median(RMSE),
    # 1.482 rescales the MAD to be consistent with the SD under normality
    mad_mse = median(abs(RMSE - median_mse))*1.482
  ) %>%
  ungroup() %>%
  mutate(tau_fac = tau,
         # tau arrives as a column-name string (presumably "tau_0.05"-style);
         # substr strips the prefix -- confirm against the saved data
         tau = as.numeric(substr(tau_fac, 5, 10)),
         Method = factor(Method, levels = methods))
# drop the extreme quantiles from tables and plots
summary_stats <- summary_stats %>% filter( tau > 0.01 & tau < 0.99)
# table cell text: "mean (SE)" to 3 decimals
summary_stats$value <- sprintf("%0.3f (%0.3f)", summary_stats$mean_mse,
                               summary_stats$sd_mse)
# one row per method x design x n, one column per tau level
wide_stats <-
  summary_stats %>%
  select(Method, tau, Design, n, value) %>%
  spread(tau, value) %>%
  arrange(Design, n, Method)
wide_stats$Method <- as.character(str_replace(wide_stats$Method, "_", " "))
unique(wide_stats$Method)
# LaTeX tables (Hmisc::latex), one per simulation design, rows grouped by n
# (6 methods per group, hence n.rgroup = c(6,6,6))
latex(wide_stats %>% filter(Design=="gaus") %>% select(-n, -Design) ,
      file = "Fig7_Gaussian.tex",
      rowname = "",
      title = '',
      n.rgroup = c(6,6,6),
      rgroup = c("n=300", "n=500", "n=1000"))
latex(wide_stats %>% filter(Design=="mixednorm") %>% select(-n, -Design) ,
      file = "Fig7_mixednorm.tex",
      rowname = "",
      title = '',
      n.rgroup = c(6,6,6),
      rgroup = c("n=300", "n=500", "n=1000"))
latex(wide_stats %>% filter(Design=="shapebeta") %>% select(-n, -Design) ,
      file = "Fig7_beta.tex",
      rowname = "",
      title = '',
      n.rgroup = c(6,6,6),
      rgroup = c("n=300", "n=500", "n=1000"))
# Dot-and-interval plots (mean RMSE +/- 2 SE) per design, faceted by tau.
# Gaussian design; legend suppressed (shown once, on p3).
p1 <- summary_stats %>%
  filter(Design == "gaus") %>%
  ggplot( aes(x = factor(n), y = mean_mse, col = Method)) +
  geom_point(position = position_dodge(width = 0.5)) +
  geom_linerange(aes(ymin = mean_mse - 2*sd_mse, ymax = mean_mse + 2*sd_mse),
                 position = position_dodge(width = 0.5))+
  facet_grid(.~factor(tau), scales = "free")+
  theme_bw() +
  theme(text = element_text(size=text_size),
        axis.text.x = element_text(size=(text_size-5)),
        axis.title.y = element_text(margin =
                                      margin(t = 0, r = 5, b = 0, l = 0)),
        plot.title = element_text(size = text_size))+
  scale_color_manual(values=colPal, breaks = methods) +
  labs(x = "n", y="RMSE", title = "Gaussian")+
  guides(col="none")+
  ylim(c(0,.153))
# Beta design; same layout as p1 (without the y-axis-title margin tweak)
p2 <- summary_stats %>%
  filter(Design == "shapebeta") %>%
  ggplot( aes(x = factor(n), y = mean_mse, col = Method)) +
  geom_point(position = position_dodge(width = 0.5)) +
  geom_linerange(aes(ymin = mean_mse - 2*sd_mse, ymax = mean_mse + 2*sd_mse),
                 position = position_dodge(width = 0.5)) +
  facet_grid(.~factor(tau), scales = "free")+
  theme_bw() +
  theme(text = element_text(size=text_size),
        axis.text.x = element_text(size=(text_size-5)),
        plot.title = element_text(size = text_size)) +
  scale_color_manual(values=colPal, breaks = methods) +
  labs(x = "n", y="RMSE", title = "Beta") +
  guides(col="none")+
  ylim(c(0,.091))
# Mixed Normal design; carries the shared legend at the bottom
p3 <- summary_stats %>%
  filter(Design == "mixednorm") %>%
  ggplot( aes(x = factor(n), y = mean_mse, col = Method)) +
  geom_point(position = position_dodge(width = 0.5)) +
  geom_linerange(aes(ymin = mean_mse - 2*sd_mse, ymax = mean_mse + 2*sd_mse),
                 position = position_dodge(width = 0.5))+
  facet_grid(.~factor(tau), scales = "free")+
  theme_bw() +
  theme(text = element_text(size=text_size),
        axis.text.x = element_text(size=(text_size-5)),
        axis.title.y = element_text(margin =
                                      margin(t = 0, r = 5, b = 0, l = 10)),
        plot.title = element_text(size = text_size),
        legend.position = "bottom")+
  guides(col=guide_legend(nrow=2, byrow=TRUE)) +
  scale_color_manual(values=colPal, breaks = methods) +
  labs(x = "n", y="RMSE", title = "Mixed Normal") +
  ylim(c(0, .4))
# 3-row stacked layout: Gaussian, Beta, Mixed Normal (taller last row fits
# the legend)
fig.layout<-grid.layout(nrow=3,ncol=1,
                        heights=c(2.8,2.8,4),
                        widths=c(7,7,7), default.units="null",
                        just=c("left","bottom"))
# NOTE(review): the Cairo() device is opened here; the grid printing of
# p1/p2/p3 and the closing dev.off() are expected to follow beyond this
# excerpt -- confirm
Cairo(file="../Manuscript/Figures/sim_metrics.png",
      type="png",
      dpi = 400,
      unit = "in",
      width=7.5, height=10)
pushViewport(viewport(layout=fig.layout))
print(p1, vp=viewport(layout.pos.row=1, layout.pos.col=1))
print(p2, vp=viewport(layout.pos.row=2,layout.pos.col=1))
print(p3, vp=viewport(layout.pos.row=3, layout.pos.col=1))
dev.off()
|
dc2a0544d43e33fd0e0c512182f69e71d432b536 | bc7da505db39e6e5b0dcbca9e9c935dd4ca34eb3 | /man/noising.Rd | 8906f8e96bdc6042d7d8c2a1e8edbafd029a8029 | [] | no_license | cran/PheVis | fba5649ea6b3abbfcd0a74b7c54afd5fd2b51dc9 | cc19834d117db49595edb989c89d29a4da74aa8d | refs/heads/master | 2023-03-09T14:13:42.362852 | 2021-02-23T08:40:21 | 2021-02-23T08:40:21 | 339,444,782 | 1 | 2 | null | null | null | null | UTF-8 | R | false | true | 310 | rd | noising.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/noising.R
\name{noising}
\alias{noising}
\title{noising}
\usage{
noising(X_boot, p = 0.3)
}
\arguments{
\item{X_boot}{matrix to perform noise on}
\item{p}{amount of noise}
}
\value{
A noised matrix
}
\description{
Noise a matrix
}
|
9c891113bb2592a9012db5f6e71a9267f3232e47 | b9ba5bdcbf41ffa1ca0d8b8c46d9bb5417dfe9cd | /Advance R.R | 79f5b6bce889379af5e9c6079ad63971fa47cc0e | [] | no_license | rajeshmalpani/datascience | 38948ca2850e77290ea40595893c7e3d25e16c58 | 3b047e27dba4ccfab759dae3440c7defd2393b1e | refs/heads/master | 2020-05-29T22:48:53.383823 | 2016-01-22T08:53:03 | 2016-01-22T08:53:03 | 31,932,763 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,232 | r | Advance R.R | ### Advanced R Programming Part I and II ####
# Chapter 1.1 - Read an HTML table straight into a data frame.
url <- "http://www.jaredlander.com/2012/02/another-kind-of-super-bowl-pool/"
require(XML)
# read an HTML table (which=1 selects the first table on the page)
bowl <- readHTMLTable("http://www.jaredlander.com/2012/02/another-kind-of-super-bowl-pool/", header = FALSE, stringsAsFactors=FALSE, which=1)
bowl
### Chapter 1.2 - Use XPath for complex searches in HTML ####
address <-"http://www.menupages.com/restaurants/fiores-pizza/menu"
thePage <- readLines(address)
head(thePage)
require(XML)
pageRender <- htmlParse(thePage)
# extract street address from html page
address <- xpathApply(pageRender, "//li[@class='address adr']/span[@class = 'addr street-address']", fun=xmlValue)[[1]]
address
# extract city from html page
city <- xpathApply(pageRender, "//li[@class='address adr']/span/span[@class = 'locality']", fun=xmlValue)[[1]]
city
# menu section headers (h3 nodes) and the price tables that follow them
headers <- xpathSApply(pageRender, "//*[@id='restaurant-menu']/h3", xmlValue)
headers
items <- xpathSApply(pageRender, "//table[starts-with(@class, 'prices-')]")
items
items <- lapply(items, readHTMLTable, stringsAsFactors=FALSE)
require(plyr)
menu <- "http://www.menupages.com/restaurants/all-areas/all-neighborhoods/pizza/"
doc <- htmlParse(menu)
doc
# parse Name and Link of the list of restaurants returned from menu url
placeNameLink <- xpathApply(doc, "//table/tr/td[@class='name-address']/a[@class='link']",
                            fun=function(x){ c(Name=xmlValue(x, recursive = FALSE), Link=xmlAttrs(x)[2]) })
placeNameLink <- ldply(placeNameLink) # convert the list object to a dataframe
head(placeNameLink)
### Chapter 1.3 - Use xmlToList for easier parsing ####
teaFile <- "http://www.jaredlander.com/data/SocialComments.xml"
require(XML)
teaParsed <- xmlToList(teaFile)
length(teaParsed) # length of list
str(teaParsed) # structure of list
teaParsed[[1]][[1]]$id # to extract id tag data of first child within first child
teaParsed[[1]][[1]]$author$name # to extract name tag within author tag of first child within first child
teaParsed[[1]][[1]]$published # to extract published tag data of first child within first child
teaParsed[[1]][[1]]$content$.attrs # to extract all attributes within a tag
teaParsed[[1]][[1]]$content$.attrs[["sentimentScore"]] # to extract a given attribute within a tag
### Chap 4.1 - Build a recommendation engine with RecommenderLab ####
setwd("/Users/rajeshmalpani/workspace/Data Science/JaredLander/data")
require(utils)
# download and unpack the MovieLens 100k ratings data set
download.file("http://files.grouplens.org/datasets/movielens/ml-100k.zip", destfile="/Users/rajeshmalpani/workspace/Data Science/JaredLander/data/ml-100k.zip")
unzip("ml-100k.zip", exdir="movies")
dir("ml-100k")
# NOTE(review): "Timetamp" looks like a typo for "Timestamp", but it is used consistently below.
ratings <- read.table("ml-100k/u.data", header = FALSE, sep = "\t", col.names = c("UserID", "MovieID", "Rating", "Timetamp"))
head(ratings)
ratings$Timetamp <- as.POSIXct(ratings$Timetamp, origin = "1970-01-01") # Convert data to Posix Format
require(reshape2)
# reshape to a wide user-by-movie ratings matrix
ratingsMat <- dcast(UserID ~ MovieID, data=ratings, value.var = "Rating")
head(ratingsMat)
names(ratingsMat)
require(useful)
corner(ratingsMat)
rownames(ratingsMat) <- sprintf("User%s", ratingsMat$UserID) # assign the rowname as User#
ratingsMat$UserID <- NULL # remove UserID column from RatingsMat
colnames(ratingsMat) <- sprintf("Movie%s", colnames(ratingsMat)) # assign column name as Movie#
corner(ratingsMat)
ratingsMat <- as.matrix(ratingsMat) # convert data to a matrix
install.packages("recommenderlab");
require(recommenderlab)
# coerce to recommenderlab's sparse rating-matrix class
rateMat <- as(ratingsMat, "realRatingMatrix")
head(as(rateMat, "data.frame"))
as(rateMat, "list")[[1]]
image(rateMat)
hist(getRatings(normalize(rateMat)), breaks = 100)
itemRec <- Recommender(rateMat, method="POPULAR") # Get recommendation based on POPULAR
itemRec
getModel(itemRec) # get the model built by recommender
### Chap 4.2 - Mine text with RTextTools ####
install.packages("RTextTools")
require(RTextTools)
data("NYTimes", package="RTextTools")
head(NYTimes); dim(NYTimes)
# create a sparse matrix of title with option to removeNumbers, and stem words and removeSparseTerms
timesMat <- create_matrix(NYTimes$Title, removeNumbers = TRUE, stemWords = TRUE, removeSparseTerms = .998)
timesMat
# create a training and test set
container <- create_container(timesMat, labels=NYTimes$Topic.Code, trainSize = 1:2500, testSize = 2501:NROW(NYTimes), virgin = FALSE)
# build a SVM & Elastic Net
SVM <- train_model(container = container, "SVM") # SVM Model
GLMNET <- train_model(container = container, "GLMNET") # Elastic Net model
SVM_Classify <- classify_model(container, SVM) # Create a classification from the model
GLMNET_Classify <- classify_model(container, GLMNET) # Create a classification from the model
analytis <- create_analytics(container, cbind(SVM_Classify, GLMNET_Classify)) # create analytics from the model classification
summary(analytis)
### Chap 5.1 Network Analysis / Get started with igraph ####
update.packages("igraph")
require(igraph);
require(tcltk)
# NOTE(review): the dot-case igraph API used below (graph.tree, layout.circle, ...)
# is the legacy naming; newer igraph releases prefer make_tree(), layout_in_circle(), etc.
g <- graph(c(1,2, 1,3, 2,3, 3,5), n=5 ) # demo of an igraph
plot(g)
g <- graph.tree(40, 4)
# different ways to layout a graph
plot(g)
plot(g, layout=layout.circle)
plot(g, layout=layout.fruchterman.reingold)
plot(g, layout=layout.graphopt)
plot(g, layout=layout.kamada.kawai)
# tcltk, Rcmdr & rgl hangs RStudio, troubleshoot later
tkplot(g, layout=layout.kamada.kawai)
l <- layout.kamada.kawai(g)
rglplot(g, layout=l)
g <- graph(c(1,1, 1,2, 1,3, 2,3, 4,5), n=5 ) # demo of an igraph
plot(g)
### Chap 5.2 - Read edgelists ####
jets <- read.table("http://www.jaredlander.com/data/routes.csv", sep=",", header = TRUE, stringsAsFactors = FALSE)
head(jets)
flights <- graph.data.frame(jets, directed = TRUE) # build a directed edgelist
print.igraph(flights, full = TRUE) # prints the edgelists by location
plot(flights)
E(flights) # to get a list of edges of this edgelist
V(flights) # to get a list of vertices for the edgelist
vcount(flights) # count of # of vertices in edgelist
ecount(flights) # count of # of edges in edgelist
flights2 <- as.undirected(flights) # create an undirected edgelist
print.igraph(flights2, full = TRUE) # print the full edgelist of undirected edgelist
plot(flights2)
E(flights2)
vcount(flights2) # count of # of vertices in edgelist
ecount(flights2) # count of # of edges in edgelist
plot(flights, layout=layout.fruchterman.reingold, edge.width=E(flights)$Time/100 ) # weighted graph by flight time
plot(flights, layout=layout.kamada.kawai)
### Chap 5.3 - Common graph metrics ####
average.path.length(flights)
diameter(flights)
farthest.nodes(flights) # gives a vertex list of farthest nodes
V(flights)[farthest.nodes(flights)] # get the name of the farthest nodes
largest.cliques(flights) # get the flight path with largest clique i.e. distance
V(flights)[largest.cliques(flights)[[1]]] # get the first named vertices of largest clique route
V(flights)[largest.cliques(flights)[[2]]] # get the second named vertices of largest clique route
transitivity(flights)
degree(flights) # degree of each of the vertices
hist(degree(flights)) # frequency of degrees of flights
shortest.paths(flights) # shortest segment in terms of hops for a vertex to another
heatmap(shortest.paths(flights)) # a heatmap showing connectivity between airports
V(flights)[degree(flights) >= 30]$color <- "green" # to color code flights with degree Greater than Equal to 30 to green
V(flights)[degree(flights) <= 14]$color <- "red" # to color code flights with degree Less than Equal to 14 to red
plot(flights) # when plotted the flights are now color coded per above assignment
plot(flights, edge.width=E(flights)$Time) # this messes graph as width is taken literally
plot(flights, edge.width=E(flights)$Time/100) # scaled version gives a better visual
flights3 <- flights
E(flights3)$weight <- E(flights)$Time
heatmap(shortest.paths(flights3)) # a heatmap showing connectivity between airports that is weighted by time
shortest.paths(flights3) # as it is weighted now, it gives a measure of time, instead of hops
|
4d9b1c4b17f7d74a36fcff1545798b787e3f4e12 | e5e00467881cdb38233b20d1e781611212f66bd7 | /R/support_funcs.R | 4ad5eefefa70b0f444a64479747d4fddde871bd6 | [] | no_license | dpritchard/takiwaR | 8ec7c9b7e5e52ecef3e5b6acf902dda50bd5041c | 8f26dba56cb24683f7a2af0c8b69fe48b0c9ae5d | refs/heads/master | 2021-01-18T21:36:55.920324 | 2018-05-19T03:56:21 | 2018-05-19T03:56:21 | 26,068,132 | 1 | 0 | null | 2017-01-20T01:06:03 | 2014-11-02T01:15:56 | R | UTF-8 | R | false | false | 2,946 | r | support_funcs.R | cm2in <- function(x){
cm <- x*0.393701
return(cm)
}
compare <- function(v){
    # Test whether every element of 'v' is identical() to its first element.
    #
    # Args:
    #   v: an atomic vector or a list.
    #
    # Returns: TRUE if all elements are identical (trivially TRUE for inputs
    # of length 0 or 1, matching all() on an empty logical vector).
    if (length(v) <= 1L) {
        return(TRUE)
    }
    # Bug fix: use [[ to extract the first ELEMENT. The previous v[1] kept
    # the container for list input, so every comparison against a list failed.
    first <- v[[1]]
    rest <- as.list(v[-1])
    # vapply() instead of sapply(): type-stable, always returns logical.
    res <- vapply(rest, FUN=function(z){ identical(z, first) }, FUN.VALUE = logical(1))
    return(all(res))
}
# Sanitise arbitrary strings into safe, unique, lower-case keys.
#
# The transformations below are order-dependent: whitespace is trimmed,
# non-word characters collapse to single underscores, keys are de-duplicated
# with make.unique(), lower-cased, and finally keys starting with an
# underscore or a digit get the 'subs' prefix.
#
# NOTE(review): make.unique() runs before str_to_lower(), so inputs differing
# only by case (e.g. "A" and "a") can still collide afterwards -- verify this
# is intended.
make_key <- function(string, subs = "takR"){
    key <- as.character(string)
    key <- stringr::str_trim(key)
    key <- stringr::str_replace_all(key, "\\W", "_") # Find anything that is not a word and replace
    key <- stringr::str_replace_all(key, "\\_{2,}", "_") # Replace multiple underscores
    key <- stringr::str_trim(key)
    #key <- make.names(key)
    key <- make.unique(key, sep="_")
    key <- stringr::str_to_lower(key)
    key <- stringr::str_replace(key, "^\\_", paste0(subs, "_")) # Replace underscores at the beginning with takR_
    key <- stringr::str_replace(key, "^(\\d)", paste0(subs, "_\\1")) # Replace numbers at the beginning with takR_#
    return(key)
}
se <- function(x, na.rm=FALSE){
    # Standard error of the mean: sd(x) / sqrt(n).
    #
    # Args:
    #   x:     a vector of values.
    #   na.rm: drop NA values before computing (otherwise NAs propagate).
    #
    # Returns: a single numeric value (NA when 'x' holds NAs and na.rm=FALSE).
    # Raises: an error when 'x' is not a vector.
    if (!is.vector(x)){
        stop("'x' must be a vector. See ?se for further information.")
    }
    if(na.rm){
        # Filter once so a single formula covers both cases; this also stops
        # shadowing the function name with a local 'se' variable.
        x <- x[!is.na(x)]
    }
    return(sd(x)/sqrt(length(x)))
}
rnt <- function(min = 0, max = 30, by = 1, nrow = 10, ncol = 10,
                rowpref = "T", colpref = "Q"){
    # Random numbers table: each row holds 'ncol' sorted values sampled
    # without replacement from seq(min, max, by).
    #
    # Args:
    #   min, max, by:     define the pool of candidate values.
    #   nrow, ncol:       dimensions of the returned matrix.
    #   rowpref, colpref: prefixes for the row/column names.
    #
    # Returns: a named nrow x ncol numeric matrix with sorted rows.
    pool <- seq(from = min, to = max, by = by)
    # Preallocate instead of growing with rbind() inside the loop.
    samples <- matrix(NA_real_, nrow = nrow, ncol = ncol)
    for(a in seq_len(nrow)){
        samples[a, ] <- sort(sample(pool, ncol))
    }
    # Bug fix: honour rowpref/colpref; the prefixes were hard-coded "T"/"Q".
    rownames(samples) <- paste0(rowpref, seq_len(nrow))
    colnames(samples) <- paste0(colpref, seq_len(ncol))
    return(samples)
}
is_zero <- function(x, strict_int = FALSE){
    # Vectorised zero test: exact comparison against 0L when strict_int is
    # TRUE, otherwise a tolerance check against machine epsilon.
    if (strict_int) {
        x == 0L
    } else {
        abs(x) <= .Machine$double.eps
    }
}
all_is_na <- function(x){
    # TRUE when every element of 'x' is NA (vacuously TRUE for length-0
    # input, matching all() on an empty logical vector).
    all(is.na(x))
}
map_attributes <- function(x, out, regex = "^(takRmeta_|takRsec_)") {
    # Copy the attributes of 'x' whose names match 'regex' (by default the
    # "takRmeta_"/"takRsec_" prefixes) onto 'out'; other attributes of 'out'
    # are left untouched.
    #
    # Args:
    #   x:     object to read attributes from.
    #   out:   object that receives the matching attributes.
    #   regex: pattern applied to the attribute names.
    #
    # Returns: 'out' with the matching attributes attached.
    # base::grep replaces the former stringr::str_detect dependency; it also
    # copes with attribute-less objects, where names(attributes(x)) is NULL.
    attr_names <- grep(regex, names(attributes(x)), value = TRUE)
    for(a in attr_names){
        attr(out, a) <- attr(x, a)
    }
    return(out)
}
# Collapse a wide matrix (or list of matrices) by summing columns that share
# a name, returning a "takRwide" object with any takR* attributes preserved.
sum_on_col <- function(x){
    # Sum columns with matching names
    # This works equally well if x is a list because melt operates by melting the components of a list...
    x_m <- reshape2::melt(x, as.is=TRUE)
    # Treat missing values as zero before aggregating.
    # NOTE(review): this logical index selects data-frame COLUMNS, not rows;
    # it likely should be x_m$value[is.na(x_m$value)] <- 0 -- verify.
    x_m[is.na(x_m$value)] <- 0
    # Cast the data as an array that is row-by-col, summing across matches....
    x_out <- reshape2::acast(x_m, Var1~Var2, fun.aggregate=sum)
    if(inherits(x, "takRwide")){
        # If it came in as a takRwide, send it back as the same....
        class(x_out) <- class(x)
    } else {
        # Otherwise, send back a takRwide object
        class(x_out) <- c("takRwide", class(x_out))
    }
    # Map any "takR*" attributes
    x_out <- map_attributes(x, x_out)
    return(x_out)
} |
e352f47415df7b946c1f2ca0446068d500243d42 | 122659e9874d73b463cbb714db795f195f154a9b | /correlateLhsColumns.r | ac6686c73f7225292a3088f80e93797fb993e504 | [] | no_license | bertcarnell/Rsandbox | ac7e808b9446422cbc40e231f3b17b79ad87a35a | 11ef0962b9504a811bf5aa8aa064ddee0d059d75 | refs/heads/master | 2021-06-05T15:11:06.610766 | 2018-11-24T23:48:37 | 2018-11-24T23:48:37 | 5,023,424 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 705 | r | correlateLhsColumns.r | require(lhs)
correlateLhsColumns <- function(targetSample, correlatedSample, rho, permutations)
{
    # Reorder 'correlatedSample' so that its correlation with 'targetSample'
    # is as close as possible to the target correlation 'rho', by trying
    # 'permutations' random permutations and keeping the best one.
    #
    # Args:
    #   targetSample:     numeric vector that stays fixed.
    #   correlatedSample: numeric vector to be reordered (same length).
    #   rho:              desired correlation between the two columns.
    #   permutations:     number of random permutations to evaluate.
    #
    # Returns: a permutation of 'correlatedSample', or the input unchanged
    # when it is already closer to 'rho' than any candidate found.
    n <- length(targetSample)
    # Fill candidates column-by-column into a preallocated matrix; unlike the
    # former sapply()/apply() chain this is also robust for permutations == 1.
    Z <- matrix(0, nrow = n, ncol = permutations)
    for (i in seq_len(permutations)) {
        Z[, i] <- correlatedSample[order(runif(n))]
    }
    cors <- apply(Z, 2, cor, y = targetSample)
    ind <- which.min((cors - rho)^2)
    tempold <- (cor(targetSample, correlatedSample) - rho)^2
    # cor() is symmetric, so reuse the already-computed value for the winner.
    tempnew <- (cors[ind] - rho)^2
    if (tempold < tempnew) return(correlatedSample)
    else return(Z[, ind])
}
# Demo: build a 100 x 2 Latin hypercube sample (lhs::randomLHS), map the
# columns to normal margins, then reorder column 2 to target correlation 0.4.
set.seed(1976)
X <- randomLHS(100, 2)
Y <- X
Y[,1] <- qnorm(X[,1], 2, 3)
Y[,2] <- qnorm(X[,2], 4, 8)
cor(Y)
cov(Y)
Y[,2] <- correlateLhsColumns(Y[,1], Y[,2], 0.4, 10000)
cor(Y)
cov(Y)
|
b023b4a69bd8a39d5fb8bdc5272dc9357aa7db9f | 906e6bbbda8b4f46a6efaf31abbb318180a9c536 | /R/nextItem.R | 3174b05bee1e2635b72c79a6f64373390f06927e | [] | no_license | cran/catR | 6c8dffc1a1027b3883f5ffcd4ec5b3d0f4017fed | 112a2efb6621fed446a5df4b1467e4bfe937ab81 | refs/heads/master | 2022-07-10T15:36:37.953792 | 2022-06-24T07:00:02 | 2022-06-24T07:00:02 | 17,694,983 | 3 | 3 | null | null | null | null | UTF-8 | R | false | false | 15,593 | r | nextItem.R | nextItem<-function (itemBank, model = NULL, theta = 0, out = NULL, x = NULL,
criterion = "MFI", method = "BM", priorDist = "norm", priorPar = c(0,
1), D = 1, range = c(-4, 4), parInt = c(-4, 4, 33), infoType = "observed",
randomesque = 1, random.seed = NULL, rule = "length", thr = 20,
SETH = NULL, AP = 1, nAvailable = NULL, maxItems = 50, cbControl = NULL,
cbGroup = NULL)
{
crit <- switch(criterion, MFI = "MFI", bOpt = "bOpt", MLWI = "MLWI",
MPWI = "MPWI", MEI = "MEI", MEPV = "MEPV", random = "random",
progressive = "progressive", proportional = "proportional",
KL = "KL", KLP = "KLP", thOpt = "thOpt", GDI = "GDI",
GDIP = "GDIP")
if (is.null(crit))
stop("invalid 'criterion' name", call. = FALSE)
if (!is.null(model)) {
mod <- switch(model, GRM = 1, MGRM = 2, PCM = 3, GPCM = 4,
RSM = 5, NRM = 6)
if (is.null(mod))
stop("invalid 'model' type!", call. = FALSE)
}
if (is.null(cbControl))
OUT <- out
else {
if (is.null(cbGroup))
stop("'cbGroup' argument must be provided for content balancing!",
call. = FALSE)
if (sum(cbControl$props) != 1)
cbControl$props <- cbControl$props/sum(cbControl$props)
nrGroup <- length(cbControl$names)
if (is.null(out))
empProp <- rep(0, nrGroup)
else {
empProp <- NULL
for (i in 1:nrGroup) empProp[i] <- length(out[cbGroup[out] ==
cbControl$names[i]])
empProp <- empProp/sum(empProp)
}
thProp <- cbControl$props
if (min(empProp) == 0) {
indGroup <- (1:nrGroup)[empProp == 0]
if (!is.null(random.seed))
set.seed(random.seed)
selGroup <- ifelse(length(indGroup) == 1, indGroup,
sample(indGroup, 1))
}
else {
indGroup <- (1:nrGroup)[(thProp - empProp) == max(thProp -
empProp)]
if (!is.null(random.seed))
set.seed(random.seed)
selGroup <- ifelse(length(indGroup) == 1, indGroup,
sample(indGroup, 1))
}
OUT <- unique(c(out, (1:length(cbGroup))[cbGroup != cbControl$names[selGroup]]))
}
if (!is.null(nAvailable)) {
discard <- unique(c(OUT, which(nAvailable == 0)))
OUT <- discard
}
if (crit == "MFI") {
items <- rep(1, nrow(itemBank))
items[OUT] <- 0
info <- Ii(theta, itemBank, model = model, D = D)$Ii
ranks <- rank(info)
nrIt <- min(c(randomesque, sum(items)))
keepRank <- sort(ranks[items == 1], decreasing = TRUE)[1:nrIt]
keep <- NULL
for (i in 1:length(keepRank)) keep <- c(keep, which(ranks ==
keepRank[i] & items == 1))
if (!is.null(random.seed))
set.seed(random.seed)
select <- ifelse(length(keep) == 1, keep, sample(c(keep),
1))
res <- list(item = select, par = itemBank[select, ],
info = info[select], criterion = criterion, randomesque = randomesque,
name=NULL)
}
if (crit == "bOpt") {
if (!is.null(model))
stop("bOpt's rule cannot be considered with polytomous items",
call. = FALSE)
items <- rep(1, nrow(itemBank))
items[OUT] <- 0
distance <- abs(itemBank[, 2] - theta)
ranks <- rank(distance)
ranks[OUT] <- -1
nrIt <- min(c(randomesque, sum(items)))
keepRank <- sort(ranks[items == 1], decreasing = FALSE)[1:nrIt]
keepRank <- unique(keepRank)
keep <- NULL
for (i in 1:length(keepRank)) keep <- c(keep, which(ranks ==
keepRank[i] & items == 1))
if (!is.null(random.seed))
set.seed(random.seed)
select <- ifelse(length(keep) == 1, keep, sample(keep,
1))
res <- list(item = select, par = itemBank[select, ],
info = distance[select], criterion = criterion, randomesque = randomesque,
name=NULL)
}
if (crit == "MLWI" | crit == "MPWI") {
if (length(out) == 1)
par <- rbind(itemBank[out, ])
else par <- itemBank[out, ]
ITEMS <- rep(1, nrow(itemBank))
ITEMS[OUT] <- 0
likInfo <- rep(0, nrow(itemBank))
for (i in 1:nrow(itemBank)) {
if (ITEMS[i] == 1)
likInfo[i] <- MWI(itemBank, i, x, it.given = par,
model = model, type = criterion, lower = parInt[1],
upper = parInt[2], nqp = parInt[3], priorDist = priorDist,
priorPar = priorPar, D = D)
}
likVal <- sort(likInfo, decreasing = TRUE)[min(c(randomesque,
sum(ITEMS)))]
keep <- (1:length(ITEMS))[likInfo >= likVal]
if (!is.null(random.seed))
set.seed(random.seed)
select <- ifelse(length(keep) == 1, keep, sample(keep,
1))
res <- list(item = select, par = itemBank[select, ],
info = likInfo[select], criterion = criterion, randomesque = randomesque,
name=NULL)
}
if (crit == "KL" | crit == "KLP") {
if (length(out) == 1)
par <- rbind(itemBank[out, ])
else par <- itemBank[out, ]
ITEMS <- rep(1, nrow(itemBank))
ITEMS[OUT] <- 0
klvalue <- rep(0, nrow(itemBank))
L <- function(th, r, param) prod(Pi(th, param, D = D)$Pi^r *
(1 - Pi(th, param, D = D)$Pi)^(1 - r))
X <- seq(from = parInt[1], to = parInt[2], length = parInt[3])
LL <- function(th, r, param, model, D = D) {
if (dim(param)[1] == 0)
res <- 1
else {
prob <- Pi(th, param, model = model, D = D)$Pi
res <- 1
for (i in 1:length(r)) res <- res * prob[i, r[i] +
1]
}
return(res)
}
if (is.null(model))
LF <- sapply(X, L, x, par)
else LF <- sapply(X, LL, x, par, model = model, D = D)
for (i in 1:nrow(itemBank)) {
if (ITEMS[i] == 1)
klvalue[i] <- KL(itemBank, i, x, it.given = par,
model = model, theta = theta, type = criterion,
lower = parInt[1], upper = parInt[2], nqp = parInt[3],
priorDist = priorDist, priorPar = priorPar,
lik = LF, X = X, D = D)
}
klVal <- sort(klvalue, decreasing = TRUE)[min(c(randomesque,
sum(ITEMS)))]
keep <- (1:length(ITEMS))[klvalue >= klVal]
if (!is.null(random.seed))
set.seed(random.seed)
select <- ifelse(length(keep) == 1, keep, sample(keep,
1))
res <- list(item = select, par = itemBank[select, ],
info = klvalue[select], criterion = criterion, randomesque = randomesque,
name=NULL)
}
if (crit == "GDI" | crit == "GDIP") {
if (length(out) == 1)
par <- rbind(itemBank[out, ])
else par <- itemBank[out, ]
ITEMS <- rep(1, nrow(itemBank))
ITEMS[OUT] <- 0
gdivalue <- rep(0, nrow(itemBank))
L <- function(th, r, param) prod(Pi(th, param, D = D)$Pi^r *
(1 - Pi(th, param, D = D)$Pi)^(1 - r))
X <- seq(from = parInt[1], to = parInt[2], length = parInt[3])
LLL <- function(th, r, param, model, D = 1) {
if (dim(param)[1] == 0)
res <- 1
else {
prob <- Pi(th, param, model = model, D = D)$Pi
res <- 1
for (i in 1:length(r)) res <- res * prob[i, r[i] +
1]
}
return(res)
}
if (is.null(model))
LF <- sapply(X, L, x, par)
else LF <- sapply(X, LLL, x, par, model = model, D = D)
for (i in 1:nrow(itemBank)) {
if (ITEMS[i] == 1)
gdivalue[i] <- GDI(itemBank, i, x, it.given = par,
model = model, type = criterion, lower = parInt[1],
upper = parInt[2], nqp = parInt[3], priorDist = priorDist,
priorPar = priorPar, lik = LF, X = X, D = D)
}
gdiVal <- sort(gdivalue, decreasing = TRUE)[min(c(randomesque,
sum(ITEMS)))]
keep <- (1:length(ITEMS))[gdivalue >= gdiVal]
if (!is.null(random.seed))
set.seed(random.seed)
select <- ifelse(length(keep) == 1, keep, sample(keep,
1))
res <- list(item = select, par = itemBank[select, ],
info = gdivalue[select], criterion = criterion, randomesque = randomesque,
name=NULL)
}
if (crit == "MEI") {
items <- rep(1, nrow(itemBank))
items[OUT] <- 0
infos <- rep(0, length(items))
for (i in 1:length(items)) {
if (items[i] > 0)
infos[i] <- MEI(itemBank, item = i, x = x, theta = theta,
it.given = itemBank[out, ], model = model,
method = method, priorDist = priorDist, priorPar = priorPar,
D = D, range = range, parInt = parInt, infoType = infoType)
}
infoVal <- sort(infos, decreasing = TRUE)[min(c(randomesque,
sum(items)))]
keep <- (1:nrow(itemBank))[infos >= infoVal]
if (!is.null(random.seed))
set.seed(random.seed)
select <- ifelse(length(keep) == 1, keep, sample(keep,
1))
res <- list(item = select, par = itemBank[select, ],
info = infos[select], criterion = criterion, randomesque = randomesque,
name=NULL)
}
if (crit == "MEPV") {
items <- rep(1, nrow(itemBank))
items[OUT] <- 0
epvs <- rep(1000, length(items))
for (i in 1:length(items)) {
if (items[i] > 0)
epvs[i] <- EPV(itemBank, item = i, x = x, theta = theta,
it.given = itemBank[out, ], model = model,
priorDist = priorDist, priorPar = priorPar,
D = D, parInt = parInt)
}
epVal <- sort(epvs)[min(c(randomesque, sum(items)))]
keep <- (1:nrow(itemBank))[epvs <= epVal]
if (!is.null(random.seed))
set.seed(random.seed)
select <- ifelse(length(keep) == 1, keep, sample(keep,
1))
res <- list(item = select, par = itemBank[select, ],
info = epvs[select], criterion = criterion, randomesque = randomesque,
name=NULL)
}
if (crit == "random") {
items <- rep(1, nrow(itemBank))
items[OUT] <- 0
gen <- as.integer(runif(1, 0, 1) * (sum(items))) + 1
ind <- (1:nrow(itemBank))[items > 0][gen]
res <- list(item = ind, par = itemBank[ind, ], info = NA,
criterion = criterion, randomesque = randomesque,
name=NULL)
}
if (crit == "progressive") {
items_administered <- length(out)
items <- rep(1, nrow(itemBank))
items[OUT] <- 0
info <- Ii(theta, itemBank, model = model, D = D)$Ii
itemMaxInfo <- max(info[items == 1])
randomValues <- runif(length(items), 0, itemMaxInfo)
wq <- 0
if (rule == "precision") {
infostop <- (1/thr)^2
cuminfo <- (1/SETH)^2
if (items_administered > 0)
wq <- max(cuminfo/infostop, items_administered/(maxItems -
1))^AP
}
if (rule == "length") {
if (items_administered > 0) {
numerador <- sum((1:items_administered)^AP)
denominador <- sum((1:(thr - 1))^AP)
wq <- numerador/denominador
}
}
funcPR <- info * wq + randomValues * (1 - wq)
funcPR[OUT] <- 0
keep <- which(funcPR == max(funcPR))
if (!is.null(random.seed))
set.seed(random.seed)
select <- ifelse(length(keep) == 1, keep, sample(keep,
1))
res <- list(item = select, par = itemBank[select, ],
info = info[select], criterion = criterion, randomesque = randomesque,
name=NULL)
}
if (crit == "proportional") {
items_administered <- length(out)
items <- rep(1, nrow(itemBank))
items[OUT] <- 0
wq <- 0
if (rule == "precision") {
infostop <- (1/thr)^2
cuminfo <- (1/SETH)^2
if (items_administered > 0)
wq <- infostop * max(cuminfo/infostop, items_administered/(maxItems -
1))^AP
}
if (rule == "length")
if (items_administered > 0) {
numerador <- sum((1:items_administered)^AP)
denominador <- sum((1:(thr - 1))^AP)
wq <- thr * numerador/denominador
}
info <- Ii(theta, itemBank, model = model, D = D)$Ii
infoPR <- info^wq
infoPR[OUT] <- 0
totalInfoPR <- sum(infoPR[items == 1])
probSelect <- infoPR/totalInfoPR
if (!is.null(random.seed))
set.seed(random.seed)
select <- sample(1:length(items), size = 1, prob = probSelect)
res <- list(item = select, par = itemBank[select, ],
info = info[select], criterion = criterion, randomesque = randomesque,
name=NULL)
}
if (crit == "thOpt") {
if (!is.null(model))
stop("'thOpt' rule cannot be considered with polytomous items",
call. = FALSE)
items <- rep(1, nrow(itemBank))
items[OUT] <- 0
u <- -3/4 + (itemBank[, 3] + itemBank[, 4] + -2 * itemBank[,
3] * itemBank[, 4])/2
v <- (itemBank[, 3] + itemBank[, 4] - 1)/4
xstar <- 2 * sqrt(-u/3) * cos(acos(-v * sqrt(-27/u^3)/2)/3 +
4 * pi/3) + 1/2
thstar <- itemBank[, 2] + log((xstar - itemBank[, 3])/(itemBank[,
4] - xstar))/(D * itemBank[, 1])
distance <- abs(thstar - theta)
ranks <- rank(distance)
ranks[OUT] <- -1
nrIt <- min(c(randomesque, sum(items)))
keepRank <- sort(ranks[items == 1], decreasing = FALSE)[1:nrIt]
keepRank <- unique(keepRank)
keep <- NULL
for (i in 1:length(keepRank)) {
keep <- c(keep, which(ranks == keepRank[i]))
}
if (!is.null(random.seed))
set.seed(random.seed)
select <- ifelse(length(keep) == 1, keep, sample(keep,
1))
res <- list(item = select, par = itemBank[select, ],
info = distance[select], criterion = criterion, randomesque = randomesque,
name=NULL)
}
if (is.null(cbControl))
res[[7]] <- res[[8]] <- res[[9]] <- NA
else {
res[[7]] <- empProp
postProp <- NULL
for (i in 1:nrGroup) postProp[i] <- length(c(res$item,
out)[cbGroup[c(res$item, out)] == cbControl$names[i]])
res[[8]] <- postProp/sum(postProp)
res[[9]] <- thProp
}
names(res)[7:9] <- c("prior.prop", "post.prop", "cb.prop")
if (!is.null(row.names(itemBank)))
res$name <- row.names(itemBank)[res$item]
set.seed(NULL)
return(res)
}
|
7fa5d7d5255da0167ed234614475c7e2e9651a0f | 568a4c35c73e6a7046c19aa9c8340c266972a10b | /man/nwos_estimates_add_tele.Rd | 9ede2065bec25e985adc69f8e476c77834875fa9 | [] | no_license | jfontestad/nwos | 7a0596f7ee81e26d26651e1435d3bf81e16a1e99 | f71969b594a07710719ac97add8e5e7fb4ef479d | refs/heads/master | 2023-04-11T16:16:20.985189 | 2021-05-03T18:09:00 | 2021-05-03T18:09:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 770 | rd | nwos_estimates_add_tele.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nwos_estimates_add_tele.R
\name{nwos_estimates_add_tele}
\alias{nwos_estimates_add_tele}
\title{Add TELE Variables to NWOS Dataset}
\usage{
nwos_estimates_add_tele(x, data = QUEST_WIDE)
}
\arguments{
\item{x}{list number. Only applicable if is data is a list of data frames, instead of a single data frame. This used mainly for apply functions.}
\item{data}{data frame or list of data frames}
}
\description{
Add variables to an NWOS dataframe
}
\details{
TELE_ATT: 1= Woodland retreat; 2 = Workign the land; 3 = Supplemental income; 4 = Uninvolved
TELE_PRIME: 1= Model; 2 = Prime; 3 = Defector; 4 = Write-off
}
\examples{
nwos_estimates_add_tele(x = 1 , data = QUEST_LIST)
}
\keyword{nwos}
|
bcba19b5a6da459423b06e4dc1390e45c466138e | c0f2e73c9ada0d7e9833c9b33ead06726c90d9d0 | /R/metropolis_z.R | 4429ec85e2d2bab6ad62f94a91f644c2735d7af3 | [] | no_license | BrandonEdwards/spmm | d0525da17cc2616848bf84e10c189bdda6bd7d88 | 5ead1a57bfae0b3156cc4052a3e46e755fe665a9 | refs/heads/master | 2020-12-30T00:02:01.813296 | 2020-04-08T21:47:55 | 2020-04-08T21:47:55 | 238,786,741 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,945 | r | metropolis_z.R | #' Metropolis Z
#'
#' Does a Metropolis--Hastings update of Z, the N x C one-hot allocation
#' matrix. Rows with a known allocation (alloc == 1) keep their observed
#' disease label; the remaining rows are redrawn from a multinomial with
#' mixing weights pi. The proposal is accepted or rejected using the
#' log-likelihood ratio computed by loglik().
#'
#' @param Y observed counts, forwarded to loglik().
#' @param Z current allocation matrix (N x C, one-hot rows).
#' @param alloc indicator vector; 1 marks rows with an observed allocation.
#' @param X covariate matrix, forwarded to loglik().
#' @param at_risk at-risk totals, forwarded to loglik().
#' @param disease observed disease labels (column indices) for fixed rows.
#' @param region region indices, forwarded to loglik().
#' @param Beta regression coefficients, forwarded to loglik().
#' @param phi random effects, forwarded to loglik().
#' @param pi mixing weights used to propose unallocated rows.
#' @param N number of rows (observations) in Z.
#'
#' @return The accepted allocation matrix: either the proposal or Z unchanged.
#'
#' @importFrom stats rmultinom runif
metropolis_z <- function(Y = NULL,
                         Z = NULL,
                         alloc = NULL,
                         X = NULL,
                         at_risk = NULL,
                         disease = NULL,
                         region = NULL,
                         Beta = NULL,
                         phi = NULL,
                         pi = NULL,
                         N = NULL)
{
  C <- ncol(Z)
  Z_proposed <- matrix(0, nrow = N, ncol = C)
  for (k in seq_len(N))
  {
    if (alloc[k] == 1) {
      # Observed allocation: keep the recorded disease label.
      Z_proposed[k, disease[k]] <- 1
    } else {
      # Unobserved allocation: draw a fresh one-hot row from pi.
      Z_proposed[k, ] <- as.vector(rmultinom(1, 1, pi))
    }
  }
  loglik_proposed <- loglik(Z = Z_proposed,
                            Y = Y,
                            X = X,
                            disease = disease,
                            region = region,
                            Beta = Beta,
                            at_risk = at_risk,
                            phi = phi,
                            pi = pi)
  loglik_current <- loglik(Z = Z,
                           Y = Y,
                           X = X,
                           disease = disease,
                           region = region,
                           Beta = Beta,
                           at_risk = at_risk,
                           phi = phi,
                           pi = pi)
  log_ratio_z <- loglik_proposed - loglik_current
  # Accept when the log-likelihood ratio beats log(u), u ~ Uniform(0, 1).
  # runif() does not return the endpoints here, so the former 'u > 0' guard
  # (whose else-branch was unreachable) has been dropped, along with the
  # commented-out debugging code.
  u <- runif(1)
  if (log_ratio_z >= log(u)) {
    return(Z_proposed)
  } else {
    return(Z)
  }
}
6e05ff621aada8c21d25eb51d07da8c40c469b24 | d30fe412ba2aca9af494b95c042a7798db83d273 | /movies.R | bbab5859a9ca289abd01e5802b0c83a54907fe32 | [] | no_license | akashjaisingpure/Assignments | 084c9125d92fc51c580c437045d8312f5e3ee0a7 | 6e36b49dbf473ada153f84396a4ac8c39a9d41a8 | refs/heads/master | 2021-03-10T20:03:27.811720 | 2020-08-17T07:19:39 | 2020-08-17T07:19:39 | 246,481,986 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 756 | r | movies.R | #reading the data
# Market-basket analysis of movie transactions with the apriori algorithm.
library(arules)
movie <- read.transactions(file.choose(),format="basket")
View(movie)
summary(movie)
inspect(movie[1:10])
itemFrequencyPlot(movie, topN=10)
# Mine association rules with apriori.
rules <- apriori(movie,parameter = list(support=0.0909,confidence= 0.85, minlen=3))
inspect(sort(rules,by="lift"))
inspect(tail(sort(rules, by="lift")))
# Lowering the confidence threshold yields more rules.
# A high confidence level with minimum support gives common rules;
# for uncommon rules, raise the support level and lower the confidence.
## Plot the rules.
library(arulesViz)
plot(rules, method = "scatterplot")
# The scatterplot is hard to read, so try the other plot methods.
plot(rules, method = "grouped")
plot(rules, method = "graph")
|
1c8d6b587dc3682c956e3aebf1ab2df05655cbf5 | 47195fd29fd237ef1b88da004051730d92233a2b | /Chapter_2/VaR_visualisering.R | cc1092f863c613ed1685828b4f692a27d284ba58 | [] | no_license | TokeZinn/P7-Multivariate-GARCH | f09ad3b1fd5687d5c2dc97959d4bbc869f9bbafa | 7c62af7903f1a44608388fa00cb7da89e02ccbe7 | refs/heads/master | 2021-07-15T05:41:36.384939 | 2019-02-12T10:00:14 | 2019-02-12T10:00:14 | 151,698,564 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,465 | r | VaR_visualisering.R | setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# Load packages: mvtnorm for the bivariate normal, plotly for 3D surfaces.
pacman::p_load(tidyverse,emdbook,mvtnorm,plotly)
# Covariance matrix of the bivariate normal and a vector of ones.
Sigma <- as.matrix(cbind(c(9,2),c(2,9))) ; e <- c(1,1)
# 10^4 draws from N(0, Sigma) (not referenced again below).
sim <- rmvnorm(10^4,sigma = Sigma)
# Weight vector w = Sigma^-1 e / (e' Sigma^-1 e) (minimum-variance-style
# portfolio weights; w sums to 1 by construction).
w <- (solve(Sigma)%*%e) / as.numeric(t(e)%*%solve(Sigma)%*%e)
# Evaluation grid for the density surfaces.
x <- seq(-10,10,by = 0.05)
y <- seq(-10,10,by = 0.05)
# Bivariate normal density N(0, Sigma) evaluated at the paired points (x, y).
# `x` and `y` are equal-length vectors; `Sigma` comes from the script scope.
f <- function(x, y) {
  pts <- cbind(x, y)
  dmvnorm(pts, sigma = Sigma)
}
# Density restricted to the region w'(x, y) >= qnorm(0.05): outside that
# half-plane the value is zeroed.  `w` and `Sigma` come from the script scope.
f_star <- function(x, y) {
  pts <- cbind(x, y)
  # Vectorised half-plane test (replaces the original row-wise apply()).
  keep <- as.vector(pts %*% w >= qnorm(0.05))
  keep * dmvnorm(pts, sigma = Sigma)
}
# Linear combination of the paired points (x, y) with the weight vector `w`
# (returns an n x 1 matrix).
func <- function(x, y) {
  pts <- cbind(x, y)
  pts %*% w
}
# Surfaces on the grid: full density (zf), density restricted to the VaR
# region (zf_star), and a flat reference level (zf_vec).
zf <- outer(x,y,FUN = f)
zf_star <- outer(x,y, FUN = f_star)
zf_vec <- outer(x,y,FUN = function(x,y){rep(0.0005,length(x))})
# NOTE(review): this first plot object is built but immediately overwritten
# by the second assignment to `p` below, so it is never displayed.
p <- plot_ly(x = ~x, y = ~y, z = ~zf_vec, type = 'scatter3d', mode = 'lines',
        opacity = 1, line = list(width = 6, color = "red", reverscale = FALSE)) %>%
  add_surface(z = ~zf, opacity = 0.6) %>%
  add_surface(z = ~zf_star)
# Truncated-density surface overlaid on the full density (60% opacity), with
# a red diagonal reference line in the z = 0 plane.
p <- plot_ly(x = x,y=y,z = ~zf_star,type = "surface") %>% add_surface(z = ~zf, opacity = 0.6) %>%
  add_trace(x = x, y = x, z = ~rep(0,length(x)), mode = "lines", type = "scatter3d",
            line = list(width = 20, color = "red")) %>%
  layout(scene = list(
    xaxis = list(title = "x"),
    yaxis = list(title = "y"),
    zaxis = list(title = "Density")
  ))
|
31befb71b54457dbb4c592947fed7854ad042a9c | 19a8acee397d902c5df3d1fd33627bfdc6e48c11 | /Proyecto_genomica/Progecto_genomica.R | 27fedcedae588c92e8d442d0d13ed2099af8b969 | [] | no_license | Emilio-O/Proyecto_genomica | 0acdb4018d11f7fbf100ce9b9ee5122ddc693c90 | c87e5ba085c02bdbc5e94e55bf005064a476e7bf | refs/heads/main | 2023-05-06T06:41:24.292935 | 2021-05-28T15:27:30 | 2021-05-28T15:27:30 | 369,679,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,903 | r | Progecto_genomica.R | setwd("C:/Users/52442/Desktop/Proyecto_genomica/")
# --- Multiple sequence alignment of SARS-CoV-2 genomes (Mexico) ---
memory.size(max = T)
library(Biostrings)
library(msa)
# Wuhan reference genome plus one FASTA per state (row naming below assumes
# 20 sequences per state file -- confirm against the data).
hCoV19_Mexico <- readDNAStringSet(c("Wuhan_refseq.fasta","1_Estado_de_Mexico.fasta", "2_CDMX.fasta",
                                    "3_Jalisco.fasta", "4_Veracruz.fasta", "5_Puebla.fasta",
                                    "6_Guanajuato.fasta", "7_Nuevo_Leon.fasta", "8_Chiapas.fasta",
                                    "9_Michoacan.fasta", "10_Oaxaca.fasta"))
# rMSA (GitHub-only) wraps external aligners such as MAFFT.
# install.packages("remotes")
# remotes::install_github("mhahsler/rMSA", force = T)
library("rMSA")
# Align all genomes with MAFFT (slow); a precomputed result is loaded below.
Mafft_aln <- mafft(x = hCoV19_Mexico)
Mafft_aln
# save(Mafft_aln, file = "Mafft_aln.RData")
load("Mafft_aln.RData")
# --- Parsimony-informative sites (PIS) of the alignment ---
#install.packages("ips")
library(ips)
PI_1 <- as.DNAbin(Mafft_aln)
# PIS of the raw alignment: as a fraction, as a count, and as site indices.
PI_1_frac <- pis(PI_1, what = "frac", use.ambiguities = FALSE)
PI_1_abs <- pis(PI_1, what = "abs", use.ambiguities = FALSE)
PI_1_ind <- pis(PI_1, what = "ind", use.ambiguities = FALSE)
PI_1_frac
PI_1_abs
PI_1_ind
library(ape)
# Remove alignment columns whose gap count exceeds nrow(PI_1) - 4.
Mafft_aln_nogaps <- deleteGaps(PI_1, gap.max = nrow(PI_1)-4)
dim(Mafft_aln)
dim(Mafft_aln_nogaps)
# Recompute PIS on the gap-reduced alignment.
PI_2_frac <- pis(Mafft_aln_nogaps, what = "frac", use.ambiguities = FALSE)
PI_2_abs <- pis(Mafft_aln_nogaps, what = "abs", use.ambiguities = FALSE)
PI_2_ind <- pis(Mafft_aln_nogaps, what = "ind", use.ambiguities = FALSE)
PI_2_frac
PI_2_abs
PI_2_ind
# Keep only the informative columns; label rows as Wuhan + 20 per state.
PI_hCoV19 <- Mafft_aln_nogaps[ , c(PI_2_ind)]
Estados <- c("Wuh", paste0("E_Mex_", seq("1", "20")), paste0("CDMX_", seq("1", "20")),
             paste0("Jal_", seq("1", "20")), paste0("Ver_", seq("1", "20")),
             paste0("Pue_", seq("1", "20")), paste0("Gua_", seq("1", "20")),
             paste0("NL_", seq("1", "20")), paste0("Chi_", seq("1", "20")),
             paste0("Mich_", seq("1", "20")), paste0("Oax_", seq("1", "20")))
rownames(PI_hCoV19) <- Estados
rownames(PI_hCoV19)
# save(PI_hCoV19, file = "PI_hCoV19.RData")
# write.nexus.data(PI_hCoV19, file = "PI_hCoV19.nex")
# write.FASTA(PI_hCoV19, file = "PI_hCoV19.fa")
# --- Distance matrix and dendrogram from the informative sites ---
library(seqinr)
library(msa)
PI_hCoV19 <- readDNAMultipleAlignment("PI_hCoV19.fa")
PI_hCoV19 <- msaConvert(PI_hCoV19, type="seqinr::alignment")
# Pairwise similarity-based distances, then average-linkage clustering.
D <- dist.alignment(PI_hCoV19, "similarity")
as.matrix(D)
Dendograma <- hclust(D, "average")
plot(Dendograma)
# Convert to a phylo object for export as Newick (used by FigTree).
Dendo <- as.phylo(Dendograma)
# write.tree(phy = Dendo, file="Dendo.newick")
# install.packages("png")
library(png)
# Pre-rendered FigTree view of the exported tree.
knitr::include_graphics("Figtree_IPS.png")
# --- Metric multidimensional scaling (bios2mds), coloured by state ---
# install.packages("bios2mds")
library(bios2mds)
PI_hCoV19 <- import.fasta("PI_hCoV19.fa")
# Build the group file: sequence names, state labels, one colour per state.
Nombres <- Estados
Estados <- c("Wuh", rep("E_Mex", 20), rep("CDMX", 20),
             rep("Jal", 20), rep("Ver", 20), rep("Pue", 20),
             rep("Gua", 20), rep("NL", 20), rep("Chi", 20),
             rep("Mich", 20), rep("Oax", 20))
Colores <- c("blue", rep("turquoise", 20), rep("purple3", 20),
             rep("mediumspringgreen", 20), rep("lightpink1", 20), rep("sienna1", 20),
             rep("magenta", 20), rep("darkred", 20), rep("navyblue", 20),
             rep("darkgoldenrod1", 20), rep("lightcoral", 20))
# Strip any literal double quotes from the labels before writing the CSV.
Nombres <- sapply(Nombres, function(x) gsub("\"", "", x))
Estados <- sapply(Estados, function(x) gsub("\"", "", x))
Colores <- sapply(Colores, function(x) gsub("\"", "", x))
Grupos <- cbind(Nombres, Estados, Colores)
Grupos <- Grupos[ ,-1]
# write.csv(Grupos, file = "Grupos.csv")
# Pairwise difference matrix, MDS, then scree plot plus 2D/3D views.
Dist_mat <- mat.dif(PI_hCoV19, PI_hCoV19)
Dist_mat
mmds_hCoV19 <- mmds(Dist_mat, group.file = "C:/Users/52442/Desktop/Proyecto_genomica/Grupos.csv")
scree.plot(mmds_hCoV19$eigen.perc, lab = TRUE, title = "IPS de hCoV19")
mmds.2D <- mmds.2D.plot(mmds_hCoV19, title = "IPS de hCoV19")
mmds.3D <- mmds.3D.plot(mmds_hCoV19, title = "IPS de hCoV19")
# --- Network analysis of the haplotype edge list (igraph) ---
library(igraph)
# Edge table (presumably exported from PopART -- confirm); drop column 2.
Ady_list <- read.table("FinalytalTABLE.txt")[ ,c(-2)]
Ady_list <- as.matrix(Ady_list)
colnames(Ady_list) <- c("V1", "V2")
Red_popart <- graph_from_edgelist(Ady_list, directed = F)
layout <- layout.fruchterman.reingold(Red_popart)
plot(Red_popart, layout = layout, vertex.size = 4,
     vertex.label = NA, edge.arrow.size = .1, vertex.color="gray50")
plot(degree.distribution(Red_popart))
# Community detection with three different algorithms for comparison.
eb <- edge.betweenness.community(Red_popart)
lb <- label.propagation.community(Red_popart)
cs <- cluster_spinglass(Red_popart)
plot(eb, Red_popart, layout = layout, vertex.size = 4,
     vertex.label = NA, edge.arrow.size = .1, vertex.color="gray50")
plot(lb, Red_popart, layout = layout, vertex.size = 4,
     vertex.label = NA, edge.arrow.size = .1, vertex.color="gray50")
plot(cs, Red_popart, layout = layout, vertex.size = 4,
     vertex.label = NA, edge.arrow.size = .1, vertex.color="gray50")
# Centrality measures, reported for the first vertex only.
eccentricity(Red_popart)[1]
betweenness(Red_popart)[1]
closeness(Red_popart)[1]
|
c8ba855642580550820b5dee7c9401bfa408871d | 3d1ec18944e584c2f00e2b9902dcaaccb79c8c41 | /man/qdata.Rd | c3246487a37f0a3e0f622a2a43237ceae1953e95 | [] | no_license | tudou2015/cranvas | a84ffebf61fac235959cefb8acbd4c7bdb0d7d58 | af082a2a1cb09d42ca95c0021f8046df5054240f | refs/heads/master | 2020-06-10T18:52:07.441557 | 2015-03-13T17:03:09 | 2015-03-13T17:03:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,564 | rd | qdata.Rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{qdata}
\alias{qdata}
\title{Create a mutaframe from data with attributes for interaction}
\usage{
qdata(data, color = "gray15", border = color, size = 4, brushed = FALSE, visible = TRUE,
...)
}
\arguments{
\item{data}{a data frame (it will be coerced to a data frame if it is not)}
\item{color}{colors of graphical elements (default dark gray) corresponding
to rows of data; it can be a vector of valid R colors, or a name of
variable in \code{data} (must be either a factor or a numeric variable), or
an R expression to calculate colors; \code{color} is used to fill the
interior of graphical elements}
\item{border}{colors for the border of graphical elements (e.g. rectangles);
\code{NA} means to suppress the border}
\item{size}{sizes of rows; possible values are similar to \code{color}, but
when using a variable to generate sizes, it must be a numeric variable}
\item{brushed}{a logical vector indicating which rows are brushed (default
all \code{FALSE})}
\item{visible}{a logical vector indicating which rows are visible (default
all \code{TRUE})}
}
\value{
a mutaframe with attributes for interaction
}
\description{
This function will first check if the names of some pre-defined row
attributes (e.g. \code{.color}, \code{.brushed}) exist in the column names of
the data (will issue an error if they do); then append these columns to the
original data to create an augmented data as a
\code{\link[plumbr]{mutaframe}}; in the end add some attributes to the
mutaframe for the purpose of interaction (mainly the \code{\link{brush}}
object and the linking specification).
}
\details{
When the three arguments \code{color}, \code{border} and \code{size} take
values as variable names in \code{data}, default palettes will be used to
generate colors and sizes. The sequential color gradient palette
(\code{\link[scales]{seq_gradient_pal}}) will be applied to continuous
variables, and the hue palette (\code{\link[scales]{hue_pal}}) will be
applied to categorical variables. The area palette
(\code{\link[scales]{area_pal}}) is used to create a size vector when the
size variable is continuous. An attribute \code{attr(data, 'Scales')} is
attached to the returned mutaframe, which will help specific plots to
generate legends. This attribute is of the form \code{list(color =
list(title, variable, palette))}. Whenever any component is changed, the
corresponding aesthetics will be updated automatically; for example, if we
change the palette function for \code{color}, the colors \code{data$.color}
will be updated using the new palette. See \code{\link{color_pal<-}} for a
list of functions on how to modify scales information.
}
\examples{
library(cranvas)
str(tennis)
## more Aces, closer to red; less, blue; higher speed, larger points
qtennis <- qdata(tennis, color = aces, size = serve.speed)
qhist(aces, data = qtennis, main = "Number of Aces")
ls.str(attr(qtennis, "Scales")) # the scales information
selected(qtennis)[1:10] <- TRUE # brush the first 10 cases
b <- brush(qtennis) # the brush object
b$style # style the brush rectangle
b$style$color <- "brown" # a brown brush
b$color # color of brushed elements
b$color <- "cyan" # brushed elements become cyan
attr(qtennis, "Shadow") # should be NULL, since no missing values here
## we can also use the default dark gray
qtennis <- qdata(tennis)
qhist(double.faults, data = qtennis)
cranvas_off()
}
\author{
Yihui Xie <\url{http://yihui.name}>
}
\seealso{
\code{\link[plumbr]{mutaframe}}, \code{\link{brush}}
}
|
0861608c7a08e64edb4f616b2c4c4c70437008f8 | fd6fc65d8014e1a6e75bbf81f88fc4b2bd78ea95 | /man/glbin_lcd.Rd | da73be1eebd3f78d3f71aacc7693bb763eac632c | [] | no_license | jeliason/glbinc | 4e622d1b09427074f5ca487220be850839770963 | 072979519bef0fcb79993ab2926577e245768acf | refs/heads/master | 2023-03-16T07:59:39.289718 | 2019-07-30T08:17:13 | 2019-07-30T08:17:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 908 | rd | glbin_lcd.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glbin-lcd.R
\name{glbin_lcd}
\alias{glbin_lcd}
\title{Block coordinate gradient descent for logistic group lasso}
\usage{
glbin_lcd(X, y, offset, index, eps = 1e-04, lambda, lambda.min = 0.01,
nlambda = 50, dfmax = ncol(X) + 1, verb = 1, std = TRUE, alpha = 1,
maxit = 1000, stability_eps = 1e-05)
}
\arguments{
\item{X}{covariate/design matrix. intercept is taken to be the first column!}
\item{y}{response 0/1 vector}
\item{offset}{offset terms}
\item{index}{grouping. set 0 or NA for unpenalised terms. if not given, assumes X[,1] is intercept}
\item{eps}{convergence criterion}
\item{lambda}{vector of lambdas}
\item{lambda.min}{fraction of max lambda to go down to}
\item{nlambda}{number of penalties}
\item{dfmax}{max df, stop if reached}
\item{verb}{verbosity}
}
\description{
Like in Breheny and Huang 2009
}
|
c5da3514c9bb8243621656c9941a0be2a40ea416 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/funData/examples/dimSupp.Rd.R | 715bd2061f9cb2fa019f20a6ff5e9e93c877d6ae | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 570 | r | dimSupp.Rd.R | library(funData)
### Name: dimSupp
### Title: Support dimension of functional data
### Aliases: dimSupp
### ** Examples
# Univariate, one-dimensional domain: argvals is a single grid vector
object1 <- funData(argvals = 1:5, X = rbind(1:5, 6:10))
dimSupp(object1)
# Univariate, two-dimensional domain: argvals is a list of two grids
object2 <- funData(argvals = list(1:10, 1:5), X = array(rnorm(100), dim = c(2,10,5)))
dimSupp(object2)
# Univariate, irregular: one argument grid per observation
irregObject <- irregFunData(argvals = list(1:5, 2:4), X = list(2:6, 3:5))
dimSupp(irregObject)
# Multivariate: combines the univariate elements built above
multiObject <- multiFunData(object1, object2)
dimSupp(multiObject)
|
48e7521b9bba452e087fef3898b0ebb24ed744c9 | 8cf88ae109da417b513b5411cb85b6a33ce0c16c | /Functions/clean catch data.R | 73d018b7796ab1619f4b567643a0f360fea7ed68 | [
"MIT"
] | permissive | cheesesnakes/sea-snake-resource-use | 33e8db58a9d1124dd03c2afd5d50dab7b414f97c | 27e16f4eeae2832693563fa826cf0579a07da04c | refs/heads/master | 2023-06-29T01:54:02.465837 | 2021-08-05T09:32:11 | 2021-08-05T09:32:11 | 265,837,034 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,505 | r | clean catch data.R | #importing catch data and fishing info
# Raw inputs: small-scale fisheries (ssf), trawler catch, trawler low-value
# bycatch (lvb), snake gut contents, and trawler trip metadata.
# NOTE(review): 'ï..Date' below is the first CSV column name mangled by a
# UTF-8 BOM -- confirm it still matches the files before renaming.
ssf = read.csv("Data/Fisheries Catch Data_SSF.csv")
trawler_catch = read.csv("./Data/Fisheries Catch Data_Trawler.csv")
trawler_lvb = read.csv("./Data/Fisheries Catch Data_LVB.csv")
gutcontent = read.csv("./Data/Sea_snakes_gut_content_2018-19.csv")
trawler_info = read.csv("./Data/Fisheries Catch Data_Trawler Info.csv")
# Tonnage per trip
# Trawler: total landed weight per date x boat.
tonnage_tr = trawler_catch%>%
  rename(Date = ï..Date, Boat.Name = Boat.name)%>%
  group_by(Date, Boat.Name)%>%
  summarise(Total.Catch..kg. = sum(Weight..kg.))%>%
  mutate(Gear.Type = "Trawler")%>%
  drop_na()
# Small-scale fisheries: one distinct total-catch row per gear deployment.
tonnage_gn = ssf%>%
  rename(Date = ï..Date)%>%
  mutate(Gear.Type = substr(ssf$Gear.Id, 1, 2))%>%
  mutate(Gear.Type = if_else(Gear.Type == "GN", "Gill Net", "Beach Seine"))%>%#getting gear type from the Gear.Id prefix
  select(Date, Boat.Name, Gear.Type, Total.Catch..kg.)%>%
  distinct()
# Combine, keeping only gill nets and trawlers, and write out.
tonnage <- bind_rows(tonnage_gn, tonnage_tr)%>%
  filter(Gear.Type == "Gill Net" | Gear.Type == "Trawler")
write.csv(tonnage, "./Data/catch tonnage.csv")
# Extract a Scientific.Name -> Family lookup table from the trawler bycatch;
# it is used below to attach families to the other datasets.
fish_fam = trawler_lvb%>%
  left_join(trawler_info, "Sample")%>%
  unite(Scientific.Name, c("Genus", "Species"), sep = " ")%>%#creating column for scientific name
  filter(Scientific.Name != " ")%>%
  dplyr::select(Scientific.Name, Family)%>%
  distinct()
write.csv(fish_fam, "./Data/fish_fam.csv")
# Joining fishing info to trawler low value bycatch and standardising to the
# common schema (Gear.Type, Sample, Family, Weight.g, Class, Haul.time, No.hauls).
trawler_lvb = trawler_lvb%>%
  left_join(trawler_info, "Sample")%>%
  mutate(Date = dmy(Date.x))%>%
  unite(Sample, c(Date, Boat.name), remove = F)%>%
  unite(Scientific.Name, c("Genus", "Species"), sep = " ")%>%#creating column for scientific name
  dplyr::select(Sample, Scientific.Name, Family, Total.Species.Weight..g.,
                Trash..Discards, Fishing.type, Avg..no..of.hauls.day, Avg..haul.duration..hours.)%>%
  rename(Weight.g = Total.Species.Weight..g.,# weight is for all of one species in sample
         Haul.time = Avg..haul.duration..hours.,
         No.hauls = Avg..no..of.hauls.day,
         Gear.Type = Fishing.type,
         Class = Trash..Discards)%>%
  mutate(Haul.time = 60*(Haul.time))%>%#converting haul time from hours to minutes
  distinct(Sample, Scientific.Name, .keep_all = T)%>%#removing repeats
  group_by(Gear.Type, Sample, Family)%>%
  summarise(Weight.g = sum(Weight.g, na.rm = T),# calculating weight of family in sample
            Class = last(Class),
            Haul.time = last(Haul.time),
            No.hauls = last(No.hauls))
# Standardising the small-scale fisheries data to the same schema, then
# aggregating species -> family within each sample.
ssf = ssf%>%
  rename(Date = ï..Date)%>%
  unite(Scientific.Name, c(Genus, Species), sep = " ", remove = F)%>%
  mutate(Gear.Type = substr(ssf$Gear.Id, 1, 2))%>%
  mutate(Gear.Type = if_else(Gear.Type == "GN", "Gill Net", "Beach Seine"))%>%#getting gear type
  left_join(fish_fam)%>%#adding families
  dplyr::select(Gear.Id, Scientific.Name, Family, Weight..g.,
                Category, Gear.Type, Haul.time..min.)%>%
  rename(Sample = Gear.Id,
         Weight.g = Weight..g.,
         Haul.time = Haul.time..min.,
         Class = Category)%>%
  mutate(No.hauls = 1)%>%# SSF records are counted as one haul each
  group_by(Gear.Type, Sample,Family, Scientific.Name)%>%
  summarise(Weight.g = sum(Weight.g, na.rm = T),#calculating weight of species in sample
            Class = last(Class),
            Haul.time = last(Haul.time),
            No.hauls = last(No.hauls))%>%
  group_by(Gear.Type, Sample, Family)%>%
  summarise(Weight.g = sum(Weight.g, na.rm = T),#calculating weight of family in sample
            Class = last(Class),
            Haul.time = last(Haul.time),
            No.hauls = last(No.hauls))
# Trawler commercial catch: convert kg to g and aggregate to family level.
trawler_catch = trawler_catch%>%
  rename(Date = ï..Date,
         Scientific.Name = Species)%>%
  mutate(Date = dmy(Date))%>%
  unite(Sample, c(Date, Boat.name), remove = F)%>%
  left_join(fish_fam)%>%
  dplyr::select(Sample, Scientific.Name, Family, Weight..kg., Class)%>%
  rename(Weight.g = Weight..kg.)%>%
  mutate(Weight.g = 1000*Weight.g,# weights were recorded in kg
         Gear.Type = "Trawler")%>%
  group_by(Gear.Type, Sample,Family, Scientific.Name)%>%
  summarise(Weight.g = sum(Weight.g, na.rm = T),#calculating weight of species in sample
            Class = last(Class))%>%
  group_by(Gear.Type, Sample, Family)%>%
  summarise(Weight.g = sum(Weight.g, na.rm = T),#calculating weight of family in sample
            Class = last(Class))
# Combining catch data (gill net + trawler only) and writing the tidy file.
catch <- bind_rows(ssf, trawler_lvb, trawler_catch)%>%
  filter(Gear.Type == "Trawler" | Gear.Type == "Gill Net")
write.csv(catch, "./Data/catch.csv")
|
63da5baae51985b275dee45735fcb20040c49f4f | e863d45e013b72f5d24127fb0eb44aad00930d23 | /MM703/w09.R | ecb7d50808c5fdcc0dddb1110f858336e611a82a | [] | no_license | pkkirilov/MSc-Data-Analytics | 31d52696c474b8aaa02e76d039d628189d8dc982 | 969ed88bd5a32f31a1a8e09e6cd699e466870981 | refs/heads/master | 2021-01-20T17:02:44.366619 | 2017-11-26T10:14:51 | 2017-11-26T10:14:51 | 82,820,580 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,324 | r | w09.R | #Lecture 9 Continuous dist
# Worked answers for Lecture 9 (continuous distributions, plus Poisson
# refreshers).  Each expression simply prints the requested quantity.
#Lec example -- Normal(mean = 12, sd = 3)
1-pnorm(12, 12, 3)
sum(dnorm(9:14, 12, 3))
pnorm(15,12,3)-pnorm(9, 12,3)
pnorm(15,12,3)
qnorm(0.75,12,3)
#Q1 -- Poisson at rates 1 and 2
dpois(0,1)
dpois(1,1)
ppois(1,1)
ppois(2,1)
1-ppois(1,1)
dpois(0,2)
dpois(1,2)
ppois(1,2)
ppois(2,2)
1-ppois(1,2)
#Q2 -- Poisson, rate 1
dpois(3,1)
ppois(2,1)
1-ppois(4,1)
ppois(3,1)-ppois(0,1)
#Q3 -- Poisson with a scaled rate
5/20 #gives 0.25 psm
5/20*16 #same as variance due to poisson properties
dpois(0,4)
dpois(6,4)
ppois(6,4)
1-sum(dpois(0:5,4)) #or 1-ppois(5,4)
#Q4 -- Poisson, rate 15
dpois(0,15)
#Q5 -- Exponential (rate parameterisation)
1-pexp(70,0.2)
#Q6 -- Exponential / Poisson link
pexp(1.5,0.4)-pexp(1,0.4)
dexp(1,0.4)
dpois(0,0.4) #or 1-pexp(1,0.4)
1-ppois(1,0.4)
#Q7 -- Poisson counts and exponential waiting times, rate 3.5
dpois(3,3.5)
1-ppois(2,3.5)
dpois(9,3.5*3)
dpois(3,3.5)^3 #as the breakdown happens independently
pexp(1,3.5)
pexp(96/168,3.5)-pexp(12/168,3.5)
pexp(4/7,3.5)-pexp(3/7,3.5)
#Q8 -- independent exponential survival times (lower.tail = FALSE)
pexp(100,1/1000, FALSE)*pexp(100,1/300,FALSE)*pexp(100,1/150,FALSE)
#Q9 -- five independent exponential components
1-pexp(40,1/80, FALSE)^5
#Q10 -- Uniform(0, 2)
punif(1,0,2)
1-punif(1.5,0,2)
punif(1.5,0,2)-punif(0.5,0,2)
#Q11 -- Uniform(227.5, 232.5)
1/dunif(230,227.5,232.5)
punif(231.6,227.5,232.5)-punif(229.3,227.5,232.5)
#for a continuous dist the prob to take ANY fixed value is 0
1-punif(130,227.5,232.5)
#Q12 -- Normal(6, 2)
1-pnorm(8,6,2)
pnorm(8,6,2)
pnorm(8,6,2)-pnorm(6,6,2)
pnorm(0,6,2)
1-qnorm(0.25,6,2)
1-qnorm(0.75,6,2)
#Q13 -- Normal(450, 100)
pnorm(500,450,100)-(pnorm(400,450,100))
1-pnorm(480,450,100)
qnorm(0.1,450,100,FALSE)
#Q14 -- inverting a normal quantile
18-0.6*qnorm(0.2)
|
adb9928a7f4a19ba0ce93080e3f04f0cb94ff885 | 585935570659d04383685d3f8acd03d5f0efb81a | /src/scripts/findPredictionError_boot_Top.R | 67e4f52cfa8672239febdcd470002df71888c42b | [] | no_license | debmandal/Surprisingly-Popular-Voting | d68e0eda0fa0721c5b5f37322cac1f7b24c5205d | 27238902a66129d2c4a52fe33f5b148ae638e4f3 | refs/heads/main | 2023-05-10T10:06:10.614052 | 2021-06-09T16:59:09 | 2021-06-09T16:59:09 | 364,456,833 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,433 | r | findPredictionError_boot_Top.R | responses <- read_csv('~/Documents/SPVoting-Analysis/Analysis/question_responses.csv')
# use regex to find the predictions: each response appears to be of the form
# "[signal][prediction]" (two bracketed lists), so the prediction is the text
# after the last '[' and the signal the text before the first ']'.
responses$prediction <- gsub(".*\\[", "", responses$response)
# Strip brackets/whitespace and split the comma-separated values so that
# options, prediction, and signal become list-columns of character vectors.
responses$options <- strsplit(gsub('\\[|\\]|[[:space:]]', '', responses$options), ",")
responses$prediction <- strsplit(gsub('\\[|\\]|[[:space:]]', '', responses$prediction), ",")
responses$signal <- gsub("\\].*", "", responses$response)
responses$signal <- strsplit(gsub('\\[|\\]|[[:space:]]', '', responses$signal), ",")
# Relabel the raw treatment/domain codes; the label order must match the
# order of the original factor levels.
responses$treatment <- as.factor(responses$treatment)
levels(responses$treatment) <- c('Top-None', 'Top-Top', 'Top-Rank', 'Rank-None', 'Rank-Top', 'Rank-Rank')
responses$domain <- as.factor(responses$domain)
levels(responses$domain) <- c('Geography','Movies','Paintings')
# 0/1 top-choice loss: 0 when the first entry of `r1` equals `alt`
# (both compared as integers via strtoi), 1 otherwise.
TALoss <- function(r1, alt)
{
  predicted_top <- strtoi(r1[[1]][1])
  if (predicted_top == strtoi(alt)) {
    return(0)
  }
  return(1)
}
# Distance for a top-choice report: the 1-based position of `alt` inside the
# ranking `r1`, plus a constant 0.5 offset (NA when `alt` is absent).
KTDist.top <- function(r1, alt)
{
  ranking <- c(r1)
  match(alt, ranking) + 0.5
}
# Kendall tau distance between two rankings of the same items.
# `r1` is the reference ranking, `r2` the reported one (vectors or lists;
# both are flattened with c()).  Returns the number of item pairs whose
# relative order differs between the two rankings.
# Generalised from the original hard-coded 4-item version: any common
# length now works, and results for 4-item inputs are unchanged.
KTDist.list <- function(r1,r2)
{
  order1 <- c(r1)
  order2 <- c(r2)
  # Position of each reported item within the reference ranking;
  # match() replaces the original element-by-element lookup loop.
  locs <- match(order2, order1)
  n <- length(locs)
  if (n < 2)
  {
    return(0)
  }
  dist <- 0
  for(i in 1:(n - 1))
  {
    for(j in (i+1):n)
    {
      if(locs[j] < locs[i])
      {
        dist <- dist + 1
      }
    }
  }
  return(dist)
}
# Per-row top-choice loss between the true options and the prediction column.
getTopError <- function(predictions) {
  map2_dbl(predictions$options, predictions$prediction, TALoss)
}
# Per-row loss scoring only the top item of each predicted ranking.
getRankError <- function(predictions) {
  predicted_tops <- map(predictions$prediction, function(ranking) ranking[[1]][1])
  map2_dbl(predictions$options, predicted_tops, TALoss)
}
# Per-row top-choice loss between the true options and the reported signal.
getTopErrorSig <- function(signals){
  map2_dbl(signals$options, signals$signal, TALoss)
}
# Per-row loss scoring only the top item of each reported (signal) ranking.
getRankErrorSig <- function(signals){
  reported_tops <- map(signals$signal, function(ranking) ranking[[1]][1])
  map2_dbl(signals$options, reported_tops, TALoss)
}
# Accumulator: mean error and CI bounds per treatment x response type.
# NOTE(review): `:=` inside tibble() is rlang syntax pulled in via the
# tidyverse; plain `=` would behave the same here.
df <- tibble(
  treatment := NA,
  type := NA,
 # domain := NA,
  y := 0,
  ymin := 0,
  ymax := 0
)
#for(dom in c('Geography', 'Movies', 'Paintings'))
#{
# Prediction error for all domains combined: Top treatments score the single
# predicted option, Rank treatments score the top item of the ranking.
# NOTE(review): 1.975 below is presumably intended as the normal 97.5%
# quantile (~1.96) for a 95% interval -- confirm.
for(tr in c('Top-Top', 'Top-Rank', 'Rank-Top', 'Rank-Rank'))
{
  if(tr %in% c('Top-Top', 'Rank-Top'))
  {
   # dftop <- responses %>% filter(treatment == tr & domain == dom) %>% select(question, options, prediction)
    dftop <- responses %>% filter(treatment == tr) %>% select(question, options, prediction)
    res <- getTopError(dftop)
    print(c(tr, 'prediction', mean(res), 1.975*sd(res)/sqrt(length(res))) )
    #df <- df %>% add_row(treatment = tr, type = 'Prediction', domain=dom, y=mean(res), ymin = mean(res) - 1.975*sd(res)/sqrt(length(res)), ymax = mean(res) + 1.975*sd(res)/sqrt(length(res)) )
    df <- df %>% add_row(treatment = tr, type = 'Prediction', y=mean(res), ymin = mean(res) - 1.975*sd(res)/sqrt(length(res)), ymax = mean(res) + 1.975*sd(res)/sqrt(length(res)) )
  }
  else if(tr %in% c('Top-Rank', 'Rank-Rank'))
  {
    #dfrank <- responses %>% filter(treatment == tr & domain == dom) %>% select(question, options, prediction)
    dfrank <- responses %>% filter(treatment == tr) %>% select(question, options, prediction)
    res <- getRankError(dfrank)
    print(c(tr, 'prediction', mean(res), 1.975*sd(res)/sqrt(length(res))) )
    #df <- df %>% add_row(treatment = tr, type = 'Prediction', domain=dom, y=mean(res), ymin = mean(res) - 1.975*sd(res)/sqrt(length(res)), ymax = mean(res) + 1.975*sd(res)/sqrt(length(res)) )
    df <- df %>% add_row(treatment = tr, type = 'Prediction', y=mean(res), ymin = mean(res) - 1.975*sd(res)/sqrt(length(res)), ymax = mean(res) + 1.975*sd(res)/sqrt(length(res)) )
  }
}
# Signal (vote) error for all domains combined: Top treatments report a single
# vote, Rank treatments a ranking whose top item is scored.
for(tr in c('Top-Top', 'Top-Rank', 'Rank-Top', 'Rank-Rank'))
{
  if(tr %in% c('Top-Top', 'Top-Rank'))
  {
    dftop <- responses %>% filter(treatment == tr) %>% select(question, options, signal)
    #dftop <- responses %>% filter(treatment == tr & domain == dom) %>% select(question, options, signal)
    res <- getTopErrorSig(dftop)
    print(c(tr, 'Vote', mean(res), 1.975*sd(res)/sqrt(length(res))) )
    #df <- df %>% add_row(treatment = tr, type = 'Vote', domain=dom, y=mean(res), ymin = mean(res) - 1.975*sd(res)/sqrt(length(res)), ymax = mean(res) + 1.975*sd(res)/sqrt(length(res)) )
    df <- df %>% add_row(treatment = tr, type = 'Vote', y=mean(res), ymin = mean(res) - 1.975*sd(res)/sqrt(length(res)), ymax = mean(res) + 1.975*sd(res)/sqrt(length(res)) )
  }
  else if(tr %in% c('Rank-Top', 'Rank-Rank'))
  {
    #dfrank <- responses %>% filter(treatment == tr & domain == dom) %>% select(question, options, signal)
    dfrank <- responses %>% filter(treatment == tr) %>% select(question, options, signal)
    res <- getRankErrorSig(dfrank)
    print(c(tr, 'Vote', mean(res), 1.975*sd(res)/sqrt(length(res))) )
    #df <- df %>% add_row(treatment = tr, type = 'Vote', domain=dom, y=mean(res), ymin = mean(res) - 1.975*sd(res)/sqrt(length(res)), ymax = mean(res) + 1.975*sd(res)/sqrt(length(res)) )
    df <- df %>% add_row(treatment = tr, type = 'Vote', y=mean(res), ymin = mean(res) - 1.975*sd(res)/sqrt(length(res)), ymax = mean(res) + 1.975*sd(res)/sqrt(length(res)) )
  }
}
#}
|
98eb772926f385e70a8c3bc7d7d7f0ef65d241dc | 4848ca8518dc0d2b62c27abf5635952e6c7d7d67 | /R/ATS_21_i_2lp1.R | e6c125b5bc0b20fc1fa520947b8a56b93a59b319 | [] | no_license | regenesis90/KHCMinR | ede72486081c87f5e18f5038e6126cb033f9bf67 | 895ca40e4f9953e4fb69407461c9758dc6c02cb4 | refs/heads/master | 2023-06-28T00:29:04.365990 | 2021-07-22T04:44:03 | 2021-07-22T04:44:03 | 369,752,159 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,230 | r | ATS_21_i_2lp1.R | #' Average Travel Speed of One-direction, i Section in 2+1 lane road
#'
#' This function follows <Formula 7-15> in KHCM(2013), p.187.
#' @param ATS_up The speed of the one-way basic section of the i-th two-lane road (upstream part of the 2+1 lane road)
#' @param ATS_dn Traffic speed in one direction downstream of the i-th 2+1 lane road
#' @param fs_pl Travel speed correction factor for the i-th overtaking lane section. See \code{\link{fs_pl_2lp1}}
#' @param L_up Analysis section length (m) upstream of the overtaking lane section among the i-th analysis target section
#' @param L_dn The length of the analysis target section before entering the i-th overtaking lane (m)
#' @param L_pl Section length of the i-th overtaking lane (m)
#' @param f_w_ATS_up Lane width and side clearance width of the downstream section of the i-th overtaking lane. See \code{\link{f_w_ATS_2l}}
#' @param f_w_ATS_pl Lane width and side clearance width of the i-th overtaking lane section. See \code{\link{f_w_ATS_2l}}
#' @param f_w_ATS_dn Lane width and side clearance width of the downstream section of the i-th overtaking lane. See \code{\link{f_w_ATS_2l}}
#' @keywords
#' @export ATS_21_i_2lp1 Average travel speed of a 2+1 lane road
#' @examples
ATS_21_i_2lp1 <- function(ATS_up = NULL, ATS_dn = NULL, fs_pl = NULL, L_up = NULL, L_dn = NULL, L_pl = NULL, f_w_ATS_up = NULL, f_w_ATS_dn = NULL, f_w_ATS_pl = NULL){
  # All inputs are parallel vectors indexed by analysis section i; they must
  # share a common length.  Returns the sum of the per-section travel speeds
  # (ats_sum), or an error message string when the lengths disagree.
  if (length(ATS_up) == length(ATS_dn) & length(ATS_up) == length(fs_pl) & length(ATS_up) == length(L_up) & length(ATS_up) == length(L_dn) & length(ATS_up) == length(L_pl) & length(ATS_up) == length(f_w_ATS_up) & length(ATS_up) == length(f_w_ATS_dn) & length(ATS_up) == length(f_w_ATS_pl)){
    ats_sum <- 0
    for (i in seq_along(ATS_up)){
      # Speed-reduction ratio entering the passing-lane section.
      p <- (ATS_up[i] - f_w_ATS_up[i])/(ATS_up[i] - f_w_ATS_pl[i])
      # Speed-reduction ratio for the downstream section.
      q <- (ATS_up[i] * fs_pl[i] * p - f_w_ATS_pl[i])/(ATS_up[i] * fs_pl[i] * p - f_w_ATS_dn[i])
      # Length-weighted harmonic-mean speed over the three sub-sections.
      # BUG FIX: the downstream term previously used the whole L_dn/ATS_dn
      # vectors instead of element i, producing vector-valued (wrong) results
      # whenever more than one section was supplied.
      ats <- (L_up[i] + L_pl[i] + L_dn[i])/((L_up[i] / ATS_up[i]) + (L_pl[i] / (ATS_up[i] * fs_pl[i] * p)) + (L_dn[i] / (ATS_dn[i] * q)))
      ats_sum <- ats_sum + ats
    }
  }
  else {ats_sum <- 'Error : Length of each arguments must be same with each other. Please check that.'}
  ats_sum
}
|
c7dd267ace668aefba7156d640469596afd6b83f | 8aa563b38192da5f8a7cf0860c9decb792eea9c8 | /AstroDataSets/mmfs_data/CarinaFDR-Reg.R | 6d2aee00c77c87c5ba31b6d54adac8e4837c957b | [] | no_license | NabarunD/NPMLEmix | 45c55d041cd2229111a76b01970186f01cd09593 | f4095847202e1e01ca2380d1aff3b741f318cc17 | refs/heads/master | 2021-07-11T15:10:21.728904 | 2020-06-18T21:21:59 | 2020-06-18T21:21:59 | 157,926,617 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 343 | r | CarinaFDR-Reg.R |
data = read.table('carina.dat')
# Keep rows where columns V8 + V9 sum to a positive value (presumably
# membership/measurement counts -- confirm against the data description).
x = data[data$V8 + data$V9> 0,]
# x = x[x$V6 < 3,]
rv = x$V4; # represents the Radial velocity data of stars in the Carina galaxy
rr = x$V1; # NOTE(review): used below as a radial coordinate -- confirm units
th = x$V2; # used as an angle in the cos/sin projection below
mg = x$V6; # magnitude column
# Project (rr, th) to Cartesian coordinates; note r = sqrt(x1^2 + x2^2)
# is mathematically just abs(rr).
x1 = rr*cos(th); x2 = rr*sin(th);
r = sqrt(x1^2 +x2^2);
plot(x1,x2)
plot(r,rv)
plot(mg, rv)
#plot(r,rv)
# Kernel density estimate of the radial velocities.
dens = density(rv);
plot(dens)
|
bc5057f418d87792c88547e6fa78c666a51a2343 | ad28636649343e836cc2dff67a94e1876dbc6dd8 | /man/epidemic_timeseries_plot.Rd | 04dd2e525a9e0655aeb89e3ea1ebb2e96a194571 | [] | no_license | OJWatson/outbreakteachR | 63954ca86c0cef95e94adf43bd5a4b1f345a2d67 | db39fe1de7c4ccb013632a47ea5aaa3716ed8891 | refs/heads/master | 2020-12-30T13:20:04.423650 | 2018-11-21T10:47:07 | 2018-11-21T10:47:07 | 91,345,394 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 570 | rd | epidemic_timeseries_plot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/epidemic-timeseries-plot.R
\name{epidemic_timeseries_plot}
\alias{epidemic_timeseries_plot}
\title{Plot epidemic time series}
\usage{
epidemic_timeseries_plot(outbreak.dataset, title = NULL)
}
\arguments{
\item{outbreak.dataset}{Outbreak Dataset}
\item{title}{Plot title. If NULL then no title will be added. Defult = NULL}
}
\description{
\code{epidemic_timeseries_plot} takes output of \code{outbreak_dataset_read} and
plots SIR epidemic time series. Returns ggplot object for plotting.
}
|
1caf5488efdde453f2cc118d9cd7018f30e1f3f5 | 41edcef0f23e5e407ffe94654faf131973b8f398 | /scripts/3. rate-evo-psychs/server.R | 9a5a914a7830ac15927df0d05d46f93ca0436b53 | [
"MIT"
] | permissive | d-vanos/Twitter-Political-Ideology | 5f77d275ef6282a145125a7b94da4e9fa1e53be2 | bd406d7b37968417af7b1134a6605228a1042d5b | refs/heads/master | 2023-02-15T18:18:22.829461 | 2021-01-11T04:42:24 | 2021-01-11T04:42:24 | 282,373,238 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,718 | r | server.R |
library(shiny)
library(tidyverse)
library(DT)
library(googlesheets4)
library(googledrive)
# Shiny server for manual Twitter-profile classification ("evo" condition).
# Shows one extracted profile (by ID) at a time; the coder selects the row
# that matches the intended person, and selections are written back to a
# Google Sheet in batches of 10 IDs.
shinyServer(function(input, output) {

    # Cached OAuth token so the app can authenticate non-interactively.
    # NOTE(review): sheets_auth() belongs to an early googlesheets4 API;
    # current releases use gs4_auth() -- confirm against the installed version.
    options(
        gargle_oauth_cache = ".secrets",
        gargle_oauth_email = "dvanos@outlook.com.au"
    )
    drive_auth()
    sheets_auth()

    # FUNCTIONS
    # Reduce datasets down to the columns needed for coding. -99 is the
    # sheet's missing-value sentinel and is mapped back to NA here.
    reduce <- function(data){
        data <- data %>%
            select(ID, Name, Twitter_Bio, Twitter_Handle, selected) %>%
            mutate(ID = as.numeric(ID),
                   selected = as.numeric(selected),
                   selected = ifelse(selected == -99, NA, selected))
        return(data)
    }

    # Round down to the nearest multiple of 10 (used to locate the first row
    # of the last saved batch of IDs).
    round_down <- function(num){floor(num/10)*10}

    # Set reactive values (holds the working copy of the sheet).
    data <- reactiveValues()

    # EVO
    # Import data from the Google Sheet.
    evo_ID <- gs4_find("hbes_3_extract_user")$id
    evo_extracted <- read_sheet(evo_ID, sheet = 1)

    # Format
    evo_extracted <- reduce(evo_extracted)

    # Initialising counter: resume at the first ID whose 'selected' is still NA.
    evo_counter <- reactiveValues(countervalue = evo_extracted$'ID'[which(is.na(evo_extracted$selected))[1]])
    # NOTE(review): evo_NAs is the first unrated ID (or 0 when all are rated),
    # so 10 - evo_NAs can be far outside 0..10; presumably only its value
    # modulo the 10-wide batch matters -- confirm intended behavior.
    evo_NAs <- ifelse(is.na(evo_extracted$'ID'[which(is.na(evo_extracted$selected))[1]]), 0, evo_extracted$'ID'[which(is.na(evo_extracted$selected))[1]])
    evo_counter_upload <- reactiveValues(countervalue = 10-evo_NAs)

    data$evo_extracted <- evo_extracted

    # Creating tables: show only the candidate rows for the current ID.
    output$evo_table = DT::renderDataTable({data$evo_extracted %>%
            filter(ID == evo_counter$countervalue) %>%
            select(-ID)},
            selection = 'single',
            options = list(dom = 't', # Only shows table (no search, number of entries, etc.)
                           ordering=F)) # Stops sorting columns

    # Number of entries before upload
    output$evo_count <- renderText({
        paste("Number of entries before upload:", evo_counter_upload$countervalue) # print the latest value stored in the reactiveValues object
    })
    output$evo_total <- renderText({
        paste("Total left:", (max(data$evo_extracted$'ID') - evo_counter$countervalue))
    })

    # Click Next: mark all rows of the current ID as 0, then set the selected
    # DT row to 1, and advance the counters.
    observeEvent(input$evo_increase, {
        row_ID <- which(data$evo_extracted$'ID' == evo_counter$countervalue)
        data$evo_extracted$'selected'[row_ID] <- 0
        data$evo_extracted$'selected'[row_ID[1] + input$evo_table_rows_selected - 1] <- 1 # Of the people with a particular ID, selecting the correct row
        evo_counter$countervalue = evo_counter$countervalue + 1
        evo_counter_upload$countervalue = evo_counter_upload$countervalue - 1
    })

    # Click Back: step to the previous ID (selections already made are kept).
    observeEvent(input$evo_decrease, {
        evo_counter$countervalue = evo_counter$countervalue - 1
    })

    # Upload: manually push the partial batch (from the last multiple-of-10 ID
    # up to the current ID) into the sheet's 'selected' column (column R).
    observeEvent(input$evo_upload, {
        evo_current_row <- which(data$evo_extracted$'ID' == evo_counter$countervalue)[length(which(data$evo_extracted$'ID' == evo_counter$countervalue))]
        evo_last_saved_row <- which(data$evo_extracted$'ID' == round_down(evo_counter$countervalue))[1]
        evo_save_data <- as_tibble(data$evo_extracted$'selected'[evo_last_saved_row:evo_current_row])
        colnames(evo_save_data) <- "selected"
        # Convert NA back to the sheet's -99 sentinel before writing.
        evo_save_data <- evo_save_data %>% mutate(selected = ifelse(is.na(selected), -99, selected))
        # +1 skips the header row of the sheet.
        evo_range <- paste0("R", evo_last_saved_row+1, ":R", evo_current_row+1)
        range_write(ss = evo_ID,
                    data = evo_save_data,
                    range = evo_range,
                    col_names = FALSE)
    })

    # Saving the data every 10 people (automatic batch upload).
    observeEvent(evo_counter$countervalue %% 10 == 0, {
        if(evo_counter$countervalue > 5 & evo_counter$countervalue %% 10 == 0){ # Otherwise it has an issue with the countervalue
            start_row <- which(data$evo_extracted$'ID' == evo_counter$countervalue - 10)[1] # The first row of the countervalue 5 people ago
            current_row <- which(data$evo_extracted$'ID' == evo_counter$countervalue-1)[length(which(data$evo_extracted$'ID' == evo_counter$countervalue-1))] # The last row of the most recent countervalue
            evo_save_data <- as_tibble(data$evo_extracted$'selected'[start_row:current_row])
            evo_range <- paste0("R", start_row+1, ":R", current_row+1)
            range_write(ss = evo_ID,
                        data = evo_save_data,
                        range = evo_range,
                        col_names = FALSE)
            evo_counter_upload$countervalue = 10
        }
    })

    # Download the full working copy as CSV (NA written as empty cells).
    output$evo_download <- downloadHandler(
        filename = "evo_extracted.csv",
        content = function(file) {
            write_csv(data$evo_extracted, na = "", file)
        })
})
|
9a76d5b79248e9da4d3465f6f344608fb44b0a19 | bdab686e5894eec9db3f5bfd323735ab9f1b790a | /scripts/dvdt.R | b5c8c66eed4e5384ffc109bf40872375b19c0c73 | [] | no_license | larsgr/eve_analysis | 94f76377a510485fad9da9ffcbb361779b374caa | cf7868f89d3969f38d3b2828764a99a74f2642cf | refs/heads/master | 2020-05-16T13:33:58.415241 | 2019-04-23T19:13:37 | 2019-04-23T19:13:37 | 183,078,400 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,732 | r | dvdt.R | # This file contains functions to calculate gene likelihoods assuming an individual beta value between genes.
# To run the calculation as a whole execute the function calculateLLIndivBeta. The total likelihood result
# will be returned from the function. The full results will be stored to an .RData file; to view these results
# call load("./results/llindivbetaresults.RData").
# Author: Rori Rohlfs, Lars Gronvold, John Mendoza
# Date 2/25/19
#Include all our necessary libraries
source('scripts/EVEcore.R')
# Objective function for optim(): negative OU log-likelihood of one gene,
# with all four parameters (theta, sigma2, alpha, beta) free.
LLPerGeneIndivBeta <- function(param.matrix.row, tree, gene.data.row, index.expand)
{
  -logLikOU(theta  = param.matrix.row[1],
            sigma2 = param.matrix.row[2],
            alpha  = param.matrix.row[3],
            beta   = param.matrix.row[4],
            tree, gene.data.row, index.expand)
}
# Maximum likelihood estimation of OU parameters with an individual beta per gene.
#
# Args:
#   tree:       phylo object (ape) relating the species.
#   gene.data:  matrix of expression values, one row per gene.
#   colSpecies: species assignment of each column of gene.data; defaults to
#               the column names.
#
# Returns: a list with one element per row of gene.data; each element is the
#   optim() result for that gene (value = minimized negative log-likelihood),
#   or NULL when the optimization failed (a warning is emitted in that case).
fitIndivBeta <- function(tree, gene.data, colSpecies = colnames(gene.data))
{
  # Calculate the per gene starting-parameter matrix based on the gene data.
  initial.param.matrix <- initialParams(gene.data, colSpecies)
  # Match the column species with the phylogeny tip labels.
  index.expand <- match(colSpecies, tree$tip.label)
  # Upper bound on alpha, chosen so that the shortest terminal branch still
  # retains ~1% of the ancestral covariance.
  alphaMax <- -log(.01) / min(tree$edge.length[tree$edge[, 2] <= Ntip(tree)])
  # For each gene, optimize (theta, sigma2, alpha, beta) under box constraints.
  res <- lapply(seq_len(nrow(gene.data)), function(row) {
    # Error handling to catch non-finite optim/function values that arise
    # when rows with degenerate starting parameters are optimized.
    tryCatch(
      optim(initial.param.matrix[row, ], fn = LLPerGeneIndivBeta, gr = NULL,
            tree, gene.data[row, ], index.expand,
            method = "L-BFGS-B",
            lower = c(-Inf, 1e-10, 1e-10, 1e-10),
            upper = c(Inf, Inf, alphaMax, Inf)),
      error = function(e) {
        # BUG FIX: previously the handler returned the (invisible) value of
        # warning(), i.e. a message string, which broke downstream `$value`
        # access. Return NULL so callers can test for failure explicitly.
        warning(paste(e$message, "at gene.data row", row), immediate. = TRUE)
        NULL
      })
  })
  return(res)
}
# Objective function for optim(): negative OU log-likelihood of one gene when
# beta is fixed at a value shared across genes; only (theta, sigma2, alpha)
# come from param.matrix.row.
LLPerGeneSharedBeta <- function(param.matrix.row, betaShared, tree, gene.data.row, index.expand)
{
  -logLikOU(theta  = param.matrix.row[1],
            sigma2 = param.matrix.row[2],
            alpha  = param.matrix.row[3],
            beta   = betaShared,
            tree, gene.data.row, index.expand)
}
# Maximum likelihood estimation of OU parameters (theta, sigma2, alpha) for
# each gene, with beta fixed at the shared value betaShared.
#
# Args:
#   betaShared: value of beta shared by all genes.
#   tree:       phylo object (ape) relating the species.
#   gene.data:  matrix of expression values, one row per gene.
#   colSpecies: species assignment of each column of gene.data.
#
# Returns: a list of optim() results, one per row of gene.data; each 'value'
#   is the minimized negative log-likelihood for that gene.
fitSharedBeta <- function(betaShared, tree, gene.data, colSpecies = colnames(gene.data))
{
  # Per-gene starting values; only the first three are optimized here.
  initial.param.matrix <- initialParams(gene.data, colSpecies)
  # Match the column species with the phylogeny tip labels.
  index.expand <- match(colSpecies, tree$tip.label)
  # Upper bound on alpha based on the shortest terminal branch
  # (same rationale as in fitIndivBeta).
  alphaMax <- -log(.01) / min(tree$edge.length[tree$edge[, 2] <= Ntip(tree)])
  # Optimize the three free parameters for each gene in turn.
  res <- lapply(seq_len(nrow(gene.data)), function(row) {
    # FIX: bounds now have length 3 to match the three free parameters.
    # The original passed 4-element bounds; optim() silently truncated the
    # extra element (rep_len to length(par)), which obscured intent.
    optim(initial.param.matrix[row, 1:3], fn = LLPerGeneSharedBeta,
          method = "L-BFGS-B",
          lower = c(-Inf, 1e-10, 1e-10), upper = c(Inf, Inf, alphaMax),
          betaShared = betaShared,
          tree = tree, gene.data.row = gene.data[row, ], index.expand = index.expand)
  })
  return(res)
}
# Total negative log-likelihood across all genes for a given shared beta.
# Used as the objective of optimize() in betaSharedTest(); prints progress to
# the console and warns when some per-gene optimizations did not converge.
LLSharedBeta <- function(betaShared, ...)
{
  cat("LLSharedBeta: beta =", betaShared)
  fits <- fitSharedBeta(betaShared, ...)
  # Each fit's 'value' is the per-gene minimized negative log-likelihood.
  perGeneNegLL <- vapply(fits, function(fit) fit$value, numeric(1))
  sumLL <- sum(perGeneNegLL)
  convergenceCodes <- vapply(fits, function(fit) fit$convergence, numeric(1))
  nNotConverged <- sum(convergenceCodes != 0)
  if (nNotConverged > 0) {
    cat(" ", nNotConverged, "gene(s) did not converge!")
  }
  cat(" LL =", sumLL, "\n")
  # Return the sum of -LL for all genes.
  sumLL
}
# Likelihood-ratio test comparing a model with an individual beta per gene
# against a model with a single beta shared by all genes.
#
# Args:
#   tree:       phylo object relating the species.
#   gene.data:  matrix of expression values, one row per gene.
#   colSpecies: species assignment of each column of gene.data.
#
# Returns: a list with
#   indivBetaRes  - per-gene optim() fits with free beta,
#   sharedBeta    - maximum likelihood estimate of the shared beta,
#   sharedBetaRes - per-gene optim() fits with beta fixed at sharedBeta,
#   LRT           - per-gene test statistic 2*(logLik_indiv - logLik_shared).
betaSharedTest <- function(tree, gene.data, colSpecies = colnames(gene.data)){
  cat("fit with individual betas...\n")
  indivBetaRes <- fitIndivBeta(tree, gene.data, colSpecies)
  cat("Estimate shared beta...\n")
  # BUG FIX: colSpecies is now forwarded through optimize() to LLSharedBeta
  # (and hence fitSharedBeta). Previously a non-default colSpecies was
  # silently ignored during the shared-beta estimation step, so the shared
  # beta could be estimated against the wrong species assignment.
  sharedBetaFit <- optimize(f = LLSharedBeta, interval = c(0.0001, 100),
                            tree = tree, gene.data = gene.data,
                            colSpecies = colSpecies)
  sharedBeta <- sharedBetaFit$minimum
  cat("fit with shared beta =", sharedBeta, "...\n")
  sharedBetaRes <- fitSharedBeta(sharedBeta, tree, gene.data, colSpecies)
  # optim() minimized the negative log-likelihood, so logLik = -value and the
  # LRT statistic is 2*(logLik_indiv - logLik_shared).
  LRT <- mapply(function(indivBetaRow, sharedBetaRow)
  {
    (2 * -(indivBetaRow$value)) - (2 * -(sharedBetaRow$value))
  }, indivBetaRes, sharedBetaRes)
  return( list(indivBetaRes = indivBetaRes,
               sharedBeta = sharedBeta,
               sharedBetaRes = sharedBetaRes,
               LRT = LRT) )
}
|
108bfd09ccb9f93d6a3e716f4c517c63e7af0292 | a3541fa9afdcbc4bd1360afda4b6f8d170244889 | /man/ejscreen.acs.rename.Rd | c3da8821f3daed8bd62f3db41c7e143db8ceb40f | [] | no_license | ejanalysis/ejscreen | 4349236260c94dd9a9d0cfdcae237adebcec2d8a | 6af10b7d3b47c683cb512fd4792c2eef0e1d695a | refs/heads/master | 2023-05-27T11:59:13.144072 | 2023-05-25T23:40:52 | 2023-05-25T23:40:52 | 40,103,218 | 7 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,046 | rd | ejscreen.acs.rename.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ejscreen.acs.rename.R
\name{ejscreen.acs.rename}
\alias{ejscreen.acs.rename}
\title{Rename Fields of ACS Data for Use in EJSCREEN}
\usage{
ejscreen.acs.rename(acsraw, folder = getwd(), formulafile)
}
\arguments{
\item{acsraw}{Data.frame of raw data counts for each block group, such as population or number of Hispanics.}
\item{folder}{Default is getwd(). Specifies path for where to read from (if formulafile specified) and write to.}
\item{formulafile}{Optional name of a file containing the formulas. If not specified,
the function loads the default dataset via data('ejscreenformulas').}
}
\value{
Returns a data.frame with some or all of input fields, plus calculated new fields.
}
\description{
Start with raw counts from demographic survey data, and environmental data,
and rename fields to use friendly variable names.
}
\seealso{
\code{\link{ejscreenformulas}} \code{\link{change.fieldnames.ejscreen.csv}} analyze.stuff::change.fieldnames()
}
|
c732aa7609f6e7947c1e30fbde2756777f879838 | 6a87801a407c16c1fc2c79b9e0a99adfbbe69821 | /phase2/P-arm-loss.R | a3f8863c0ee41265c384ac2d2ab4ef76a92eb258 | [] | no_license | Shicheng-Guo/marshfield | 57b2d1416e501665489004eebf4f81cec64ef4f6 | b06bca005aed3fb653e5113b6037612cc527b260 | refs/heads/master | 2021-05-03T13:39:30.180468 | 2018-11-21T00:11:15 | 2018-11-21T00:11:15 | 120,510,507 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 377 | r | P-arm-loss.R |
# FIX: the plink invocation below is a shell command, not R; it was previously
# pasted into this script uncommented, which made the file unrunnable as R.
# Run it outside R (or via system()) to generate the transposed ped file:
#   plink --bfile ../S_Hebbring_Unr.Guo --keep parmloss.txt --chr 23 \
#         --allow-no-sex --recode --tab --transpose --out 1176608-1-0238062177

setwd("C:\\Users\\guosa\\Downloads")

# Each .tped row is one X-chromosome marker: V4 = base-pair position,
# V5/V6 = the individual's two alleles. A marker is heterozygous (het = 1)
# when the two alleles differ.
data <- read.table("1176608-1-0238062177.tped", head = F)
het <- apply(data, 1, function(x) sum(!as.character(x[5]) == as.character(x[6])))
# Plot heterozygosity along the X chromosome.
plot(het ~ data$V4, col = "red", cex = 2, xlab = "Chromosome X", ylab = "Heterozygote")
|
4b01fc5e128e9305ca679c4d4030fc2587c21721 | 3ab965f5bd927e483e38c6ebd8aac1334f9161cf | /R/calc_furcation.R | a66c5dd01104093dda33c1a8c30bd97527871526 | [] | no_license | cran/rehh | 9fe8561d8ae917a9a39b2ba1977f2f8a80b97890 | 8352ae4f74a46cf03a5388ef8ed522ab4d8a625b | refs/heads/master | 2023-04-07T08:26:08.911479 | 2021-09-15T12:40:02 | 2021-09-15T12:40:02 | 17,699,118 | 7 | 3 | null | null | null | null | UTF-8 | R | false | false | 7,671 | r | calc_furcation.R | #'calculate furcation trees around a focal marker
#'@description Calculate furcation trees around a focal marker. A furcation tree captures
#'in greater detail than EHH values the decrease of extended haplotype homozygosity at
#'increasing distances from the selected focal marker.
#'@param haplohh an object of class haplohh (see \code{\link{data2haplohh}}).
#'@param mrk integer representing the number of the focal marker within the haplohh object
#'or string representing its ID/name.
#'@param allele a vector of alleles as coded internally, i.e. in case of polarized alleles,
#'0 represents the ancestral, 1 or higher the derived alleles.
#'If \code{NULL}, all alleles of the focal marker are considered.
#'@param limhaplo if there are less than \code{limhaplo} chromosomes that can be used for
#'the calculation, it is stopped. This is useful in case of missing data,
#'which lead to a successive exclusion of haplotypes: the further away from the focal marker
#'the less haplotypes are evaluated.
#'@param phased logical. If \code{TRUE} (default), chromosomes are expected to be phased.
#'If \code{FALSE}, consecutive chromosomes are assumed to
#'belong to diploid individuals and furcation trees are limited to within individuals which
#'are homozygous at the focal marker.
#'@param polarized logical. Affects only the order of furcations. If \code{TRUE} (default), the ancestral allele
#'becomes the first furcation and derived alleles are sorted by their internal coding. Otherwise all alleles
#'are sorted by their internal coding.
#'@details A haplotype furcation tree visualizes the breakdown
#'of LD at increasing distances from the focal marker.
#'The root of each tree is an allele of the focal marker, which in turn is identified
#'by a vertical dashed line.
#'Moving either to the "left" or to the "right" of the focal marker, each further marker is an opportunity for a node;
#'the tree either divides or does not, based on whether alleles at that marker
#'distinguish between hitherto identical extended haplotypes.
#'The thickness of the lines corresponds to the number of chromosomes sharing an extended haplotype.
#'@return An object of class furcation, containing the furcation structure of the specified alleles at the focal marker.
#'@seealso \code{\link{plot.furcation}}, \code{\link{calc_haplen}}.
#'@references Sabeti, P.C. and Reich, D.E. and Higgins, J.M. and Levine, H.Z.P and Richter, D.J. and Schaffner, S.F. and Gabriel, S.B. and Platko, J.V. and Patterson, N.J. and McDonald, G.J. and Ackerman, H.C. and Campbell, S.J. and Altshuler, D. and Cooper, R. and Kwiatkowski, D. and Ward, R. and Lander, E.S. (2002). Detecting recent positive selection in the human genome from haplotype structure. Nature, 419, 832-837.
#'@examples #example haplohh object (280 haplotypes, 1424 SNPs)
#'#see ?haplohh_cgu_bta12 for details
#'data(haplohh_cgu_bta12)
#'#plotting a furcation diagram for both ancestral and derived allele
#'#from the marker "F1205400"
#'#which display a strong signal of selection
#'f <- calc_furcation(haplohh_cgu_bta12, mrk = "F1205400")
#'plot(f)
#'@export
#'@importFrom methods new
calc_furcation <-
function(haplohh,
mrk,
allele = NA,
limhaplo = 2,
phased = TRUE,
polarized = TRUE) {
##check parameters
if (!(is.haplohh(haplohh))) {
stop("Data is not a valid haplohh object.", call. = FALSE)
}
if (limhaplo < 2) {
stop("limhapcount must be an integer greater than 1.", call. = FALSE)
}
if (is.numeric(mrk)) {
mrk <- as.integer(mrk)
if (mrk < 1) {
stop(paste0("No marker numbers smaller than 1 allowed."), call. = FALSE)
}
if (mrk > nmrk(haplohh)) {
stop(
paste0(
"The marker number ",
mrk,
" is greater than the number of markers in the data set (",
nmrk(haplohh),
")."
),
call. = FALSE
)
}
} else{
mrk <- as.character(mrk)
if (!(mrk %in% mrk.names(haplohh))) {
stop(paste0("Marker '",
mrk,
"' not found."), call. = FALSE)
}
mrk <- which(mrk.names(haplohh) == mrk)
}
## order alleles by their internal coding
mrk_allele <- sort(unique(haplo(haplohh)[, mrk]))
## create description of alleles ("Ancestral", "Major", etc.)
if (polarized) {
if (sum(mrk_allele != 0L) > 1) {
index_other <- seq_len(sum(mrk_allele != 0L))
} else{
index_other <- ""
}
description <- paste0("Derived", index_other)
# if ancestral allele is present, set it first
if (0L %in% mrk_allele) {
mrk_allele <- c(0L, mrk_allele[mrk_allele != 0L])
description <- c("Ancestral", description)
}
} else{
if (length(mrk_allele) > 2) {
index_other <- seq_along(mrk_allele[-1])
} else{
index_other <- ""
}
description <- c("Major", paste0("Minor", index_other))
}
if (anyNA(allele) | length(allele) == 0) {
allele <- mrk_allele
} else{
if (!is.numeric(allele)) {
stop("Allele has to be specified by an integer.")
}
allele <- as.integer(allele)
for (i in allele) {
if (!(i %in% mrk_allele)) {
stop(paste0("Marker has no allele '", i, "'."))
}
}
}
##perform calculation
f <- furcation(
mrk.name = ifelse(
is.null(mrk.names(haplohh)),
as.character(mrk),
mrk.names(haplohh)[mrk]
),
position = positions(haplohh)[mrk],
xlim = range(positions(haplohh)),
nhap = nhap(haplohh)
)
#calculate allele furcations
for (a in allele) {
# calculate ftree for left and right side
allelefurcation <- new("allelefurcation",
allele = a,
description = description[which(mrk_allele == a)])
for (side in 1:2) {
# first resp. last marker defines side
allelefurcation_list <- .Call(
"CALL_FURCATION",
haplohh@haplo,
nhap(haplohh),
mrk,
ifelse(side == 1, 1L, nmrk(haplohh)),
a,
limhaplo,
phased
)
ftree <- new("ftree")
#change indexing from C to R
ftree@node_parent <- allelefurcation_list[[2]] + 1L
#replace marker index by its chromosomal position
ftree@node_pos <-
positions(haplohh)[allelefurcation_list[[1]] + 1L]
#transform to logical
ftree@node_with_missing_data <-
as.logical(allelefurcation_list[[3]])
#change indexing from C to R
ftree@label_parent <- allelefurcation_list[[4]] + 1L
#add node "names" for human readability;
#this is NOT needed for computation
names(ftree@node_pos) <- seq_along(ftree@node_pos)
names(ftree@node_parent) <- seq_along(ftree@node_parent)
names(ftree@node_with_missing_data) <-
seq_along(ftree@node_with_missing_data)
names(ftree@label_parent) <- seq_along(ftree@label_parent)
if (side == 1) {
allelefurcation@left <- ftree
} else{
allelefurcation@right <- ftree
}
}
allelefurcation@count <- as.integer(sum(!(
is.na(allelefurcation@left@label_parent) &
is.na(allelefurcation@right@label_parent)
)))
if (allelefurcation@count > 0) {
f[[as.character(a)]] <- allelefurcation
}
}
return(f)
}
|
5698e43e0071d02810680f0b12bae2dd7b1488b4 | 253a070d7b4c5a2cd9b4c7b863642599d2067ca3 | /movielens_rscript.R | ac6c88a1b21a53972d1bab64a09f2e8eccff056b | [] | no_license | Dinicharia/movielens | e40d1b0c34937081cdce39b129f56c56ea9b28b1 | 5bf2636901ce52f375fccde5d9c49a4315911785 | refs/heads/main | 2023-06-29T23:16:24.731714 | 2021-07-02T04:00:35 | 2021-07-02T04:00:35 | 377,329,981 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,684 | r | movielens_rscript.R | ##########################################################
# Create edx set, validation set (final hold-out test set)
##########################################################
# Note: this process could take a couple of minutes
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
library(tidyverse)
library(caret)
library(data.table)
#Adding more libraries
library(ggplot2)
library(lubridate)
library(knitr)
library(kableExtra)
library(anytime)
library(Matrix)
library(matrixStats)
library(dplyr)
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
ratings <- fread(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
col.names = c("userId", "movieId", "rating", "timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(movieId),
title = as.character(title),
genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data
set.seed(1, sample.kind="Rounding")
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
semi_join(edx, by = "movieId") %>%
semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
rm(dl, ratings, movies, test_index, temp, movielens, removed)
head(edx)
head(validation)
#unique users and movies in the edx dataset
edx %>%
summarize(users = n_distinct(userId),
movies = n_distinct(movieId),
avg_rating = round(mean(edx$rating),2),
uniq_genres = n_distinct(edx$genres))
#unique users and movies in the validation dataset
validation %>%
summarize(users = n_distinct(userId),
movies = n_distinct(movieId),
avg_rating = round(mean(validation$rating),2),
uniq_genres = n_distinct(validation$genres))
#Summary of the edx and validation data frames using kable
summary(edx) %>% kable(caption = "Top rows of edx data frame") %>%
kable_styling(font_size = 10, position = "center",
latex_options = c("scale_down", "HOLD_position"))
summary(validation) %>% kable(caption = "Top rows of validation data frame") %>%
kable_styling(font_size = 10, position = "center",
latex_options = c("scale_down", "HOLD_position"))
# Distribution of ratings by movieId
edx %>%
dplyr::count(movieId) %>%
ggplot(aes(n)) +
geom_histogram( bins=20, color = "black") +
scale_x_log10() +
ggtitle("Ratings by movieID") +
labs(x="movieId" ,y="number of ratings")
# Distribution of ratings by userID
edx %>%
dplyr::count(userId) %>%
ggplot(aes(n)) +
geom_histogram( bins=20, color = "black") +
scale_x_log10() +
ggtitle("Ratings by userID") +
labs(x="userId" , y="number of ratings")
#Star ratings
#options(scipen = 100) # To avoid scientific notation(..optional)
edx %>%
group_by(rating) %>%
summarize(count = n()) %>%
ggplot(aes(x = rating, y = count)) +
geom_point()+
labs(x="Star Rating", y="Number of ratings") +
ggtitle("Number of ratings per star")
#the top ten title and genres
edx %>%
group_by(title, genres) %>%
summarize(count=n()) %>%
arrange(desc(count))
# the top 15 movies which count the major number of ratings
top_title <- edx %>%
group_by(title) %>%
summarize(count=n()) %>%
top_n(15,count) %>%
arrange(desc(count))
#bar chart of top_title
top_title %>%
ggplot(aes(x=reorder(title, count), y=count)) +
geom_bar(stat="identity", fill="black") + coord_flip(y=c(0, 40000)) +
labs(x="Title", y="Number of Ratings") +
geom_text(aes(label= count), hjust=-0.1, size=3) +
labs(title="Top 15 Movie Titles")
#movie ratings by genres
memory.limit(size=56000) #expanding memory allocated to R
edx %>% separate_rows(genres, sep = "\\|") %>%
group_by(genres) %>%
summarize(count = n()) %>%
arrange(desc(count))
#transforming userID and movieId as factors for analysis
edx$userId <- as.factor(edx$userId)
edx$movieId <- as.factor(edx$movieId)
edx$genres <- as.factor(edx$genres)
edx$timestamp <- as.POSIXct(edx$timestamp, origin = "1970-01-01") # Convert timestamp to POSIXct.
edx <- edx %>%
mutate(title = str_trim(title)) %>%
extract(title, c("title_tmp", "year"), # extracting the release year of the movie from the title column
regex = "^(.*) \\(([0-9 \\-]*)\\)$",
remove = F) %>%
mutate(year = if_else(str_length(year) > 4, #createing year column.
as.integer(str_split(year, "-",
simplify = T)[1]),
as.integer(year))) %>%
mutate(title = if_else(is.na(title_tmp), title, title_tmp)) %>%
select(-title_tmp) %>%
mutate(genres = if_else(genres == "(no genres listed)",
`is.na<-`(genres), genres))
edx <- edx %>% mutate(year_rate = year(timestamp)) # creating a column for the year the movie was rated
# It extracts the year that the rate was given by the user.
edx <- edx %>% select(-timestamp) # removing the timsestamp column (optional)
edx$genres <- as.factor(edx$genres)
head(edx)
summary(edx$rating) #edx rating summary
#genres rating using the mean
edx %>% group_by(genres) %>% summarize(avg_rating = mean(rating)) %>% arrange(desc(avg_rating))
edx <- edx %>% select(userId, movieId, rating) #Three parameter of interest
# Create the index
test_index <- createDataPartition(edx$rating, times = 1, p = .2, list = F)
#Creating the train and test sets
train <- edx[-test_index, ] # The train set
test <- edx[test_index, ] # The test set
test <- test %>% # The same movieId and usersId appears in both set. (Not the same cases)
semi_join(train, by = "movieId") %>%
semi_join(train, by = "userId")
dim(train)
dim(test)
#generating the model
mu_0 <- mean(train$rating) # Mean rating on train set
RMSE_0 <- RMSE(test$rating,mu_0) # RMSE in the test set.
RMSE_0
# obtaining prediction using the mean from movie and user effect
mu <- mean(train$rating)
m_avgs <- train %>%
group_by(movieId) %>%
summarize(mi = mean(rating - mu)) #movie effect
u_avgs <- test %>%
left_join(m_avgs, by = "movieId") %>%
group_by(userId) %>%
summarize(ui = mean(rating - mu -mi)) #user effect
predicted_ratings <- test %>%
left_join(m_avgs, by = "movieId") %>%
left_join(u_avgs, by = "userId") %>%
mutate(pred = mu +mi +ui) %>% .$pred
RMSE_1 <- RMSE(predicted_ratings, test$rating)
RMSE_1
#prediction using the validation dataset
validation <- validation %>% select(userId, movieId, rating) #we are interested in userId, movieId, & rating
#treating userId & movieId as factors
validation$userId <- as.factor(validation$userId)
validation$movieId <- as.factor(validation$movieId)
validation <- validation[complete.cases(validation), ]
#The prediction
mu <- mean(train$rating)
m_avgs <- train %>%
group_by(movieId) %>%
summarize(mi = mean(rating - mu))
u_avgs <- test %>%
left_join(m_avgs, by = "movieId") %>%
group_by(userId) %>%
summarize(ui = mean(rating - mu -mi))
predicted_ratings <- test %>%
left_join(m_avgs, by = "movieId") %>%
left_join(u_avgs, by = "userId") %>%
mutate(pred = mu +mi +ui) %>% .$pred
predicted_val <- validation %>%
left_join(m_avgs, by = "movieId") %>%
left_join(u_avgs, by = "userId") %>%
mutate(pred = mu +mi +ui) %>% .$pred
RMSE_VAL <- RMSE(predicted_val, validation$rating, na.rm = T)
RMSE_VAL
#regularizing test data
#with regularization, we evaluate different values for lambda, which a tuning parameter.
lambda_values <- seq(0, 10, .25)
t_RMSE_reg <- sapply(lambda_values, function(l){
mu <- mean(train$rating)
mi <- train %>%
group_by(movieId) %>%
summarize(mi = sum(rating - mu)/(n()+l)) #movie effect
ui <- train %>%
left_join(mi, by="movieId") %>%
group_by(userId) %>%
summarize(ui = sum(rating -mi - mu)/(n()+l)) #user effect
predicted_ratings <- test %>%
left_join(mi, by = "movieId") %>%
left_join(ui, by = "userId") %>%
mutate(pred = mu +mi +ui) %>% .$pred
return(RMSE(predicted_ratings, test$rating))
})
t_lambda <- lambda_values[which.min(t_RMSE_reg)]
t_lambda #Lambda minimizing the RMSE
#regularization on validation dataset
val_RMSE_reg <- sapply(lambda_values, function(l){
mu <- mean(train$rating)
mi <- train %>%
group_by(movieId) %>%
summarize(mi = sum(rating - mu)/(n()+l)) #movie effect
ui <- train %>%
left_join(mi, by="movieId") %>%
group_by(userId) %>%
summarize(ui = sum(rating -mi - mu)/(n()+l)) #user effect
predicted_val_reg <- validation %>%
left_join(mi, by = "movieId") %>%
left_join(ui, by = "userId") %>%
mutate(pred = mu +mi +ui) %>% .$pred
return(RMSE(predicted_val_reg, validation$rating, na.rm = T))
})
val_lambda <- lambda_values[which.min(val_RMSE_reg)]
val_lambda # Lambda minimizing the RMSE
min_rmse <- min(val_RMSE_reg) # Best RMSE
min_rmse
|
4b43b1bf83a06b7eb8619aa37f22aa47867ed686 | e064b65266998795d9cc0f6c8dfff6ea5be2e3ea | /R/Shiny.R | dc9b38d66eb58ce88ad63417a85b365ba409b294 | [
"Apache-2.0"
] | permissive | Jake-Gillberg/CohortDiagnostics | 2e3d2d30f06e9b515e90fabc1250eb0374829268 | 98af47ed799849f5650607ed3b20880dbb14208e | refs/heads/master | 2023-08-27T13:57:03.444582 | 2021-10-12T17:39:03 | 2021-10-12T17:39:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,910 | r | Shiny.R | # Copyright 2021 Observational Health Data Sciences and Informatics
#
# This file is part of CohortDiagnostics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Launch the Diagnostics Explorer Shiny app
#' @param connectionDetails An object of type \code{connectionDetails} as created using the
#' \code{\link[DatabaseConnector]{createConnectionDetails}} function in the
#' DatabaseConnector package, specifying how to connect to the server where
#' the CohortDiagnostics results have been uploaded using the
#' \code{\link{uploadResults}} function.
#' @param resultsDatabaseSchema The schema on the database server where the CohortDiagnostics results
#' have been uploaded.
#' @param vocabularyDatabaseSchema (Deprecated) Please use vocabularyDatabaseSchemas.
#' @param vocabularyDatabaseSchemas (optional) A list of one or more schemas on the database server where the vocabulary tables are located.
#' The default value is the value of the resultsDatabaseSchema. We can provide a list of vocabulary schema
#' that might represent different versions of the OMOP vocabulary tables. It allows us to compare the impact
#' of vocabulary changes on Diagnostics.
#' @param dataFolder A folder where the premerged file is stored. Use
#' the \code{\link{preMergeDiagnosticsFiles}} function to generate this file.
#' @param dataFile (Optional) The name of the .RData file with results. It is commonly known as the
#' Premerged file.
#' @param runOverNetwork (optional) Do you want the app to run over your network?
#' @param port (optional) Only used if \code{runOverNetwork} = TRUE.
#' @param launch.browser Should the app be launched in your default browser, or in a Shiny window.
#' Note: copying to clipboard will not work in a Shiny window.
#' @param aboutText Text (using HTML markup) that will be displayed in an About tab in the Shiny app.
#' If not provided, no About tab will be shown.
#'
#' @details
#' Launches a Shiny app that allows the user to explore the diagnostics
#'
#' @export
launchDiagnosticsExplorer <- function(dataFolder = "data",
                                      dataFile = "PreMerged.RData",
                                      connectionDetails = NULL,
                                      resultsDatabaseSchema = NULL,
                                      vocabularyDatabaseSchema = NULL,
                                      vocabularyDatabaseSchemas = resultsDatabaseSchema,
                                      aboutText = NULL,
                                      runOverNetwork = FALSE,
                                      port = 80,
                                      launch.browser = FALSE) {
  # The database-backed mode of the Shiny app only supports PostgreSQL.
  # (`&&` instead of `&`: this is a scalar guard, not a vectorized mask.)
  if (!is.null(connectionDetails) &&
      connectionDetails$dbms != "postgresql") {
    stop("Shiny application can only run against a Postgres database")
  }
  if (!is.null(connectionDetails)) {
    # Database mode: local premerged files are ignored.
    dataFolder <- NULL
    dataFile <- NULL
    if (is.null(resultsDatabaseSchema)) {
      stop("resultsDatabaseSchema is required to connect to the database.")
    }
    # Back-compatibility: map the deprecated singular argument onto the
    # plural one when the latter was not explicitly set.
    if (!is.null(vocabularyDatabaseSchema) &&
        is.null(vocabularyDatabaseSchemas)) {
      vocabularyDatabaseSchemas <- vocabularyDatabaseSchema
      # BUG FIX: the warning previously told users to use
      # 'vocabularyDatabaseSchema' -- the very argument being deprecated.
      warning(
        'vocabularyDatabaseSchema option is deprecated. Please use vocabularyDatabaseSchemas.'
      )
    }
  }
  # Verify that all packages needed by the Shiny app are available before
  # attempting to launch it.
  ensure_installed("checkmate")
  ensure_installed("DatabaseConnector")
  ensure_installed("dplyr")
  ensure_installed("DT")
  ensure_installed("ggplot2")
  ensure_installed("ggiraph")
  ensure_installed("gtable")
  ensure_installed("htmltools")
  ensure_installed("lubridate")
  ensure_installed("pool")
  ensure_installed("purrr")
  ensure_installed("scales")
  ensure_installed("shiny")
  ensure_installed("shinydashboard")
  ensure_installed("shinyWidgets")
  ensure_installed("stringr")
  ensure_installed("SqlRender")
  ensure_installed("tidyr")
  ensure_installed("CirceR")
  ensure_installed("rmarkdown")
  appDir <-
    system.file("shiny", "DiagnosticsExplorer", package = "CohortDiagnostics")
  if (launch.browser) {
    options(shiny.launch.browser = TRUE)
  }
  if (runOverNetwork) {
    # Bind the app to the machine's LAN address so other hosts can reach it.
    # NOTE(review): this relies on the Windows-only 'ipconfig' command.
    myIpAddress <- system("ipconfig", intern = TRUE)
    myIpAddress <- myIpAddress[grep("IPv4", myIpAddress)]
    myIpAddress <- gsub(".*? ([[:digit:]])", "\\1", myIpAddress)
    options(shiny.port = port)
    options(shiny.host = myIpAddress)
  }
  # The app reads its configuration from the global environment; the settings
  # object is removed again when this function exits.
  shinySettings <- list(
    connectionDetails = connectionDetails,
    resultsDatabaseSchema = resultsDatabaseSchema,
    vocabularyDatabaseSchemas = vocabularyDatabaseSchemas,
    dataFolder = dataFolder,
    dataFile = dataFile,
    aboutText = aboutText
  )
  .GlobalEnv$shinySettings <- shinySettings
  on.exit(rm("shinySettings", envir = .GlobalEnv))
  shiny::runApp(appDir = appDir)
}
#' Premerge Shiny diagnostics files
#'
#' @description
#' This function combines diagnostics results from one or more databases into a single file. The result is a
#' single file that can be used as input for the Diagnostics Explorer Shiny app.
#'
#' It also checks whether the results conform to the results data model specifications.
#'
#' @param dataFolder folder where the exported zip files for the diagnostics are stored. Use
#' the \code{\link{runCohortDiagnostics}} function to generate these zip files.
#' Zip files containing results from multiple databases may be placed in the same
#' folder.
#' @param tempFolder A folder on the local file system where the zip files are extracted to. Will be cleaned
#' up when the function is finished. Can be used to specify a temp folder on a drive that
#' has sufficient space if the default system temp space is too limited.
#'
#' @export
# Merge exported diagnostics zip files (from one or more databases) into a
# single PreMerged.RData file that the Diagnostics Explorer Shiny app loads.
# Each zip is extracted, each CSV is validated against the results data model
# specification, and matching tables from all databases are appended.
preMergeDiagnosticsFiles <-
  function(dataFolder, tempFolder = tempdir()) {
    # Collect all result zip files (recursively); unzipFolder records where
    # each archive gets extracted below.
    zipFiles <-
      dplyr::tibble(
        zipFile = list.files(
          dataFolder,
          pattern = ".zip",
          full.names = TRUE,
          recursive = TRUE
        ),
        unzipFolder = ""
      )
    ParallelLogger::logInfo("Merging ", nrow(zipFiles), " zip files.")
    # Extraction root lives under tempFolder and is always removed on exit.
    unzipMainFolder <-
      tempfile("unzipTempFolder", tmpdir = tempFolder)
    dir.create(path = unzipMainFolder, recursive = TRUE)
    on.exit(unlink(unzipMainFolder, recursive = TRUE))
    # Unzip every archive into its own subfolder named after the zip file.
    for (i in 1:nrow(zipFiles)) {
      ParallelLogger::logInfo("- Unzipping ", basename(zipFiles$zipFile[i]))
      unzipFolder <-
        file.path(unzipMainFolder, sub(".zip", "", basename(zipFiles$zipFile[i])))
      dir.create(unzipFolder)
      zip::unzip(zipFiles$zipFile[i], exdir = unzipFolder)
      zipFiles$unzipFolder[i] <- unzipFolder
    }
    # Data model specification: expected tables, columns and types.
    specifications <- getResultsDataModelSpecifications()
    # Storing output in an environment for now. If things get too big, we may want to write
    # directly to CSV files for insertion into database:
    newEnvironment <- new.env()
    # Read one table's CSV from every unzipped folder, validate it, and bind
    # the rows from all databases; the merged table is assigned into `env`
    # under its camelCase name.
    processTable <- function(tableName, env) {
      ParallelLogger::logInfo("Processing table ", tableName)
      csvFileName <- paste0(tableName, ".csv")
      data <- dplyr::tibble()
      for (i in 1:nrow(zipFiles)) {
        if (csvFileName %in% list.files(zipFiles$unzipFolder[i])) {
          newData <-
            readr::read_csv(
              file.path(zipFiles$unzipFolder[i], csvFileName),
              col_types = readr::cols(),
              guess_max = min(1e6)
            )
          if (nrow(newData) > 0) {
            # Validation helpers (defined elsewhere in this package) repair
            # column names, coerce data types, and drop duplicate rows,
            # reporting the offending zip file when something is wrong.
            newData <- checkFixColumnNames(
              table = newData,
              tableName = tableName,
              zipFileName = zipFiles$zipFile[i],
              specifications = specifications
            )
            newData <- checkAndFixDataTypes(
              table = newData,
              tableName = tableName,
              zipFileName = zipFiles$zipFile[i],
              specifications = specifications
            )
            newData <- checkAndFixDuplicateRows(
              table = newData,
              tableName = tableName,
              zipFileName = zipFiles$zipFile[i],
              specifications = specifications
            )
            data <- appendNewRows(
              data = data,
              newData = newData,
              tableName = tableName,
              specifications = specifications
            )
          }
        }
      }
      if (nrow(data) == 0) {
        ParallelLogger::logInfo("- No data found for table ", tableName)
      } else {
        # snake_case on disk -> camelCase in the Shiny app's environment.
        colnames(data) <- SqlRender::snakeCaseToCamelCase(colnames(data))
        assign(SqlRender::snakeCaseToCamelCase(tableName),
               data,
               envir = env)
      }
    }
    invisible(lapply(unique(specifications$tableName), processTable, env = newEnvironment))
    ParallelLogger::logInfo("Creating PreMerged.Rdata file. This might take some time.")
    # Persist every merged table into one RData file next to the input zips.
    save(
      list = ls(newEnvironment),
      envir = newEnvironment,
      compress = TRUE,
      compression_level = 2,
      file = file.path(dataFolder, "PreMerged.RData")
    )
    # Release the (potentially large) merged tables.
    rm(list = ls(newEnvironment), envir = newEnvironment)
    ParallelLogger::logInfo("Merged data saved in ",
                            file.path(dataFolder, "PreMerged.RData"))
  }
#' Launch the CohortExplorer Shiny app
#'
#' @template CohortTable
#'
#' @template CdmDatabaseSchema
#'
#' @param connectionDetails An object of type \code{connectionDetails} as created using the
#' \code{\link[DatabaseConnector]{createConnectionDetails}} function in the
#' DatabaseConnector package.
#' @param cohortId The ID of the cohort.
#' @param sampleSize Number of subjects to sample from the cohort. Ignored if subjectIds is specified.
#' @param subjectIds A vector of subject IDs to view.
#'
#' @details
#' Launches a Shiny app that allows the user to explore a cohort of interest.
#'
#' @export
# Launch the bundled CohortExplorer Shiny app. Settings are passed to the app
# through the global variable `shinySettings` (the mechanism the packaged
# Shiny apps use for configuration); it is removed again when this function
# exits. This call blocks until the Shiny app is closed.
launchCohortExplorer <- function(connectionDetails,
                                 cdmDatabaseSchema,
                                 cohortDatabaseSchema,
                                 cohortTable,
                                 cohortId,
                                 sampleSize = 100,
                                 subjectIds = NULL) {
  # The app's dependencies are only needed here, so they are checked (and
  # optionally installed) at launch time rather than being hard package
  # dependencies.
  ensure_installed("shiny")
  ensure_installed("DT")
  ensure_installed("plotly")
  ensure_installed("RColorBrewer")
  ensure_installed("ggplot2")
  ensure_installed("magrittr")
  .GlobalEnv$shinySettings <-
    list(
      connectionDetails = connectionDetails,
      cdmDatabaseSchema = cdmDatabaseSchema,
      cohortDatabaseSchema = cohortDatabaseSchema,
      cohortTable = cohortTable,
      cohortDefinitionId = cohortId,
      sampleSize = sampleSize,
      subjectIds = subjectIds
    )
  # Clean up the global even if runApp() errors or is interrupted.
  on.exit(rm("shinySettings", envir = .GlobalEnv))
  appDir <-
    system.file("shiny", "CohortExplorer", package = "CohortDiagnostics")
  shiny::runApp(appDir)
}
# Borrowed from devtools:
# https://github.com/hadley/devtools/blob/ba7a5a4abd8258c52cb156e7b26bb4bf47a79f0b/R/utils.r#L44
# Is package `pkg` installed at version >= `version`?
# Returns FALSE (rather than erroring) when the package is absent.
is_installed <- function(pkg, version = 0) {
  # packageVersion() errors for missing packages; map that to NA.
  pkg_version <- tryCatch(utils::packageVersion(pkg), error = function(e) NA)
  if (is.na(pkg_version)) {
    return(FALSE)
  }
  pkg_version >= version
}
# Borrowed and adapted from devtools:
# https://github.com/hadley/devtools/blob/ba7a5a4abd8258c52cb156e7b26bb4bf47a79f0b/R/utils.r#L74
# Ensure that package `pkg` is installed, prompting the user to install it
# when running interactively, and stopping with an informative error
# otherwise. CirceR is not on CRAN, so it is installed from the OHDSI GitHub
# repository via remotes.
#
# utils::menu() and utils::install.packages() are fully qualified here for
# consistency with is_installed() above and so the code does not depend on
# the utils package being attached to the search path.
ensure_installed <- function(pkg) {
  if (!is_installed(pkg)) {
    msg <-
      paste0(sQuote(pkg), " must be installed for this functionality.")
    if (interactive()) {
      message(msg, "\nWould you like to install it?")
      if (utils::menu(c("Yes", "No")) == 1) {
        if (pkg == 'CirceR') {
          # Bootstrap remotes first, then pull CirceR from GitHub.
          ensure_installed("remotes")
          message(msg, "\nInstalling from Github using remotes")
          remotes::install_github("OHDSI/CirceR")
        } else {
          utils::install.packages(pkg)
        }
      } else {
        stop(msg, call. = FALSE)
      }
    } else {
      stop(msg, call. = FALSE)
    }
  }
}
|
e4512d88222ad13ac254a92d8d004b4ec2be9332 | 1749987a842830efe70397f8ca036b8ac0936356 | /man/ihazr.Rd | 05506795a92de0f25a1eac848f7a629995e341e8 | [] | no_license | webbkyle/ihazr | f30a5faf9bccc2dd78be3bbc73d119df0cf98b1b | e8d3ee5f8cf330cef885ee84d48c5b328d329f4c | refs/heads/master | 2020-03-25T11:56:02.928173 | 2018-10-25T16:24:20 | 2018-10-25T16:24:20 | 143,754,439 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 5,038 | rd | ihazr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ihazr.R
\name{ihazr}
\alias{ihazr}
\title{ihazr}
\usage{
ihazr(time, status, marker, buttons = TRUE, bandMax = 10,
width = NULL, height = NULL)
}
\arguments{
\item{time}{A vector of observed follow up times.}
\item{status}{A vector of status indicators, usually 0=alive, 1=dead.}
\item{marker}{A matrix or data frame or tibble of marker (covariate) values
where each column is a covariate and each row is an observation. The column
names will be used to identify each marker in the resulting interactive
application.}
\item{buttons}{TRUE or FALSE, where TRUE means each marker will have its own
button and FALSE means markers are selected via a dropdown menu. TRUE is
recommended for few markers and FALSE is recommended for many markers. The
default is to use buttons (TRUE).}
\item{bandMax}{A number representing the maximum bandwidth for the Epanechnikov
kernel. The interactive application has an area for graphically adjusting the
Epanechnikov kernel bandwidth for smoothing the time dimension. This value sets
the maximum bandwidth available. The number defaults to 10 but may be adjusted
depending on the scale of the \code{time} vector.}
\item{width}{The width of the html widget. The default is NULL, which results
in intelligent automatic sizing based on the widget's container.}
\item{height}{The height of the html widget. The default is NULL, which
results in intelligent automatic sizing based on the widget's container.}
}
\value{
A JavaScript D3.js web application. The top display is a scatterplot
of follow up time (x-axis) versus selected marker value (y-axis) with censoring
status indicated by the color/style of each point. The bottom display
dynamically updates nonparametric estimates of the Nelson-Aalen function and
conditional hazard function.
}
\description{
\code{ihazr} stands for "interactive hazard regression." The function can be
used as an exploratory analysis tool for survival data. \code{ihazr} provides
an R user interface for outputting a JavaScript D3.js web application. The
user provides a vector of survival times, a vector of censoring statuses, and
a matrix (or data.frame or tibble) of marker (covariate) values. The function
outputs an interactive application where the user can 1) select which marker
to plot against time and 2) graphically resize the time and marker bandwidths
for calculating a nonparametric conditional hazard function estimate.
}
\details{
This function creates an interactive web application for calculating
nonparametric conditional hazard function estimates for univariate marker
values. Details for the conditional hazard estimator are in McKeague and Utikal
(1990), but the general idea is to 1) calculate the Nelson-Aalen estimator on
the subset of data that has marker values falling within a specified window
2) smooth the result using an Epanechnikov kernel (analogous to the way kernel
density estimation can be seen as a kernel smoothing of the empirical cumulative
distribution function). \cr \cr
The estimation procedure requires both a marker bandwidth (to specify the width
of the window) and a time bandwidth (to specify the Epanechnikov kernel). The
main interactive capability of the application is to allow the user to graphically
adjust both of these bandwidths and get immediate visual feedback on how the
resulting estimates change. \cr \cr
ihazr user interface: \cr
- Move cursor over the scatterplot to subset data and click to freeze/unfreeze the selection. \cr
- Click on buttons or options in a dropdown to select different covariates. \cr
- Double-click Maximum, Minimum, or Bandwidth text boxes to input numerical values.
Press Enter to commit the value. Click on the scatterplot to clear the input box. \cr
- Mouseover the gray bar below the Epanechnikov kernel to change the kernel's bandwidth.
Click to freeze/unfreeze the bandwidth selection. \cr \cr
\code{ihazr} was developed using D3.js and \code{htmlwidgets}.
}
\examples{
#Example 1 - simulated data
time_t <- runif(50, 0, 10)
status_t <- rbinom(50, 1, .7)
age_t <- runif(50, 20, 80)
ihazr(time_t, status_t, age_t)
#Example 2 - with survival data
library(survival)
library(dplyr)
pbc5 <- pbc \%>\%
slice(1:312) \%>\%
select(time, status, age, edema, bili, albumin, protime) \%>\%
mutate(time = time/365, status = (status==2)*1, bili = log(bili),
protime = log(protime))
ihazr(time=pbc5[,1], status=pbc5[,2], marker=pbc5[,3:7])
#Example 3 -- mgus2
library(survival)
library(dplyr)
mgusN <- mgus2 \%>\%
mutate(time = futime/12)
ihazr(time=mgusN$time, status=mgusN$death, marker=mgusN[,c(2:6,8)], buttons = FALSE, bandMax=8)
}
\references{
McKeague, IW and Utikal, KJ (1990). Inference for a Nonlinear Counting Process
Regression Model. \emph{Annals of Statistics}.
\href{https://doi.org/10.1214/aos/1176347745}{link} \cr \cr
Wang, JL (2005). Smoothing Hazard Rates. \emph{Encyclopedia of Biostatistics}.
\href{https://doi.org/10.1002/0470011815.b2a11069}{link}
}
|
22a7368418bd7c08559486145adf4458808eab43 | 6a8af4e89e5eb04762f7629c7be2af71f340413a | /man/dimension.Rd | eba6f352af840fa15dd6a11aa160269ac06bd2f4 | [
"Apache-2.0"
] | permissive | comchangs/RDruid | 50c693af63bf7d2d44ccf81ad23283269f64b165 | 75f8e6f49e4ccf44954f476f1ade32eca3821f5e | refs/heads/master | 2021-01-24T23:13:21.102885 | 2016-01-08T05:41:29 | 2016-02-22T09:37:35 | 36,798,726 | 0 | 0 | null | 2015-06-03T11:14:28 | 2015-06-03T11:14:28 | null | UTF-8 | R | false | false | 282 | rd | dimension.Rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{dimension}
\alias{dimension}
\title{Creates a Druid dimension object}
\usage{
dimension(name)
}
\arguments{
\item{name}{dimension name}
}
\value{
a Druid dimension object
}
\description{
Creates a Druid dimension object
}
|
705dc4c8ddf738acb2d98aa297279dcec67a6cc2 | c07c277199acfc12cb113953b673af550cd7c52a | /ENAR_scraper/ENAR_Scraper.R | 907caefab4e4957fc3c502ffa0284507f18387b4 | [] | no_license | muschellij2/ENAR_Over_Time | c9a0341cc3332d4cd845a6f3d1c0f2ef0cb90458 | 0cdf7a98fab391f7a81426af6b10895b09e9a24e | refs/heads/master | 2016-09-05T15:25:17.124870 | 2014-03-12T22:09:24 | 2014-03-12T22:09:24 | 17,357,132 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,516 | r | ENAR_Scraper.R | rm(list=ls())
library(tm)
library(stringr)
uri <- "2013_abstract.pdf"
# download.file("http://www.enar.org/meetings2013/2013_abstracts.pdf", uri)
# Return the final element of `x`, or NA when `x` has length zero.
last <- function(x) {
  n <- length(x)
  if (n > 0) x[n] else NA
}
# --- Abstracts ---------------------------------------------------------------
# Parse the abstracts PDF twice: once preserving layout, once as plain text
# restricted to pages 2-118.
absl <- readPDF(control = list(text = "-layout"))(elem = list(uri = uri),
                language = "en",
                id = "idabs")
xabs <- abs <- readPDF(control = list(text = "-nopgbrk -f 2 -l 118"))(elem = list(uri = uri),
                language = "en",
                id = "idabs")
# Drop page furniture and header fragments left behind by the PDF extraction.
abs <- xabs[ !xabs%in% c("CT", "S|", "ENAR 2013", "A", "R", "T", "ABS", "NT",
  "POSTER PR", "S", "E", "ES", "I", "AT", "O", "N", "Spring Meeting", "March 10 – 13",
  "Abstracts", "AB", "ENAR 2013 • Spring Meeting • March 10–13", "|", "C", "ON",
  "PO S", "ST", "TER PRESE", "TS")]
# Lines that start an author email address.
emails <- grepl("^email:", abs)
# NOTE(review): intent appears to be stripping leading "<digits><letter>. a"
# enumerators — confirm the regex does what is wanted.
all.caps <- gsub("\\d*[a-z]\\. \\a(.*)", "\\1", abs)
# strp
# Tokenize each abstract line and look at its final word.
ss <- strsplit(abs, " ")
last.words <- sapply(ss, last)
have.comma <- grepl(",", ss)
# Keep only last words that are not email addresses.
last.words <- last.words[ !grepl("@", last.words)]
#
#
#
#
# --- Program -----------------------------------------------------------------
program <- "2013_program.pdf"
# download.file("http://www.enar.org/meetings2013/2013_Spring_Preliminary_Program.pdf", uri)
#
# Poster sessions (pages 22-38) and oral presentations (pages 39-130).
posters <- readPDF(control = list(text = "-f 22 -l 38"))(elem = list(uri = uri),
                language = "en",
                id = "idprog")
pres <- readPDF(control = list(text = "-f 39 -l 130 -layout"))(elem = list(uri = uri),
                language = "en",
                id = "idprog")
# pp2 <- strsplit(pres, "\t")
# Keep everything up to an "a.m."/"p.m." time stamp; lines without one -> NA.
pp <- gsub("(.*((a|p)\\.\\m.)).*", "\\1", pres)
times <- grepl("(a|p)\\.\\m.", pp)
pp[!times] <- NA
### need to take out particulate matter
pp <- gsub("On the Use of a p.m.", "", pp)
### take out when session talks about lenght of next segment
pp[grepl("–", pp)] <- NA
pp <- gsub("[A-Z]", "", pp)
pp <- str_trim(pp)
pp <- str_trim(pp)
# Split each presentation line into its first token (time) and the rest
# (title), padding short lines with empty strings.
pp <- strsplit(pres, " ")
pp <- lapply(pp, str_trim)
pp <- lapply(pp, function(x) {
  x <- x[ x!= ""]
  if (length(x) < 1) return(c("", ""))
  if (length(x) < 2) return(c("", x))
  c(x[1], str_trim(paste(x[2:length(x)], sep="", collapse=" ")))
})
pp <- do.call("rbind", pp)
#
# ### ideas - getting all the |
0f01d990281d2b4cdc61bdca7ecf514c7fd64e37 | 25e234463075d57c376bfc80111626b14ae348bb | /man/cf_field_to_css.Rd | 53ee752b3340f7bfede8b93e77f0857431de88fe | [] | no_license | cran/condformat | e2480ef9432aecfe7543de3d1fb190c0780527d3 | a2d9cd81131dfb08ab840c8feec5a6f07033cab9 | refs/heads/master | 2022-12-03T22:27:41.731080 | 2022-11-26T10:20:03 | 2022-11-26T10:20:03 | 54,424,154 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 901 | rd | cf_field_to_css.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/render_html.R
\name{cf_field_to_css}
\alias{cf_field_to_css}
\title{How to export a cf_field to CSS}
\usage{
cf_field_to_css(cf_field, xview, css_fields, unlocked)
}
\arguments{
\item{cf_field}{A cf_field object. This is like a rule, but with the computed
colour values. It usually maps one-to-one to a CSS field.}
\item{xview}{A data frame with the columns to be printed and rows filtered}
\item{css_fields}{A list of matrices. The names of the list are CSS attributes and
each matrix is of the size of xview and contains the respective CSS values.}
\item{unlocked}{A logical matrix of cells unlocked (that can still be modified by further
rules).}
}
\value{
A list with two elements: css_fields and unlocked (with updated values)
}
\description{
This method is exported so package users can generate their own rules
}
|
fc5d4f19c2bad6311aed5ae4ca9d20fa26757d2f | 7eb63399fa00e3c547e5933ffa4f47de515fe2c6 | /man/GAinitialise.nullAverage.Rd | 7a1c9bbc8494303e68dd5880f44a1d5408238a89 | [] | no_license | bentaylor1/lgcp | a5cda731f413fb30e1c40de1b3360be3a6a53f19 | 2343d88e5d25ecacd6dbe5d6fcc8ace9cae7b136 | refs/heads/master | 2021-01-10T14:11:38.067639 | 2015-11-19T13:22:19 | 2015-11-19T13:22:19 | 45,768,716 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 567 | rd | GAinitialise.nullAverage.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/gridAverageMethods.R
\name{GAinitialise.nullAverage}
\alias{GAinitialise.nullAverage}
\title{GAinitialise.nullAverage function}
\usage{
\method{GAinitialise}{nullAverage}(F, ...)
}
\arguments{
\item{F}{an object of class nullAverage}
\item{...}{additional arguments}
}
\value{
nothing
}
\description{
This is a null function and performs no action.
}
\seealso{
\link{nullAverage}, \link{setoutput}, \link{GAinitialise}, \link{GAupdate}, \link{GAfinalise}, \link{GAreturnvalue}
}
|
1c0d9e13ddbe7e7e0e67086010217f69f7eff2ac | 53e070f00aa3fd8b1744487af95177633eb8b053 | /R/testhelpers.R | 7c01034613566db8e3144941621b7b9b1492064a | [
"MIT"
] | permissive | gluc/datalicenseR | 75eab57b44812b09a27925d45e9072a45012278b | 5c9b7fa590717b6d236245e11a75ee3bd69db0c3 | refs/heads/master | 2021-08-27T16:23:05.758221 | 2021-08-02T15:41:03 | 2021-08-02T15:41:03 | 34,683,123 | 2 | 3 | MIT | 2021-08-02T15:41:04 | 2015-04-27T18:18:02 | HTML | UTF-8 | R | false | false | 1,722 | r | testhelpers.R |
# Round-trip `fileContent` through DES: write it to a temp file, encrypt it
# with EncryptFile(), decrypt it back with DecryptFile() (both defined
# elsewhere in this package, backed by libdes), and return the decrypted
# lines so the caller can compare them against the input.
# NOTE(review): the `uuencFile` argument is accepted but never used — the
# EncryptFile() call below passes `fileIn` as uuencFileName; confirm whether
# that is intended.
DESRounttrip <- function(fileContent, SUNOS = FALSE, HEX_KEY = FALSE, ECB = FALSE, UUENC = FALSE, uuencFile = "") {
  fileIn <- tempfile()
  #Write something into it
  writeLines(fileContent, fileIn)
  #name of the target encrypted file
  fileEnc <- paste0(fileIn, ".enc")
  # Fixed test key.
  key <- "Ab4qY9qm"
  #call the new method "callRDES" in the libdes.dll
  result <- EncryptFile(sourceFile = fileIn,
                        encryptedFile = fileEnc,
                        key,
                        SUNOS = SUNOS,
                        HEX_KEY = HEX_KEY,
                        ECB = ECB,
                        UUENC = UUENC,
                        uuencFileName = fileIn)
  # now decrypt
  fileDec <- paste0(fileIn, ".dec")
  result <- DecryptFile(fileEnc, fileDec, key, SUNOS, HEX_KEY, ECB, UUENC)
  #read in decrypted file
  #decryptedString <- readChar(fileDec, file.info(fileDec)$size)
  decryptedString <- readLines(fileDec)
  #cnt <- readChar(fileEnc, file.info(fileEnc)$size)
  #decryptedString <- Decrypt(cnt, key = key)
  return( decryptedString )
}
# Round-trip a random payload through DES encryption/decryption and assert
# (via testthat's expect_equal) that the decrypted content equals the input.
#
# Fix: the original called DESRounttrip(fileContent) without forwarding any
# of the option flags, so every invocation silently tested the default DES
# configuration regardless of the arguments supplied. The flags are now
# passed through.
#
# SUNOS/HEX_KEY/ECB/UUENC: DES mode flags forwarded to DESRounttrip().
# uuencFile: uuencode file name hint, forwarded as-is.
# numChars:  characters per random line; lines: number of random lines.
TestDES <- function(SUNOS = FALSE, HEX_KEY = FALSE, ECB = FALSE, UUENC = FALSE, uuencFile = "", numChars = 100, lines = 10) {
  fileContent <- RandomString(n = lines, length = numChars)
  #create a temporary file
  res <- DESRounttrip(fileContent,
                      SUNOS = SUNOS,
                      HEX_KEY = HEX_KEY,
                      ECB = ECB,
                      UUENC = UUENC,
                      uuencFile = uuencFile)
  expect_equal(fileContent, res)
}
# Generate `n` random strings, each `length` characters long, sampled with
# replacement from digits, upper/lower-case letters, space, '|' and ':'.
# Returns a character vector of length `n`.
#
# Fix: the original used `c(1:n)` / `for (i in 1:n)`, which misbehaves for
# n = 0 (1:0 iterates over c(1, 0) and returns a length-2 vector). seq_len()
# handles n = 0 correctly and vapply() avoids growing by indexed assignment.
RandomString <- function(n = 1, length = 12) {
  alphabet <- c(0:9, letters, LETTERS, " ", "|", ":")
  vapply(
    seq_len(n),
    function(i) paste(sample(alphabet, length, replace = TRUE), collapse = ""),
    character(1)
  )
}
|
ccc750ea08f3e1001245acfd94b82c0f781c5785 | 67a257f33cac54b60519d1f27e171a2b474a5a6e | /cachematrix.R | 52b6287092442f9e40bebeb8169bc1f428bdf30e | [] | no_license | mmorley22/ProgrammingAssignment2 | 2473055db75626707e2ba9ffdf90de9d93437e1f | 706de395db8e580b51a494fcf0fa1c791e777471 | refs/heads/master | 2020-07-14T06:31:51.883153 | 2014-11-19T14:45:35 | 2014-11-19T14:45:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 929 | r | cachematrix.R | ## These functions take a matrix and allow the inverse of the matrix to be
## cached so that it does not have to be recalculated each time
## This function creates a vector that stores both the matrix and its inverse
## if the inverse had already been calculated
# Wrap a matrix in a list of accessors that also cache its inverse:
#   set(y)       replace the stored matrix and invalidate the cached inverse
#   get()        return the stored matrix
#   setinv(inv)  store a computed inverse (called by cacheSolve)
#   getinv()     return the cached inverse, or NULL if not yet computed
#
# Fix: the original `set` did `x <<- mat`, which ignored its argument `y`
# entirely and assigned the old matrix to a variable `x` in the enclosing
# environment. It now stores `y` into `mat` as intended.
makeCacheMatrix <- function(mat = matrix()) {
  # Cached inverse; NULL until cacheSolve() computes and stores it.
  inv <- NULL
  set <- function(y) {
    mat <<- y
    inv <<- NULL
  }
  get <- function() mat
  setinv <- function(inverse) inv <<- inverse
  getinv <- function() inv
  list(set = set, get = get,
       setinv = setinv, getinv = getinv)
}
## This function takes the vector created by makeCacheMatrix and
## finds the inverse if it is not already there
## returns the inverse
## Return the inverse of the matrix wrapped by a makeCacheMatrix object.
## On a cache hit the stored inverse is returned (with a message); otherwise
## the inverse is computed with solve(), stored via setinv(), and returned.
## Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
        cached <- x$getinv()
        if (is.null(cached)) {
                cached <- solve(x$get(), ...)
                x$setinv(cached)
        } else {
                message("getting cached data")
        }
        cached
}
|
d2ad322c85f7a756f0852e642d10c271ba266904 | af8ba7f3052b9bde6c8e497b832d52da7624336a | /imagejprocessing_backgroundsubtract.r | 17bdd6c3a1a5d76c64441f1700af5fbdc61288e4 | [] | no_license | BradyAJohnston/upsfret | 4f316df8a666ef91ba8637bed76c2b7491ff8f68 | f3a9fd4e5ae005fd12c33055b9c3cd67032edc97 | refs/heads/master | 2020-07-15T18:16:46.477421 | 2020-02-09T23:29:42 | 2020-02-09T23:29:42 | 205,622,437 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,525 | r | imagejprocessing_backgroundsubtract.r | library("drc")
require("ggplot2")
require("reshape2")
require("scales")
require("tidyverse")
require("ggthemes")
require("ggdark")
# --- Load Fiji/ImageJ measurements exported as CSVs --------------------------
# Folder with one CSV per plate/channel image (Cy3 and FRET channels).
direc <- "~/Dropbox/BondLab/Data/Typhoon Images/200205/rna/fijidata/"
fl<- list.files(direc, pattern = ".csv")
fl.cy3 <- list.files(direc, pattern = "Cy3")
fl.fret <- list.files(direc, pattern = "FRET")
strsplit(fl.fret[1], split="-", )
# Read every CSV and record which file each row came from.
Lapply <- lapply(paste(direc, fl, sep = ""), read.csv, header=TRUE)
names(Lapply) <- fl
for(i in fl) {
  Lapply[[i]]$Source = i}
comb <- do.call(rbind, Lapply)
# File names encode "<plate>-<filter>.csv"; split them into columns.
comb <- comb %>% separate(Source, c("plate", "channel"), sep = "-")
comb <- comb %>% separate(channel, c("filter", NA), sep = ".cs")
# Sample sheet: RNA id per plate half and the top concentration of its
# dilution series ('NA' marks an unused slot).
details <- as.data.frame(c("04", '05', '06','NA', '07', '08', '09', '10', '11', '12', '13', '03'))
details
details$max <- c(40,40,20,NA,20,10,2,2,10,100,20,40)
colnames(details) <- c("platerna", "max")
details
i <- 1
i <- 1
# newlist <- list()
for (i in 1:6){
i <- i*2
details <- details
rn1 <- details$platerna[i-1]
rn2 <- details$platerna[i]
pl <- paste(rn1, rn2, sep = "_")
tmp1<-
comb %>% filter(plate==pl) %>%
filter(Row == "A" | Row =="B" | Row == "C") %>%
mutate(sample = paste(rn1))
tmp1$conc <- details$max[i-1]/ 2e-3 / (2^tmp1$Column)
# tmp1%>%head()
tmp2 <-
comb %>% filter(plate==pl) %>%
filter(Row == "D" | Row =="E" | Row == "F") %>%
mutate(sample = paste(rn2))
tmp2$conc <- details$max[i]/ 2e-3 / (2^tmp1$Column)
tmp2%>%head()
background <- comb %>% filter(plate == pl) %>%
filter(Row =="G" & Column == 1:3) %>%
mutate(sample=paste("background"))
background$conc <- 0
negative <- comb %>% filter(plate == pl) %>%
filter(Row =="G" & Column == 4:6) %>%
mutate(sample=paste("negative"))
negative$conc <- 0
# for(k in 1:3) {
# print(mean(negative$Mean[1*k:3*k]))
# print(negative$)
# }
positive <- comb %>% filter(plate == pl) %>%
filter(Row =="G" & Column == 7:9) %>%
mutate(sample=paste("positive"))
positive$conc <- 0
tmp3 <-
comb %>% filter(plate==pl) %>%
filter(Row == "G" & Column ==10:12) %>%
mutate(sample = "empty")
tmp3$conc <- NA
tmp4 <- comb %>% filter(plate == pl) %>%
filter(Row =="H") %>%
mutate(sample=paste("empty"))
tmp4$conc <- 0
tmp5 <- rbind(rbind(tmp1, tmp2), rbind(tmp3, rbind(negative, rbind(positive, rbind(background,tmp4)))))
assign(paste("output",i,sep = "."), tmp5)
}
# --- Combine plates, compute FRET ratio, fit binding curves ------------------
dfl <- list(output.2, output.4, output.6, output.8, output.10, output.12)
newcomb <- do.call(rbind, dfl)
# head(newcomb)
# newcomb[grepl("03", newcomb$sample),]
# sel <- rownames(with(newcomb, newcomb[sample=="03" & Column =="1",]))
# newcomb%>% filter(sample =="03" & Column =="1")
#clean up bad value
# ggplot(newcomb %>% filter(), aes(conc, Mean)) + geom_point() + scale_x_log10() +
#   facet_wrap(~sample~filter, scales = "free_y")
# Ratiometric FRET: FRET / (FRET + Cy3), pairing rows from the two channels.
# NOTE(review): this relies on fretdf and cy3df having identical row order.
fretdf <- newcomb %>% filter(filter=="FRET")
cy3df <- newcomb %>% filter(filter=="Cy3")
calcfretdf <- fretdf
calcfretdf$calcfret <- fretdf$Mean / (fretdf$Mean + cy3df$Mean)
# head(calcfretdf)
#create plate lyout diagram
invert_geom_defaults()
ggplot(newcomb%>%filter(filter=="FRET"), aes(Column, fct_rev(Row), alpha=Mean)) +
  geom_point(size = 8) +
  # scale_x_discrete() +
  scale_x_continuous("",breaks = 1:12, position = "bottom", limits = c(0.75, 12.25)) +
  scale_y_discrete("") +
  theme_presentation(base_size = 15, base_family = "Courier") +
  theme(aspect.ratio = 8/12, legend.position = "") + facet_wrap(~plate)
# Mean FRET of the plate controls, as a sanity check.
calcfretdf %>% filter(sample == "positive" | sample=="negative") %>% group_by(plate,sample) %>%
  summarise(meanval = mean(calcfret))
# corrections
# corrections[13:24,] <- corrections
# misclist <- c(04, 04, 06,06, 07, 07, 09, 09, 11, 11, 13, 13, 05,05, NA, NA, 08, 08, 10, 10, 12, 12, 03,03)
#
# corrections$rna <- misclist
#
# calcfretdf$calcfret <- (calcfretdf$calcfret- 0.472)/(0.485-0.472)
# Drop the one bad well (sample 03, column 1) before fitting.
testnew <- calcfretdf %>% filter(sample != "03" | Column != 01)
# unique(testnew$sample)
testnew %>% filter(sample=="03" & Column==01)
# filter(sample == "04" & Column == "02")
# Fit per-sample 4-parameter log-logistic binding curves (drc::drm); the
# fourth parameter is the apparent Kd.
bindings <- drm(calcfret~conc, curveid = sample, data=testnew%>%
                  # filter(sample != "03" & Column != 01) %>%
                  filter(
                    sample == "03" |
                      sample == "04" |
                      sample == "07" |
                      sample == "08" |
                      sample == "09" |
                      sample == "10" |
                      sample == "11" |
                      sample == "13"
                  ),
                fct = LL.4(names = c("b", "min", "max", "Kd"), fixed = c(NA, NA, NA, NA)))
# plot(bindings) invisibly returns the fitted curves, reused for ggplot below.
pl <- plot(bindings)
plot(bindings)
bindings$curve
head(pl)
colnames(pl) <- c("conc", "04","07","08","09","10","11","13","03")
head(pl)
melt.pl <- melt(pl, id.vars = c("conc"))
colnames(melt.pl) <- c("conc", "sample", "value")
# bindings$coefficients[25:32]
# NOTE(review): coefficient positions 25:32 are assumed to be the Kd terms —
# verify against the model's coefficient names before reuse.
kds <- as.data.frame(round(bindings$coefficients[25:32],0))
colnames(kds) <- c("Kd")
kds
kds$sample <- c("04","07","08","09","10","11","13","03")
head(kds)
head(melt.pl)
#
# Keep only real samples for the final figure.
plotdf <- calcfretdf %>%filter(sample != "06" &
                                 sample != "background" &
                                 sample != "empty" &
                                 sample != "NA" &
                                 sample != "negative" &
                                 sample != "positive" )
head(plotdf)
# Final faceted binding figure: points = mean +/- error, white fitted curves,
# Kd labels per facet.
p3 <- ggplot(plotdf, aes(conc, calcfret, colour=sample)) +
  scale_x_log10(limits=c(0.1,10000), breaks =c(10^(-1:5))) +
  # geom_line(data=melt.pl, aes(conc, value)) +
  geom_label(data=kds , aes(x = 0.1, y= 0.48, label=paste("Kd =",Kd, "nM")), hjust = 0, colour="white") +
  geom_line(data=melt.pl, aes(conc, value, group=sample), colour="white", alpha=0.8) +
  geom_point(data=plotdf, stat="summary") +
  geom_errorbar(data=plotdf, stat="summary", width=0.05) +
  labs(title = "FRET binding", y = "Arbitrary FRET Units", colour="RNA (BAJ_)") +
  scale_y_continuous(limits = c(0.469,0.485), breaks=c(0.470, 0.485)) +
  # coord_cartesian(ylim = c(0,1)) +
  # theme_classic(base_size = 18) +
  dark_theme_classic(base_size = 18, base_family = "Ubuntu")+
  theme(aspect.ratio = 0.5, legend.position = "") +
  facet_wrap(~sample, ncol = 1, strip.position = "right")
newplate <- plotdf
newcurve <- melt.pl
newkd <- kds
p3 + ggsave("~/Dropbox/BondLab/conferences/Lorne2020/posterfigures/testcolourFRET.svg")
|
07145e6e2cd06388d3642dbe93147d20ffeaf9e2 | 82f144c9d095217772f8bac0722c4012652fa14f | /man/spat.med.Rd | 25f121411bc612a345f0972f5763ac06f2e760de | [] | no_license | RfastOfficial/Rfast | 41cb1a922c3fc77b8045e5e00a031e012374c261 | 11cc9fc68b679eccfd6d0453c7f267e7f163cf41 | refs/heads/master | 2023-09-01T08:32:01.112957 | 2023-07-03T13:44:28 | 2023-07-03T13:44:28 | 213,182,742 | 107 | 15 | null | 2023-07-03T13:44:30 | 2019-10-06T14:25:53 | C++ | UTF-8 | R | false | false | 1,601 | rd | spat.med.Rd | \name{Spatial median for Euclidean data}
\alias{spat.med}
\title{
Spatial median for Euclidean data
}
\description{
Spatial median for Euclidean data.
}
\usage{
spat.med(x, tol = 1e-09)
}
\arguments{
\item{x}{
A matrix with Euclidean data, continuous variables.
}
\item{tol}{
A tolerance level to terminate the process. This is set to 1e-09 by default.
}
}
\details{
The spatial median, using a fixed point iterative algorithm, for Euclidean data is calculated. It is a robust location estimate.
}
\value{
A vector with the spatial median.
}
\references{
Jyrki Mottonen, Klaus Nordhausen and Hannu Oja (2010). Asymptotic theory of the spatial median.
In Nonparametrics and Robustness in Modern Statistical Inference and Time Series Analysis:
A Festschrift in honor of Professor Jana Jureckova.
T. Karkkaminen and S. Ayramo (2005). On computation of spatial median for robust data mining.
Evolutionary and Deterministic Methods for Design, Optimization and Control with Applications to
Industrial and Societal Problems, EUROGEN 2005, R. Schilling, W.Haase, J. Periaux, H. Baier, G. Bugeda (Eds)
FLM, Munich. http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf
}
\author{
Manos Papadakis and Michail Tsagris
R implementation and documentation: Michail Tsagris <mtsagris@uoc.gr> and Manos Papadakis <papadakm95@gmail.com>
}
%\note{
%% ~~further notes~~
%}
\seealso{
\code{\link{colMedians}
}
}
\examples{
res<-spat.med( as.matrix( iris[, 1:4] ) )
res<-colMeans( as.matrix(iris[, 1:4]) )
res<-colMedians( as.matrix(iris[, 1:4]) )
}
\keyword{ spatial median }
\keyword{ robust statistics }
|
bd54cdd7c154e1a29559024fbefc1065556eea40 | 391c70316b9a679631f0e2bb8b2e9bb2a3efa0d8 | /scripts/scripts/testing/automations/austria.R | f1cac68a365bd5d409baca0c554fa6ae46858213 | [] | no_license | rsavaris66/covid-19-data | a6125fbb1c9d94316a1f13f18ab6223fcd3b50a0 | 76a9f78c2738aedf06c3e95037872d57ced338f9 | refs/heads/master | 2022-12-13T06:31:06.278318 | 2020-09-15T15:30:47 | 2020-09-15T15:30:47 | 295,803,177 | 2 | 0 | null | 2020-09-15T17:35:19 | 2020-09-15T17:35:18 | null | UTF-8 | R | false | false | 639 | r | austria.R | url <- "https://www.sozialministerium.at/Informationen-zum-Coronavirus/Neuartiges-Coronavirus-(2019-nCov).html"
count <- read_html(url) %>%
html_node(".table-responsive table") %>%
html_table(dec = ",")
row_n <- which(str_detect(count$Bundesland, "Testungen"))
col_n <- which(str_detect(names(count), "Österreich"))
count <- count[row_n, col_n] %>%
str_replace_all("[^\\d]", "") %>%
as.integer()
add_snapshot(
count = count,
sheet_name = "Austria",
country = "Austria",
units = "tests performed",
source_url = url,
source_label = "Austrian Ministry for Health",
testing_type = "PCR only"
)
|
0330c02ea1819c67b4461821dc97769d077c4870 | f1f1b786f77cfe59bde852cf53d2aeb5907d844b | /man/persp.loca.p.Rd | 3cf7ef47425c78190f0274bfd6f1a2465171c1ee | [] | no_license | cran/orloca | 69fad98a17c6f1c92cac80309943fb9d9f08852e | d80e3d721bb9cbb82540e3c3439dd8d2c886f1e6 | refs/heads/master | 2023-05-26T11:31:08.153995 | 2023-05-16T10:10:02 | 2023-05-16T10:10:02 | 17,698,127 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,345 | rd | persp.loca.p.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/persp.loca.p.R
\name{persp.loca.p}
\alias{persp.loca.p}
\title{Plots of the min-sum objective function}
\usage{
\method{persp}{loca.p}(
x,
lp = numeric(0),
xmin = min(x@x),
xmax = max(x@x),
ymin = min(x@y),
ymax = max(x@y),
n = 10,
ticktype = "detailed",
...
)
}
\arguments{
\item{x}{The loca.p object to compute the objective.}
\item{lp}{If given, then \eqn{l_p} norm will be used instead of the Euclidean norm.}
\item{xmin}{The minimum value for x axis.}
\item{xmax}{The maximum value for x axis.}
\item{ymin}{The minimum value for y axis.}
\item{ymax}{The maximum value for y axis.}
\item{n}{The number of divisions for grid.}
\item{ticktype}{parameter to pass to low level function persp}
\item{\ldots}{Other options.}
}
\value{
A 3D plot of the min-sum function.
}
\description{
\code{persp} provides a graphical representation of the min-sum function (\code{distsum}).
}
\details{
If \eqn{p<1} then \eqn{l_p} is not a norm, so only \eqn{p>=1} are valid values.
}
\examples{
# A new unweighted loca.p object
loca <- loca.p(x = c(-1, 1, 1, -1), y = c(-1, -1, 1, 1))
# The 3D graphics
persp(loca)
}
\seealso{
See also \code{\link{orloca-package}}, \code{\link{plot.loca.p}} and \code{\link{loca.p}}.
}
\keyword{classes}
\keyword{hplot}
|
04c68d7f9c07524a4f200b35e6afc2c0ecadd145 | 8c9c2cc41eea2b9e370098bfa105b17c4282b538 | /data-raw/exemple_dataset.R | 0cb46f6c4093330d4f67161da1efdccaef96cbb3 | [] | no_license | AThibault92/paleotools | 760a435479fc94a392fdc4e7ea8760976c6230d5 | 382ec4b42bca2de735370ba6473fdcd8b4712698 | refs/heads/master | 2023-03-23T14:11:38.918615 | 2021-03-19T11:44:31 | 2021-03-19T11:44:31 | 312,003,845 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 170 | r | exemple_dataset.R | ## code to prepare `exemple_dataset` dataset goes here
example_dataset <- readr::read_csv("./data-raw/template.csv")
usethis::use_data(example_dataset, overwrite = TRUE)
|
1c2a6d34d70fafafbdc326544411a1ec2c6c9be9 | 29585dff702209dd446c0ab52ceea046c58e384e | /write.snns/R/write.snns.R | d0bf46c21f9580661f0166b543a21613498d9a26 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,592 | r | write.snns.R | #################################################
# write.snns.R (v0.0-4.2) 2007/04/03 #
# Authors: #
# Manuel Castejon Limas. #
# mail: manuel.castejon@unileon.es #
# Joaquin Ordieres Mere. #
# mail: joaquin.ordieres@dim.unirioja.es #
# Francisco Javier de Cos Juez #
# mail: decos@api.uniovi.es #
# Francisco Javier Martinez de Pison #
# mail: fjmartin@dim.unirioja.es #
#################################################
# This function creates a SNNS pattern file
# from a data.frame or matrix.
# Write a data set to an SNNS (Stuttgart Neural Network Simulator)
# pattern file.
#
# Args:
#   x:        data.frame or matrix; each row is one pattern.  The last
#             'noutputs' columns are the output units, the rest inputs.
#   file:     path of the pattern file to create (truncated if present).
#   noutputs: number of output units (default 1).
#
# Each pattern is written as its row values separated by spaces, split
# into lines of at most 10 values.
#
# Fixes over the previous revision:
#   * successive chunks of wide rows are now written correctly (the old
#     while-loop re-emitted columns 1:10 for every chunk);
#   * a final chunk with exactly one column is no longer written twice
#     (the old 'for(j in 2:numcol)' ran backwards when numcol == 1).
write.snns <- function(x, file="", noutputs=1)
{
  file.create(file)
  # small helper: append text to the pattern file
  put <- function(txt) cat(txt, file=file, append=TRUE)
  # SNNS header block (same layout as before)
  put(" SNNS pattern definition file v3.2\n")
  put(paste(" generated at ",date(),"\n\n\n\n\n\n\n"))
  put(paste(" No. of patterns :",nrow(x),"\n"))
  put(paste(" No. of input units :",ncol(x)-noutputs,"\n"))
  put(paste(" No. of output units :",noutputs,"\n\n"))
  ncols <- ncol(x)
  for (i in seq_len(nrow(x)))
  {
    put(paste("\n#",i,"\n"))
    if (ncols >= 1)
    {
      # emit the row in successive chunks of (at most) 10 values
      for (start in seq(1, ncols, by = 10))
      {
        idx <- start:min(start + 9, ncols)
        # paste() coerces each cell exactly like the original code did
        # (factor cells print their labels, not their integer codes)
        vals <- vapply(idx, function(j) paste(x[i, j]), character(1))
        put(paste(vals, collapse = " "))
        put("\n")
      }
    }
  }
}
|
989921e3011e693cd7ebe02f76a4b1b368a3dca8 | 0c99b3fa7b411e5c5d8e2daced527a4fadfddc7d | /seminar_5_12.R | ff8cbff2f3af31ed9e318aef2ebe15f00834499c | [] | no_license | ArtemBaskal/regression-models | 9ae9bcac3b10b4d925317728fb3ed8765972d947 | 38ba9400ddd78792af1167a78dbee95694ad923c | refs/heads/master | 2022-04-06T04:04:56.817873 | 2019-12-14T21:02:29 | 2019-12-14T21:02:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,182 | r | seminar_5_12.R | "b - beta
Модели бинарного выбора
Зависимая переменная - бинарная - принимает значения 1 или 0
y = 1 или 0 (например вернет ли кредитор кредит)
Вероятность возвращения кредита это доля - sum(yi..n) / N -
доля это выборочна оценка вероятности
1) y = b0 + b1*x1 + ... + bk*xk
E[y|x] => P{y=1|x} = x*b
P{y=0|x} = 1 - x*b
Проблема - нет гарантии что x*b < 1 - сложность интерпретации
Остатка в такой модели гетероскедастичны
(дисперсия остатков завивист от bксов) по определению -
Чем ближе вероятность к 1 тем меньше дисперсия
y ei pi - вероятность y = 1 или 0
x*b 1-x*b x*b
1-x*b -x*b 1-x*b
Есть зависимоть от x
P{y=1|x}=x*b+e
ei pi
y=1 1-x*b x*b
y=0 -x*b 1-x*b
Если вопрос про гетероскедастичность то сразу нужно посчитать дисперсию
Var[x]=E[x^2]-(E[x]^2) (матожидание)
(На контрольной можно писать формулы без слов на листочек)
E[e]=(1-x*b)*x*b + (-x*b)*(1-x*b) = 0 => Var[e] = E[e^2]
E[e^2]=(1-x*b)^2*x*b + (-x*b)^2*(1-x*b) = x*b-(x*b)^3
Var[e] = x*b-(x*b)^3 - есть иск - значит есть непостоянность дисперсии
- гетероскедастичность
Как эту проблему решать:
1)Линейная модель вероятности: P{y=1|x} = x*b + e = b0 + b1*x1 + .. + bk*xk + e
P{y=1|x} = G(b0 + b1*x1 + .. + bk*xk + e)
функция 0<=G(z)=<1
P{y=1|x}_hat=0.6 => y_hat = 1
P{y=1|x}_hat=0.4 => y_hat = 0
1)G(x*b) = exp(x*b) / (1 + exp(x*b)) = Л(x*b) - логичестичкая функция и логик модель
2)G(x*b) = Интеграл от минус бесконечности до x*b по fi(v)dV; fi(v)=1/(sqrt(2*pi))*exp(-v*2/2)
- probit model -функция нормального распределения N(0,1) - интерпретация не будет отличаться)
Как интерпретировать коэффициенты:
предельный эффект bi * dP{y=1|x} / dx1 = b1
P{y=1|x} = G(b0 + b1*x1 + .. + bk*xk + e)
dP{y=1|x} / dx1 = b1 * G(.)' =
G(x*b) = Л = exp(x*b) / (1 + exp(x*b))
b0 = -3.07
b1 = -0.084
Предельный эффект (частная производная для b1) =
d/dx(exp(x b)/(1 + exp(x b))) = (b e^(b x))/(e^(b x) + 1)^2, где x = b0 + b1*x1 =>
e^(-3.07 + 0084*x1) / (1 + e^(-3.07 + 0084*x1))
(В каждой точке x будет свой коэффициент)
предельное значение - d(P{y=1|x})/dx1 = b1*e^(b0 + b1*x1)/(1 + e^(b0 + b1*x1))
2) logid/probit модели
Оценки
P{y=1|x}=G(x*b) [1]
P{y=0|x}=1-G(x*b) [m]
функция правдоподобия L = П iэ1 (G(x*b)) * П iэm(1 - G(x*b)) (П - произведение) =
П i..N G(xi*b)^yi * [1 - G(xi*b)]^(1-yi) - берем производную и максимизируем
(в контрольной не будет)
Метод главных компонент
x1 x2 cov(x1*x2) > 0
(Поле корреляции линейноа как x1 = 1x2)
Центрируем поле: x1 - x1_среднее и x2 - x2_среднее
Идея - повернуть ось координат вдоль поля корреляции и снизить размерность на 1 -
много информации не потеряется из-за сильно скоррелированности переменных
Пример - сжатие картинки
Любую картинку можно представить в виде матрицы цифр RGB
Стоящие по соседству столбики очень сильно скоррелированны -
(в примере можно снизить размер картинки вдвое потерей 3% качестве)
1)снижения размерности признакового пространства
2)решение проблемы мультиколлинеарности (сильной скоррелированности иксов)
проводим ось там где максимально возможная дисперсия
должна быть ортогональность для борьбы с мультиколлинеарностью
Алгоритм метода главных компонент:
1)центрируем и нормируем (распределение не обязатлеьно нормальное) данные
2)считаем ковариационную матрицу для иксов ( матрица 2x2 ) -
cov(x1*x1) cov(x1*x2)
cov(x2*x1) cov(x2*x2)
3)находим собственые вектора (которые не меняются при умножении)
4)создается новая матрица вращения - она состоим из собственных векторов
R=(eig1...eig2) - по убыванию собственного значенмя
5)получение координат в новых осях -
X' = X*R - новая матрица это старая матрица перемноженная с матрицей вращения
пример
x1 x2
2.5 2.4
0.5 0.7
2.2 2.3
1.9 3.2
3.1 3.0
x1^n x2^n
0.49 -0.04
-1.64 -1.68
0.17 0.45
-0.15 0.74
1.13 0.54
cov(x1, x2) = 0.82
Найдем собственный вектор (-1 -6)
(2 6)
Ax = /lx
Ax = (/lI)x
Ax - (/lI)x = 0
x(A-/lI)=0
(-1 - /l) (6 - /l) + 12 = 0;
/l1 = 2; /l2 = 3;
x = -2y
3)R2x2 X5x2
X_new(5x2) = X(5x2) * R(2x2)
вращение - вокруг первой компоненты максимальная дисперсия а вторая перепендикулярно первой
prcomp(r, center = TRUE)
"
|
7d912ea477a9f67244a694311e5f090c053be12d | b12facd976f77577b0a99cd4b7ed69c72babb824 | /R_code_faPAR.r | 32d4c06fc6ca580ec3c65f97827cb8a9433f9c29 | [] | no_license | SofiaPrandelli/Monitoring | a8a84eb2650aece6c1b0efa65d2d110c90930a58 | bedb6670c65dfb374252f3e5f4eb028f9a957bbc | refs/heads/master | 2021-04-23T23:30:25.610006 | 2021-02-12T15:03:05 | 2021-02-12T15:03:05 | 250,031,536 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,793 | r | R_code_faPAR.r | # R_code_faPAR.r
# how to look at chemical cycling from satellites:
# copNDVI (from rasterdiv) is a worldwide NDVI image (biomass proxy) and
# faPAR10 is the fraction of solar radiation absorbed by live leaves,
# i.e. the light actually used for photosynthesis.

# install.packages("raster")
# install.packages("sf")

setwd("/Users/sofiaprandelli/lab")

library(raster)
library(rasterVis)
library(rasterdiv)

plot(copNDVI)   # fixed: was plot(copNDVI9) -- an undefined object
# values 253:255 are water/flag codes: set them to NA
copNDVI <- reclassify(copNDVI, cbind(253:255, NA))
levelplot(copNDVI)

faPAR10 <- raster("faPAR10.tif")
levelplot(faPAR10)

# WHAT HAPPENS IN THIS CASE? differences with the previous graph: there is
# a high rate of photosynthesis in the equator band, while in the northern
# conifer forests NDVI is high but much of the light passes through the
# canopy down to the soil, so it is not used for photosynthesis.
# DIFFERENT CAPABILITY OF TAKING LIGHT: carbon uptake of plants is very
# high in tropical forests, where essentially all the light is used.

# save the plots as pdf
pdf("copNDVI.pdf")
levelplot(copNDVI)   # fixed: was levelpot(copNDVI), a typo
dev.off()

pdf("faPAR.pdf")
levelplot(faPAR10)
dev.off()   # to save plots as PDF from R

# estimate the relation between NDVI and faPAR

####################################################### day 2
setwd("C:/lab/")
load("faPAR.RData")
library(raster)
library(rasterdiv)
library(rasterVis)

# the original faPAR from Copernicus is 2GB;
# let's see how much space is needed for an 8-bit set
writeRaster(copNDVI, "copNDVI.tif")
# ~5.3 MB
levelplot(faPAR10)   # faPAR = fraction of the solar radiation absorbed by live leaves

##### regression model between faPAR and NDVI
erosion <- c(12, 14, 16, 24, 26, 40, 50, 67)    # example erosion values in a certain area
hm <- c(30, 100, 150, 200, 260, 340, 460, 600)  # heavy metals in the same area
plot(erosion, hm, col = "red", pch = 19, xlab = "erosion", ylab = "heavy metals")
# how strongly are the two variables related? --> linear model
model1 <- lm(hm ~ erosion)
summary(model1)
abline(model1)

####### faPAR vs NDVI model
library(raster)
library(rasterdiv)
# install.packages("sf")   # fixed: do not reinstall the package on every run
library(sf)                # for the st_* functions used below
setwd("/Users/sofiaprandelli/lab")
faPAR10 <- raster("faPAR10.tif")
plot(faPAR10)
plot(copNDVI)
copNDVI <- reclassify(copNDVI, cbind(253:255, NA), right = TRUE)

# draw n random points inside the non-NA area of raster x
random.points <- function(x, n)
{
  lin <- rasterToContour(is.na(x))
  pol <- as(st_union(st_polygonize(st_as_sf(lin))), 'Spatial')  # st_union dissolves geometries
  pts <- spsample(pol[1, ], n, type = 'random')
  pts   # fixed: return the sampled points explicitly as the last expression
}
pts <- random.points(faPAR10, 1000)
copNDVIp <- extract(copNDVI, pts)
faPAR10p <- extract(faPAR10, pts)

# photosynthesis vs biomass
model2 <- lm(faPAR10p ~ copNDVIp)
plot(copNDVIp, faPAR10p, col = "green", xlab = "biomass", ylab = "photosynthesis")
abline(model2, col = "red")
|
9423be505962ada7f5da594658c6a914f4bfa2ee | 9889b1c92fba3feaac26f0d67f0ee34fb2a85482 | /R/draw.nmds.R | 2bbf2556f6e6cf458806dc6f46de0a69aaeedfc6 | [] | no_license | zdealveindy/ordijuice | 3e7f542b4564299e9c2d6c18c51234125317d27d | cbaf6dd33c4ae90a185c2290a5d451de5e66ebef | refs/heads/master | 2020-06-03T14:44:23.151763 | 2015-01-27T21:40:28 | 2015-01-27T21:40:28 | 29,902,092 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,493 | r | draw.nmds.R | draw.nmds <-
# draw.nmds: run non-metric multidimensional scaling (NMDS) on community
# data imported from JUICE and draw the ordination diagram in 2D or 3D.
#
# The display.* arguments are plotting options forwarded to
# draw.2d()/draw.3d(); 'three.dim' switches between a 2D and a 3D plot,
# 'resolution' sizes the plot window, and 'bw' requests a black-and-white
# 2D figure.
# NOTE(review): relies on package-internal helpers (check.install,
# open.r.window, myTkProgressBar, read.check.data, use.last,
# save.ord.result, draw.2d, draw.3d) and on package 'vegan' (metaMDS).
function(display.in.diagram = c('sites'),
display.species = c('none'),
display.sites = c('points'),
axes.shown = c(1,2),
display.EIV = FALSE,
display.header = FALSE,
display.envelope = FALSE,
header.name = 'env',
display.header.style = c('arrow'),
display.spider,
display.group.center = FALSE,
three.dim = FALSE,
resolution = c(1280, 768),
bw = FALSE,
...)
{
# presumably verifies/installs required packages; the returned flag
# decides whether a Tk progress bar can be shown -- confirm in check.install
newRversion <- check.install (display.spider)
# open the plotting window (2D device or 3D window), sized by 'resolution'
open.r.window(three.dim, resolution)
# 1. progress bar: import stage (pb stays NULL when unsupported)
if (newRversion) pb <- myTkProgressBar (paste (ifelse (three.dim, '3D', '2D'),'NMDS - Analysis progress'), 'Importing data from JUICE', 0, 100, 20) else pb <- NULL
# the shown axes are forced here: 1:3 in 3D, 1:2 in 2D (overrides the argument)
if (three.dim) axes.shown <- c(1,2,3) else axes.shown <- c(1,2)
# number of NMDS dimensions to extract
k <- ifelse (three.dim, 3, 2)
write ('End of ordination', file='result.txt')
library (vegan)
# import species/header data exported from JUICE and validate the options
input.data <- read.check.data (display.sites = display.sites, display.EIV = display.EIV, display.header = display.header, display.envelope = display.envelope, display.spider = display.spider, display.group.center = display.group.center)
## 2. update progress bar
if (newRversion) setTkProgressBar (pb, label = 'Calculation of ordination', value = 40)
## calculation of ordination
# reuse the cached fit when data and settings match the previous run,
# otherwise run metaMDS (Bray-Curtis distance, k dimensions, no
# autotransform, zero distances shifted so the fit does not fail)
last.result <- use.last (input.data, 'nmds', setting = list (k = k))
if (last.result$use.last.result) spec.data.ord <- last.result$last.data.result else
spec.data.ord <- metaMDS(input.data$spec.data, distance = 'bray', k = k, trymax=0, autotransform = FALSE, zerodist = "add")
# 3. update progress bar
if (newRversion) setTkProgressBar (pb, label = 'Saving results', value = 60)
save.ord.result (spec.data.ord, last.result$use.last.result, 'nmds', input.data$deleted.plots)
# 4. update progress bar
if (newRversion) setTkProgressBar (pb, label = 'Drawing the figure', value = 80)
# draw the ordination diagram, forwarding all display options
if (three.dim)
draw.3d(input.data = input.data, spec.data.ord = spec.data.ord, display.in.diagram = display.in.diagram, display.species = display.species, display.sites = display.sites, axes.shown = axes.shown, display.EIV = display.EIV, display.header = display.header, display.envelope = display.envelope, header.name = header.name, display.header.style = display.header.style, display.spider = display.spider, display.group.center = display.group.center, pb = pb) else
draw.2d(input.data = input.data, spec.data.ord = spec.data.ord, display.in.diagram = display.in.diagram, display.species = display.species, display.sites = display.sites, axes.shown = axes.shown, display.EIV = display.EIV, display.header = display.header, display.envelope = display.envelope, header.name = header.name, display.header.style = display.header.style, display.spider = display.spider, display.group.center = display.group.center, bw = bw, pb = pb)
# cache a freshly computed fit so an identical follow-up run can skip
# metaMDS: 'nmds_lfa.r' holds the full fit, 'nmds_lfq.r' a quick
# fingerprint (analysis type, matrix size, settings)
if (!last.result$use.last.result)
{
last.data <- list (last.matrix.sum = sum(input.data$spec.data), last.matrix.species = colnames (input.data$spec.data), last.matrix.sites = rownames (input.data$spec.data), last.result = spec.data.ord)
save (last.data, file = 'nmds_lfa.r')
last.data.quick <- list (type.of.analysis = 'nmds', size.of.matrix = dim (input.data$spec.data), setting = list (k = k))
save (last.data.quick, file = 'nmds_lfq.r')
}
}
|
a688d21f7a8e0135496aea8c765a51db6e72f9c7 | 97f1e3e6e908a83489e4243268ba539316196176 | /man/var.Rd | 66f092f83e7b5dcbc26646eaf18052e244fee9ff | [
"Apache-2.0"
] | permissive | ANTsX/ANTsRCore | 1c3d1da3bea84859da7d18f54c34ae13d2af8619 | 8e234fd1363c0d618f9dc21c9566f3d5464655a2 | refs/heads/master | 2023-05-24T23:53:30.886217 | 2023-05-22T02:52:39 | 2023-05-22T02:52:39 | 83,897,912 | 8 | 22 | null | 2023-05-22T02:52:40 | 2017-03-04T14:09:48 | C++ | UTF-8 | R | false | true | 698 | rd | var.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zzz_Summary.R
\name{var}
\alias{var}
\alias{var.default}
\alias{var.antsImage}
\title{Variance generic}
\usage{
var(x, ...)
\method{var}{default}(x, ...)
\method{var}{antsImage}(x, ..., na.rm = FALSE, mask = NULL)
}
\arguments{
\item{x}{an object for which we want to compute the variance}
\item{\dots}{Any additional arguments to be passed to
\code{\link[stats]{var}}}
\item{na.rm}{a logical value indicating whether NA should be removed}
\item{mask}{is an object of class \code{antsImage}}
}
\description{
Calculates the variance of an image
}
\examples{
img <- antsImageRead( getANTsRData( "r16" ) )
var(img)
}
|
20a0832881fd54aad746e36243492856d5eecf3a | f74447afeedad183a2df5f5a2197f52f49202281 | /R/get_M.R | a4ad8da75bb1883e8fbb512ba648647faace529d | [] | no_license | bacovcin/tsinfer-R | e02630cc3e8b110481393d4428240f42ca2a1b36 | 6766fd8a5f5a7fc09078fd8d8ff00fe9b76934a4 | refs/heads/master | 2021-09-14T01:16:14.173769 | 2018-05-07T02:27:58 | 2018-05-07T02:27:58 | 65,288,884 | 2 | 0 | null | 2018-05-07T02:27:58 | 2016-08-09T11:14:23 | R | UTF-8 | R | false | false | 119 | r | get_M.R | get_M <-
# Weight factor M(x0, s, t):
#   returns 1 when s == 0, otherwise
#   exp(-s t) / (x0 + (1 - x0) exp(-s t))^2 .
# Args: x0 = initial value, s = coefficient, t = time.
get_M <- function(x0, s, t) {
    if (s == 0) {
        return(1)
    }
    decay <- exp(-s * t)                  # computed once instead of twice
    decay / (x0 + (1 - x0) * decay)^2
}
|
4726074fa344b5400a1ca6cad922d98f3bef1ed9 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/hyperSpec/examples/wl.Rd.R | f01c61d77478c6e82ed12f756ca6391a840e97cf | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 523 | r | wl.Rd.R | library(hyperSpec)
### Name: wl
### Title: Getting and Setting the Wavelength Axis 'wl' returns the
###   wavelength axis, 'wl<-' sets it.
### Aliases: wl wl<-
### ** Examples
# wavelength axis of the built-in 'laser' spectra
wl (laser)
# convert from wavelength to frequency
plot (laser)
# NOTE(review): frequency is f = c / lambda, but as written this
# MULTIPLIES by the wavelength (2.998e8 * wl * 1e9) -- confirm against
# the hyperSpec wl() examples before relying on the converted axis.
wl (laser, "f / Hz") <- 2.998e8 * wl (laser) * 1e9
plot (laser)
# convert from Raman shift to wavelength
# excitation was at 785 nm
plot (chondro [1])
# replace the axis and its label in one assignment (wl in nm)
wl (chondro) <- list (wl = 1e7 / (1e7/785 - wl (chondro)), label = expression (lambda / nm))
plot (chondro [1])
|
45b5cc369129aa6efdc540e3c1358f99430a0cbc | 6b7330c8b2d0f485af470c5be84eb33c5aa45114 | /run_analysis.R | 06acc75523b2882f0d198189c3cc1b19cd8887e7 | [] | no_license | IBarata/Getting_and_Cleaning_Data_Project | 16f597fdaaccfba7b43402c396eef072c89b1213 | 533bc22a468e63e380d46d17635821cb28f8e296 | refs/heads/master | 2021-01-20T06:50:11.621388 | 2017-08-27T01:04:03 | 2017-08-27T01:04:03 | 101,518,493 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,581 | r | run_analysis.R | ####Getting and Cleaning data Project
#Set working directory:
setwd("C:/Users/Inga/Desktop/Coursera/3_Getting_and_Cleaning_Data")
#Check if file exists:
if(!file.exists("./dataProject")){dir.create("./dataProject")}
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
#Download file:
download.file(url, destfile= "./dataProject/dataset.zip")
#Record download date
dateDownload <- date() #Thu Aug 24 12:20:07 2017
#Unzip dataset
unzip(zipfile="./dataProject/dataset.zip", files = NULL, list = FALSE, overwrite = TRUE,
junkpaths = FALSE, exdir = "./dataProject", unzip = "internal",
setTimes = FALSE)
#____
##1.Merging the training and the test sets to create one data set:
#Preparing data to merge:
#Reading data
#training:
x_train <- read.table("./dataProject/UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./dataProject/UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./dataProject/UCI HAR Dataset/train/subject_train.txt")
#testing:
x_test <- read.table("./dataProject/UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./dataProject/UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./dataProject/UCI HAR Dataset/test/subject_test.txt")
#features:
features <- read.table('./dataProject/UCI HAR Dataset/features.txt')
#activity labels:
activityLabels = read.table('./dataProject/UCI HAR Dataset/activity_labels.txt')
#Assign names to columns
colnames(x_train) <- features[,2]
colnames(y_train) <-"activityId"
colnames(subject_train) <- "subjectId"
colnames(x_test) <- features[,2]
colnames(y_test) <- "activityId"
colnames(subject_test) <- "subjectId"
colnames(activityLabels) <- c('activityId','activityType')
#Merging the training and the test sets in one data set:
train <- cbind(y_train, subject_train, x_train)
test <- cbind(y_test, subject_test, x_test)
all_data <- rbind(train,test)
#___
##2.Extracting only the measurements on the mean and standard deviation for each measurement
#Reading column names
colNames <- colnames(all_data)
#Extracting data - creating vector for defining ID, mean and standard deviation:
mean_std <- (grepl("activityId" , colNames) |
grepl("subjectId" , colNames) |
grepl("mean.." , colNames) |
grepl("std.." , colNames)
)
#Creating data set from all_data with the desired columns
set_mean_std <- all_data[ , mean_std == TRUE]
#___
##3.Use descriptive activity names to name the activities in the data set
ActivityNames <- merge(set_mean_std, activityLabels,
by='activityId',
all.x=TRUE)
##4.Appropriately labeling the data set with descriptive variable names
#Done in previous steps, with the codes:
#mean_std <- (grepl("activityId" , colNames) |
# grepl("subjectId" , colNames) |
# grepl("mean.." , colNames) |
# grepl("std.." , colNames)
#)
#and
#set_mean_std <- all_data[ , mean_std == TRUE]
#___
##5.Creating a second, independent tidy data set with the average of each variable for each activity and each subject:
#Making second tidy data set:
secondTidySet <- aggregate(. ~subjectId + activityId, ActivityNames, mean)
secondTidySet <- secondTidySet[order(secondTidySet$subjectId, secondTidySet$activityId),]
#Writing second tidy data set in txt file:
write.table(secondTidySet, "secondTidySet.txt", row.name=FALSE)
|
32a6b9d6147b3b2ab9e2ea03b0c1442ddcfedd76 | 4947e18a214971585d0c0e5b18552fb6dd786db0 | /sentence_bucketter.R | 4170e89d6a13e4ca5224c72ec6052f2b06d9a05f | [] | no_license | connermcb/next_word_predictor | acbb7c92039dd7457583ac78332b5085fffe8f83 | bf3fae2cfe2e7f1c18af61303476aa98e3ac4fb3 | refs/heads/master | 2020-03-27T20:05:21.159387 | 2018-11-18T03:01:22 | 2018-11-18T03:01:22 | 147,037,145 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,002 | r | sentence_bucketter.R | ## Script for sorting sentences into buckets by length, i.e. number of tokens
# packages
library(tidytext)
library(readr)
library(quanteda)
setwd("c:/users/conner/jhopkins_data_science/capstone/final/")
# load dictionaries
load("unique_tokens_dict.RData")
load("reverse_token_dict.RData")
# function for bucketting sentences by number of tokens
sentenceBucketter <- function(path, dict, rev_dict){
# read file
text_vec <- read_file(file = paste0("english_us/", path))
# split text file into sentences
sent_vec <- tokens(text_vec, what = "sentence", remove_numbers = TRUE,
remove_punct = TRUE, remove_symbols = TRUE, remove_separators = TRUE,
remove_twitter = TRUE, remove_url = TRUE, verbose = TRUE)
# clean up and make space in memory
rm(text_vec)
gc()
# tokenize words by sentence, result is vector of vectors
token_vec <- tokens(sent_vec[[1]], what = "word", verbose = TRUE,
remove_punct = TRUE, remove_numbers = TRUE,
remove_twitter = TRUE, remove_url = TRUE,
remove_symbols = TRUE, remove_separators = TRUE)
len_token_vec <- length(token_vec)
# turn vector class from token to character
token_vec <- sapply(token_vec, as.character)
# clean up and make space in memory
rm(sent_vec)
gc()
# create hash table and and buckets named 1 to 100
buckets <- new.env()
for(bkt in as.character(seq_len(100))){
assign(bkt, vector("list", length = 1000), envir = buckets)
}
nxt_slot <- rep(1, 100)
# create progress bar
pb <- tkProgressBar(title = path, min = 0,
max = len_token_vec, width = 300)
# sort sentences (tokenized by word) to buckets
# based on number of words, i.e. sentence length
system.time(
for(i in 1:len_token_vec){ #1:len_token_vec
# update progress bar
setTkProgressBar(pb, i, label=paste( round((i/len_token_vec)*100, 2),
"% done"))
#change class to character from token
sentence <- unlist(token_vec[i])
# check spelling and remove tokens not in dictionary
sentence <- tolower(sentence)
spell_check <- hunspell_check(sentence)
sentence <- sentence[spell_check]
# filter out one-letter 'words' except for I and a
one_word_test <- sapply(tokens_vec, function(tkn){nchar(tkn) > 1 | tkn %in% c('i', 'a')})
tokens_vec <- tokens_vec[one_word_test]
# filter very short or very long sentences
sent_len <- length(sentence)
if(sent_len < 3 | sent_len > 100) next
# change words to integers
sentence <- as.integer(rev_dict[sentence])
# throw sentence in bucket
rw <- nxt_slot[sent_len] # sent_len defines/indexes bucket
nxt_slot[sent_len] <- (rw + 1)
bkt <- as.character(sent_len) # assign bucket based on n tokens
buckets[[bkt]][[rw]] <- sentence
})
# name and save environment for use in
hashtable_name <- paste0(sub("en_US_([a-z0-9]+)\\.txt", "\\1", path), "_hashtable.RData")
cat("Saving ", hashtable_name, "\n")
save(buckets, file = hashtable_name)
# clean up memory
rm(token_vec)
rm(buckets)
gc()
# clean up progress bar
close(pb)
}
# corpora to process, located under "english_us/"
paths <- c("en_US_blogs.txt", "en_US_news2.txt", "en_US_twitter.txt")
# NOTE(review): paths[3:3] runs ONLY the twitter corpus; iterate over
# 'paths' itself to process all three files.
for( p in paths[3:3] ){
  sentenceBucketter(p, dict, rev_dict)
}
|
da65124e6bc14eee9385b06fcbc89005ae3bf8a6 | 50a224c2e0a946c21950a8102370979b1dfea695 | /man/process_pheno.Rd | bd53f108030b5070f09d0b2cc087dc8fa2f62f6b | [] | no_license | AndersenLab/cegwas | 0c5c0eb9568fe1b1f3ee931b2dfd0a27c894b21f | 9ee400c8e11a1f317d382010bcfecf81ab92160a | refs/heads/master | 2020-12-11T18:46:06.022585 | 2020-03-02T22:30:15 | 2020-03-02T22:30:15 | 41,169,335 | 3 | 3 | null | 2015-10-26T03:43:42 | 2015-08-21T17:48:22 | R | UTF-8 | R | false | true | 1,947 | rd | process_pheno.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process_phenotypes.R
\name{process_pheno}
\alias{process_pheno}
\title{Process Phenotype Data}
\usage{
process_pheno(data, remove_strains = TRUE, duplicate_method = "first",
use_bamf = TRUE)
}
\arguments{
\item{data}{is a dataframe containing phenotype data. The dataframe can be structured in wide or long format. %
\cr
\cr
\bold{long format} - The first columns should be \code{strain} whereas additional columns are traits.
One row list a strain followed by all of that strains phenotypes.
\cr\cr
\code{strain}, \code{trait1}, \code{trait2}, \code{...}
\cr\cr
\bold{wide format} - The first column should be named \code{trait} and subsequent
all additional columns should be strains. One row corresponding to one trait for all strains.
\cr\cr
\code{trait}, \code{strain1}, \code{strain2}, \code{...}
\cr\cr}
\item{remove_strains}{Remove strains with no known isotype. Default is TRUE.}
\item{duplicate_method}{Method for dealing with the presence of multiple strains falling into the same isotype. Either \code{"average"} or \code{"first"}.}
\item{use_bamf}{use bamf prune to remove outliers}
}
\value{
Outputs a list. The first element of the list is an ordered vector of traits.
The second element of the list is a dataframe containing one column for each strain, with values corresponding to traits in element 1 for rows.
}
\description{
\code{process_pheno} Processes raw data file for GWAS mapping using \code{\link{gwas_mappings}} function
}
\details{
This function takes raw phenotype data and eliminates outlier strains with a modified version of \code{\link{bamf_prune}} from the easysorter package.
Additionally it eliminates any traits that have the same values for >95% of the strains (important for binary traits).
If multiple strains fall into the same isotype, they are resolved according to \code{duplicate_method}: either averaged together (\code{"average"}) or reduced to the first strain encountered (\code{"first"}); strains with no known isotype can be removed by setting \code{remove_strains} to \code{TRUE}.
}
|
d10680b05f71ef2bbdcfd5c3001617bb54938624 | 37166e4736d20b82228a243d092b493061669e78 | /pwn/man/dlsplit.Rd | c85e9e1b8f0ad6bf8936e10a149bd2e4d927d230 | [] | no_license | renegadeWolfe/pwn | 7e6c83b7aabe08a2d368975519848b1498cc9c48 | 3e3c39a7e01b8b884d11aa4ca2526083a1e606be | refs/heads/master | 2020-03-14T09:24:30.356523 | 2018-06-26T02:02:16 | 2018-06-26T02:02:16 | 131,544,232 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,638 | rd | dlsplit.Rd | \name{dlsplit}
\alias{dlsplit}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
Splits the "<" sign from those annoying variables that have a detection limit and have been keyed in as such.
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
Cleans variables that carry a detection limit and were keyed in with a leading "<" (or ">") sign; R reads such variables as factors.
This returns the variable in question as a numeric, together with a companion variable holding the stripped "<"/">" sign (NA where no sign was present).
}
\usage{
dlsplit(df1, varX)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{df1}{
%% ~~Describe \code{df1} here~~
is a dataframe with the variable in question.
}
\item{varX}{
%% ~~Describe \code{varX} here~~
is column name of the variable in question, entered as a character string.
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
It is I, LeClerc!
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
id <- c(1:5)
alpha <- c(0.015, "<0.003", 0.0025, 0.007, "<0.003")
bravo <- c(0.002, "<0.003", 0.007, 0.125, ">0.5")
x <- data.frame(id,alpha,bravo)
x <- dlsplit(x,"alpha")
x <- dlsplit(x,"bravo")
## The function is currently defined as
function (df1, varX)
{
red <- as.character(df1[,varX])
varNme <- as.character(varX)
varNme_less <- paste(varNme,"ND",sep="_")
red.1stChar <- substr(red,1,1)
red.lessthan <- ifelse(red.1stChar=="<","<",
ifelse(red.1stChar==">",">",NA))
red <- sub("<","",red)# remove less than sign.
red <- as.numeric(sub(">","",red))# remove more than sign.
df1[,varX] <- NULL
df1$var.nd <- red.lessthan
names(df1)[names(df1) == 'var.nd'] <- varNme_less
df1$red <- red
names(df1)[names(df1) == 'red'] <- varNme
return(df1)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ detection }
\keyword{ limit }
\keyword{ split }
\keyword{ fix }% __ONLY ONE__ keyword per line
|
e5143ecc9821c93db459e6ee387949fd3d5cd0bb | 41cd2ef22cbbc05b56bda9a635a9bbc75b52ac62 | /man/FitARz.Rd | 5e20762144e3a06ef15492ef910aba8742774339 | [] | no_license | githubfun/FitAR | 02d05c8c0682769a985e86c2b25d742ea741d789 | 015df6beac6d9e0beff55c2f20a01a9e07911e67 | refs/heads/master | 2018-02-28T10:48:51.369858 | 2013-03-15T00:00:00 | 2013-03-15T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,982 | rd | FitARz.Rd | \name{FitARz}
\alias{FitARz}
\title{ Subset ARz Model Fitting }
\description{
The subset ARz model, defined by constraining partial autocorrelations to zero,
is fitted using exact MLE.
When length(p)=1, an AR(p) is fit by MLE.
}
\usage{
FitARz(z, p, demean = TRUE, MeanMLEQ = FALSE, lag.max = "default")
}
\arguments{
\item{z}{ time series, vector or ts object }
\item{p}{ p specifies the model. If length(p) is 1, an AR(p) is assumed and if p
has length greater than 1, a subset ARz is assumed.
For example, to fit a subset model with lags 1 and 4 present set p to c(1,4) or
equivalently c(1,0,0,4). To fit a subset model with just lag 4, you must use
p=c(0,0,0,4) since p=4 will fit a full AR(4).}
\item{demean}{TRUE, mean estimated. FALSE, mean is zero. }
\item{MeanMLEQ}{use exact MLE for mean parameter }
\item{lag.max}{ the residual autocorrelations are tabulated for lags 1, \dots, lag.max. Also
lag.max is used for the Ljung-Box portmanteau test. }
}
\details{
The model and its properties are discussed in McLeod and Zhang (2006)
and McLeod and Zhang (2008).
}
\value{
A list with class name "FitAR" and components:
\item{loglikelihood }{value of the loglikelihood}
\item{phiHat }{coefficients in AR(p) -- including 0's}
\item{sigsqHat }{innovation variance estimate}
\item{muHat }{estimate of the mean}
\item{covHat }{covariance matrix of the coefficient estimates}
\item{zetaHat }{transformed parameters, length(zetaHat) = \# coefficients estimated}
\item{RacfMatrix }{residual autocorrelations and sd for lags 1, \dots, lag.max}
\item{LjungBox}{table of Ljung-Box portmanteau test statistics}
\item{SubsetQ }{parameters in AR(p) -- including 0's}
\item{res}{innovation residuals, same length as z}
\item{fits}{fitted values, same length as z}
\item{pvec }{lags used in AR model}
\item{demean }{TRUE if mean estimated otherwise assumed zero}
\item{FitMethod }{"MLE" or "LS"}
\item{IterationCount }{number of iterations in mean mle estimation}
\item{convergence }{value returned by optim -- should be 0}
\item{MLEMeanQ }{TRUE if mle for mean algorithm used}
\item{ARModel}{"ARp" if FitARp used, otherwise "ARz"}
\item{tsp}{tsp(z)}
\item{call}{result from match.call() showing how the function was called}
\item{ModelTitle}{description of model}
\item{DataTitle}{returns attr(z,"title") }
\item{z}{time series data input) }
}
\references{
McLeod, A.I. and Zhang, Y. (2006).
Partial Autocorrelation Parameterization for Subset Autoregression.
Journal of Time Series Analysis, 27, 599-612.
McLeod, A.I. and Zhang, Y. (2008a). Faster ARMA Maximum Likelihood Estimation,
Computational Statistics and Data Analysis,
52-4, 2166-2176.
DOI link: \url{http://dx.doi.org/10.1016/j.csda.2007.07.020}.
McLeod, A.I. and Zhang, Y. (2008b, Submitted).
Improved Subset Autoregression: With R Package.
Journal of Statistical Software.
}
\author{ A.I. McLeod }
\note{
Normally one would use the \code{FitAR} function which
then calls this function for the ARz case.
}
\seealso{
\code{\link{FitAR}},
\code{\link{FitARp}},
\code{\link{GetFitARz}},
\code{\link{GetFitARpMLE}},
\code{\link{RacfPlot}}
}
\examples{
#First Example: Fit exact MLE to AR(4)
set.seed(3323)
phi<-c(2.7607,-3.8106,2.6535,-0.9238)
z<-SimulateGaussianAR(phi,1000)
ans<-FitARz(z,4,MeanMLEQ=TRUE)
ans
coef(ans)
\dontrun{#save time building package
#Second Example: compare with sample mean result
ans<-FitARz(z,4)
coef(ans)
#Third Example: Fit subset ARz
z<-log(lynx)
FitARz(z, c(1,2,4,7,10,11))
#now obtain exact MLE for Mean as well
FitARz(z, c(1,2,4,7,10,11), MeanMLE=TRUE)
#Fourth Example: Fit subset ARz
somePACF<-c(0.5,0,0,0,-0.9)
someAR<-PacfToAR(somePACF)
z<-SimulateGaussianAR(someAR,1000)
ans=FitARz(z, c(1,5),MeanMLEQ=TRUE)
coef(ans)
GetFitARz(z,c(1,5))#assuming a known zero mean
}
}
\keyword{ ts }
|
d01d474c4f3e95b5921d2fae8964bcacab410c82 | 813a1779df54e2f23976f55780ec3e0acee49128 | /simulation_code_and_results/common_efron.R | 688744fe3ccb1e9c6e93ad195efcd0dea45ebc7d | [
"MIT"
] | permissive | kapelner/sequential_matching_simulations_2 | dcce09135447c01be873aaec119c96049be76caf | 25137e8a378c0e903fd2fcfb1982e68dd2adeb5e | refs/heads/master | 2021-01-13T01:50:38.743716 | 2013-12-25T01:01:13 | 2013-12-25T01:01:13 | 13,599,365 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 804 | r | common_efron.R |
#initialize the treatment-indicator vector (1 = treated, 0 = control);
#NA marks subjects not yet assigned
indic_T = array(NA, n)
EFRON_ALPHA = 2 / 3 #his personal favorite
#Efron's biased-coin design: assign subjects sequentially, biasing the coin
#toward whichever arm is currently under-represented.
#seq_len(n) is safe for n == 0, where 1:n would wrongly iterate over c(1, 0).
for (i_match in seq_len(n)){
	#hoist the arm counts: each sum was recomputed up to four times per
	#iteration in the original; the branching (and hence the rbinom call
	#sequence / RNG stream) is unchanged
	n_trt = sum(indic_T == 1, na.rm = TRUE)
	n_ctrl = sum(indic_T == 0, na.rm = TRUE)
	if (n_trt == n_ctrl){
		#arms balanced: draw with the nominal treatment probability
		indic_T[i_match] = rbinom(1, 1, prob_trt)
	} else if (n_trt < n_ctrl){
		#treatment under-represented: bias toward treatment
		indic_T[i_match] = rbinom(1, 1, EFRON_ALPHA)
	} else {
		#treatment over-represented: bias toward control
		indic_T[i_match] = rbinom(1, 1, 1 - EFRON_ALPHA)
	}
}
#create response vector (model chosen by the global `response_model`)
source(paste("create_response_", response_model, ".R", sep = ""))
#create design matrix from covariates, assignment and response
Xy = as.data.frame(cbind(x_s, indic_T, y))
#pull out yT, yC: responses for treated / control subjects
yTs = Xy[Xy$indic_T == 1, "y"]
yCs = Xy[Xy$indic_T == 0, "y"]
c432a9a7ba7f4f9b4c9848e4f3a215b1cfa95ad0 | 6a28ba69be875841ddc9e71ca6af5956110efcb2 | /Probability_And_Statistics_For_Engineers_by_Richard_L._Scheaffer,_Madhuri_S._Mulekar,_James_T._Mcclave/CH6/EX6.26/ex_6_26.R | e254869a7266b43d78b1453bd9edeaaefdf9ae71 | [] | permissive | FOSSEE/R_TBC_Uploads | 1ea929010b46babb1842b3efe0ed34be0deea3c0 | 8ab94daf80307aee399c246682cb79ccf6e9c282 | refs/heads/master | 2023-04-15T04:36:13.331525 | 2023-03-15T18:39:42 | 2023-03-15T18:39:42 | 212,745,783 | 0 | 3 | MIT | 2019-10-04T06:57:33 | 2019-10-04T05:57:19 | null | UTF-8 | R | false | false | 231 | r | ex_6_26.R |
# Weibull-distribution lifetime example: shape = gama, scale = Theeta^(1/gama).
Theeta = 50
gama = 2
# (a) survival probability P(X > 10)
surv_prob <- 1 - pweibull(10, shape = gama, scale = sqrt(Theeta))
cat("P(X>10) = ", surv_prob)
# (b) mean lifetime E(X) = theta^(1/gamma) * Gamma(1 + 1/gamma)
expected_life <- (Theeta^(1 / gama)) * gamma(1 + (1 / gama))
cat(" Expected lifetime of thermisters is E(X) =", expected_life)
|
8e3ab42df7bca90ed2ad36c776955eb6312629fa | 1c9e02b3e531f1dad978afff5fd863e6fe007383 | /R/fw13_createRawDataframe.R | 9d17a87f3d06ca1b8e0fdb25a91bfc5a4d1b7300 | [] | no_license | mgjust/RAWSmet | 95d23fdc45bba8e3cfa3974f85847e8b077ac4ac | 79bad67e4d8a13a966dc19c40144bddba44e6254 | refs/heads/master | 2023-02-18T21:39:20.503427 | 2021-01-19T00:25:55 | 2021-01-19T00:25:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,321 | r | fw13_createRawDataframe.R | #' @export
#' @importFrom rlang .data
#' @import MazamaCoreUtils
#'
#' @title Obtain RAWS FW13 data and parse into a tibble
#'
#' @param nwsID Station identifier.
#' @param baseUrl Base URL for data queries.
#' @param verbose Logical flag controlling detailed progress statements.
#'
#' @return Raw tibble of RAWS data.
#'
#' @description Obtains station data from a webservice and converts
#' it into a quality controlled, metadata enhanced "raw" tibble
#' ready for use with all \code{raw_~} functions.
#'
#' Steps involved include:
#'
#' \enumerate{
#' \item{download data text}
#' \item{parse data text}
#' }
#'
#' @examples
#' \dontrun{
#' library(RAWSmet)
#'
#' tbl <- fw13_createRawDataframe(nwsID = 500742)
#' dplyr::glimpse(tbl)
#' }
#'
#' @seealso \code{\link{fw13_downloadData}}
#'
#' @references \href{https://cefa.dri.edu/raws/}{Program for Climate, Ecosystem and Fire Applications}
#' @references \href{https://fam.nwcg.gov/fam-web/weatherfirecd/13.htm}{FW13 Data Format}
fw13_createRawDataframe <- function(
  nwsID = NULL,
  baseUrl = "https://cefa.dri.edu/raws/fw13/",
  verbose = FALSE
) {
  # ----- Validate parameters --------------------------------------------------
  MazamaCoreUtils::stopIfNull(nwsID)
  # Guarantee it is zero padded six characters
  nwsID <- stringr::str_pad(nwsID, 6, pad = "0")
  # ----- Download/parse data --------------------------------------------------
  # Read in FW13 data as one raw text string
  fileString <- fw13_downloadData(nwsID, baseUrl)
  # When the download fails, parse a single dummy record so that read_fwf()
  # still yields a tibble with the correct columns/types; the dummy row is
  # dropped just before returning, so callers receive an empty tibble.
  returnEmptyTibble <- FALSE
  if ( fileString == "" ) {
    if ( MazamaCoreUtils::logger.isInitialized() ) {
      logger.warn("RAWS data service failed for nwsID: '%s'", nwsID)
    }
    returnEmptyTibble <- TRUE
    fileString <- "W13500742200507131500R 72 36261 4 72 48100 34 1 20 213 713200 10 \n"
  }
  # Read fwf raw data into a tibble
  # Set these manually. The website that stores this information times out often.
  # This information originally came from https://fam.nwcg.gov/fam-web/weatherfirecd/13.htm
  widths = c(3, 6, 8, 4, 1, 1, 3, 3, 3, 3, 2, 3, 3, 3, 3, 2, 5, 1, 2, 2, 1, 1, 1, 4, 3, 3, 1)
  # Column-type string grouped for readability, then collapsed ("c" = character,
  # "n" = numeric).
  col_types <- "cccc ccnn nnnnn nnnn cnnc ccnn nc" %>% stringr::str_replace_all(" ","")
  col_names <- c(
    "recordType", "nwsID", "observationDate", "observationTime",
    "observationType", "weatherCode", "dryBulbTemp", "atmosMoisture",
    "windDirection", "avWindSpeed", "fuelMoisture", "maxTemp", "minTemp",
    "maxRelHumidity", "minRelHumidity", "precipDuration", "precipAmount",
    "wetFlag", "herbaceousGreenness", "shrubGreenness", "moistureType",
    "measurementType", "seasonCode", "solarRadiation", "maxGustDirection",
    "maxGustSpeed", "snowFlag"
  )
  col_positions <- readr::fwf_widths(
    widths = widths,
    col_names = col_names
  )
  # Read in raw data (a string containing "\n" is treated as literal data)
  tbl <-
    readr::read_fwf(
      file = fileString,
      col_positions = col_positions,
      col_types = col_types,
      progress = verbose
    )
  # Drop the dummy record parsed above. The variable is always assigned
  # earlier in this function, so the original `exists("returnEmptyTibble")`
  # guard was dead code; the magic-station-name filter
  # (nwsID == "Rumplestiltskin") is replaced by a plain zero-row subset,
  # which keeps all columns and types.
  if ( returnEmptyTibble )
    tbl <- tbl[0, ]
  # ----- Return ---------------------------------------------------------------
  return(tbl)
}
|
7a22d9359c7120c2ee1111ebba2c1a06b58e1f56 | 45f9cb0b85b684e2e4d68304971996441c005848 | /ParamInferenceMBaiki.R | 7da4b5fe40263ec934395eab5558cbf00a4bf815 | [] | no_license | EricTonygo/MyShinyApp | bdfa442439ad1487b8f233fd1a5e3469f28b9819 | 4283544fa610766cac57fa9365e5b17fbda61c26 | refs/heads/master | 2020-07-22T22:38:49.491289 | 2016-08-31T06:34:19 | 2016-08-31T06:34:19 | 66,434,113 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,203 | r | ParamInferenceMBaiki.R | ################################################################
# Parameters for inferring the demographic models on the M'Baiki dataset     #
################################################################
# WARNING : Modifying the below parameters implies to run the function ClusteringWithFlexmix before FCM
# Diameter class lower bounds, in cm
ClassesDiam=c(9,20,30,45,60,80,100,120)
NbClasse=length(ClassesDiam)
# Census plot area, in ha
Surface=1
# Name of the time unit used for plot labels
Lab.period="years"
# Periodicity of the census (number of periods between censuses)
Nb.period=1
# Recruitment-model covariates:
# NbArbresXX : number of trees of the species in diameter class [XX, XX+1)
# SSTXX : stand basal area above diameter XX
# EffectXX : stand tree count above diameter XX
#VarRecrut=c("STTC9","STTC30","STTC50","STTC70","STTC90","STTC110")
Models=list(Recruitment="RecruitmentFlexmix",Growth="GrowthFlexmix",Mortality="MortalityFlexmix")
VarRecrut=c("NbArbres20","NbArbres30","NbArbres45","NbArbres60","NbArbres80","NbArbres100","NbArbres120",
            "STTC9","STTC20","STTC30","STTC45","STTC60","STTC80","STTC100","STTC120") #
# Species that must not be grouped automatically (original mis-encoded French
# comment: "Especes a ne pas regrouper automatiquement")
UserListApartSpecies=list()
|
259a83a77c3a95bd4fb66b79c95c1f0d13b7a95d | 1455cd41f97808910607bea910a7d1387797ca6e | /man/deferred_errors_flush.Rd | c5f64dcfae785056b34fc02e126f317d5387b0a7 | [
"MIT"
] | permissive | reside-ic/defer | 415e986e86a65d08bc2f3b02466a9fb51d6960b7 | b9f933772c59974dc904de09cdcc4972f0812f0c | refs/heads/master | 2020-07-01T15:20:27.418962 | 2019-08-13T16:01:00 | 2019-08-13T16:01:19 | 201,209,156 | 9 | 0 | null | null | null | null | UTF-8 | R | false | true | 674 | rd | deferred_errors_flush.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/defer.R
\name{deferred_errors_flush}
\alias{deferred_errors_flush}
\title{Flush deferred errors}
\usage{
deferred_errors_flush()
}
\description{
Within a \code{\link{defer_errors}} block, flush any deferred
errors, turning them into realised errors. If no deferrable
errors have occurred, this function has no effect.
}
\examples{
check_positive <- function(x) {
if (x < 0) {
deferrable_error(paste("got a negative number:", x))
}
}
err <- tryCatch(
defer::defer_errors({
check_positive(-1)
defer::deferred_errors_flush()
check_positive(-2)
}),
error = identity)
err
}
|
b71dd59ce8e7e64b5767cc046d96ed941a7d621b | 5fa68e8446818a831bef92514f08ea49e02cd4dc | /src/extras/read_quality.R | 8b7edfe1c7ce89045d2efccda5937bfb49a5de75 | [] | no_license | rajwanir/flongle_actinomycetes_paper | da44f513657001582429163f4cdda65f69d84f91 | 60675446d838413e16373ab040baaecfcd91c60f | refs/heads/main | 2023-07-12T18:28:06.859935 | 2021-08-06T16:03:40 | 2021-08-06T16:03:40 | 393,417,935 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 423 | r | read_quality.R | library(tidyverse)
# library(nanoR)

# Guppy per-read basecalling summary: one row per basecalled read.
guppy_stats <- read.csv(
  "data/rawdata/vanRsoilactino_10222020/basecalled_fastq/sequencing_summary.txt",
  sep = "\t"
)

# Read-length distribution for low-quality reads (mean qscore below 7).
low_quality <- guppy_stats %>% filter(mean_qscore_template < 7)
ggplot(low_quality, aes(x = sequence_length_template)) +
  geom_histogram(binwidth = 1) +
  scale_x_continuous(breaks = 1:15)

# Total bases called per flow-cell channel.
guppy_stats %>%
  group_by(channel) %>%
  summarize(output = sum(sequence_length_template))
861a390946a14a866e627a866c182ca4e0cd05a3 | 02f053ce70b065724d4a02619fb402adcc0ec997 | /analysis/boot/boot392.R | 2620c8e0f679b2b0375815958acdf40c1814eb0b | [] | no_license | patperry/interaction-proc | 27950482929240bba55c7d0f2f8c5235d770feea | cf8dfd6b5e1d0684bc1e67e012bf8b8a3e2225a4 | refs/heads/master | 2021-01-01T06:11:47.125853 | 2012-12-04T20:01:42 | 2012-12-04T20:01:42 | 673,564 | 1 | 3 | null | null | null | null | UTF-8 | R | false | false | 3,751 | r | boot392.R | seed <- 392
log.wt <- 0.0
penalty <- 2.8115950178536287e-8
intervals.send <- c()
intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400)
dev.null <- 358759.0022669336
df.null <- 35567
dev.resid <- 225771.87625360364
df.resid <- 35402
df <- 165
coefs <- c(6.88847341507195, 5.629375822284074, 5.732755650790626, 5.298946883198586, 5.008017365800643, 4.7998221297493195, 4.7430479606284655, 4.629275688310896, 4.388537775421537, 4.287962536446685, 4.316563181288979, 4.148341629757787, 4.025114062888365, 3.9579634615622328, 3.747455456973522, 3.560976810749075, 3.2406774183465723, 2.9357920259801027, 2.5191559231204756, 2.078670471353085, 1.657240145079284, 0.9281385652203556, 1.1072929102808968, 0.7351952706220032, 0.3668269450949401, -1.0921682726136177, -2.4090433990371266e-2, 1.0348811583260749, 1.1254570463488252, -1.811973154062397, -3.0061187231462316, -2.6005422913797873, -0.6373620884458159, 0.7638693758370083, 1.4551389024858843, -1.4947308950530076, 0.2455430148246304, -1.0076293955347306, -0.2718842099773957, -1.0460081285426577, 0.9373129900620467, 1.0123096269677676, -0.6991708531981045, -2.449992547384692, -1.184474438128662, -0.7034969476233538, -0.8206993991692593, 0.13562198460715916, 1.4090221404614854e-2, -0.7115483824159604, -0.43904836638984784, 1.1236780144245881, -2.8569025969848694, 1.7481917205950412, 0.8771833350467311, 1.1578366410785979, -1.7612212302337569, -0.2689313305218899, -0.21857597559372496, 1.5062893057088205, 1.2057015081344438, 0.7986417964491808, -1.2974260716401265, -1.3344136944970157, -0.6649904891282967, 0.29540881775322864, 0.7625370611778073, -0.3823088219978774, -1.1516498358916258, -0.6351848638269922, -2.8585671662566474, -0.27463587285848695, 0.46831005039649803, 0.959786217562089, 0.687026920949164, -0.6223720981636836, -0.9662862944676728, -1.5656837933548613, -0.12466070110222353, 0.8241795497391291, 1.2177616788952605, 0.14868976155321256, 0.1594027622127132, -1.9724355711167212, -1.2585011259696166, 0.27491645142247506, 1.172937221388302, 0.5709077140572019, 0.9184164799044169, -1.8447171525887676, 0.5720823228146834, 0.7308134398879051, 0.7450570598380322, 0.42417346769571673, 0.28455836336968776, 1.4835419057311001, -0.37480928188303547, 
0.31960365345853337, 0.10805574850494458, -7.699312411311561e-4, 0.5440560287307484, -0.1649555998328457, 0.9274335442711128, 0.21325297281182926, 0.8265431898787706, 0.8371256007030434, 1.174220450743243, -0.2690969595318159, -0.4591091777932888, -0.7906073411628524, 0.4238150658830818, 0.7110200140053913, 1.589056213401232, -0.3251068523568524, -0.12730255092294207, -0.8334855875268666, 0.7878900723889947, -0.31368756107349793, 0.4765988025023413, 0.5615632543353405, -0.32515870117357054, -0.37564170342297304, -1.1498450344504039, -0.9095845386735686, 0.2524833942511014, 0.7636538163508811, 3.951630975950948e-2, 1.052726124958666, -0.4017435139663349, -0.3492955317114017, 0.46996813608263566, 0.835905652602745, 0.8052043988305272, 0.4266518581106711, -5.633008163409558e-2, 1.1746508093809147, -0.3205998989637414, 0.9353107459940764, 0.7518469592284093, 1.0591570243118986, 0.7455551704442738, -0.7368687421151779, -1.3312502865179232, 0.7691794759605435, 0.4924597276595318, 0.4730466965789128, -0.3093812255282972, -0.6773931327208864, -2.119844122079584, 1.198764595950555, 0.10410763619415472, 1.2714191643132022, -0.3459123792764597, -9.453179413705128e-2, 0.10191249986018122, -2.0163975510076937, -1.180557579525702, 0.7249484215682872, 1.1561704668848798, -2.4596186355222614e-2, 1.5618412489257891, -0.241202771416015, -7.745434012286624e-2, -4.305566881208094e-2, 1.1595563144972232)
|
becf59ac27a72861719e2f6352315d483328bf2c | a4b54038d49ebdb4d9e222cc94f12e3634e9db3a | /scripts/plot_functions.R | 78c7f8e204ddd6a85e02f9b2bd72f5b54103051e | [
"MIT"
] | permissive | wdoyle42/fed_projections | eb0956a666b53cfe01f7546049c5a6f5f27627f9 | f7f4cd07508eccbbdcf2b011da0ec792e33d4024 | refs/heads/master | 2020-05-16T05:13:19.139284 | 2019-04-23T18:43:02 | 2019-04-23T18:43:02 | 182,809,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,960 | r | plot_functions.R | ## Functions for gifted mapping app
library(albersusa)
library(tidyverse)
library(plotly)
library(readxl)
library(scales)
library(sf)
library(readxl)
library(usmap)
library(leaflet)
library(sp)
library(sf)
library(tigris)
library(albersusa)
library(htmlwidgets)
library(scales)
library(RColorBrewer)
library(tidyverse)
## gg_state_plot: horizontal bar chart of `var` by `groupvar`, with bars
## colored by a binned ("cut") version of the value, returned as an
## interactive plotly object whose tooltip shows "group: value".
## df: data frame containing the columns named by `var` and `groupvar`
## var: character name of the numeric variable to plot
## groupvar: character name of the grouping variable (one bar per level)
## axis_label: label for the value axis (x axis after coord_flip)
gg_state_plot<-function(df,var,groupvar,axis_label){
  ## Materialize the two columns under fixed names so aes() can refer to them.
  df$groupvar<-unlist(df[groupvar])
  df$v<-unlist(df[var])
  ## Quantiles bounding the binned color scale (whole range by default).
  top.percent<-1
  bottom.percent<-0
  #Number of levels to cut by
  n.levels<-10
  ## Cuts
  ## Using max makes bad ranges, use top percentile (set above as constant) instead.
  mymax<-quantile(df$v
                  ,top.percent,na.rm=TRUE)
  mymin<-quantile(unlist(df$v)
                  ,bottom.percent,na.rm=TRUE)
  mylevels<-cbreaks(range=c(mymin,mymax),
                    pretty_breaks(n.levels,high.u.bias=1000),
                    labels=comma_format())
  ##Change those labels into ranges, e.g. "10-20"
  i<-2:length(mylevels$labels)
  mynicelevels<-NULL
  mynicelevels[i-1]<-paste0(mylevels$labels[i-1],"-",mylevels$labels[i])
  ##Take all the "v" data and create nice groups for it.
  ##Apply ranges as defined above and add to the existing data
  df$vcut<-findInterval(unlist(df[var]),vec=mylevels$breaks)
  df$vcut<-factor(df$vcut,
                  levels=(i-1),
                  labels=mynicelevels,
                  ordered=TRUE)
  ## Create palette, might want to match with plot above
  pal<- (brewer.pal(length(mylevels$breaks), 'RdYlGn'))
  ## (Dead code removed: the original also built a leaflet colorFactor
  ## `fpal`, a `myval` vector and a df$State label column here, none of
  ## which were referenced by the plot below.)
  ## Tooltip text: "group: value" with thousands separators.
  gg<-ggplot(df,aes(text=paste0(groupvar,": ", prettyNum(round(v), big.mark = ","))))
  gg<-gg+geom_bar(aes(
    x=fct_reorder(.f=as_factor(groupvar),
                  .x=v),
    y=v,
    fill=vcut),
    stat="identity")
  gg<-gg+scale_fill_manual(values =pal)
  gg<-gg+coord_flip()
  gg<-gg+xlab("")+ylab(axis_label)
  gg<-gg+theme_minimal()
  gg<-gg+theme(axis.text.y=element_text(size=7,angle=15))
  gg<-gg+theme(legend.position="none")
  ## Convert to interactive plotly, showing only the custom tooltip text.
  outplot<-ggplotly(gg,tooltip="text")
  outplot
}
## Mapping Function
map_gen<-function(v,geo_df,legend_label){
# This is a function to generate a map linked to plots
## of data, it takes one argument "v" which specifies
## the variable to use
## geo_df= geographic data frame
## legend_label: title of legend
geo_df$v<-geo_df[v][[1]]
## Top percent used to set range
top.percent<-1
bottom.percent<-0
#Number of levels to cut by
n.levels<-10
## Cuts
## Using max makes bad ranges, use top percentile (set above as constant) instead.
mymax<-quantile(geo_df$v,
top.percent,na.rm=TRUE)
mymin<-quantile(geo_df$v,
bottom.percent,na.rm=TRUE)
mylevels<-cbreaks(range=c(mymin,mymax),
pretty_breaks(n.levels,high.u.bias=1000),
labels=comma_format())
##Change those labels into ranges
i<-2:length(mylevels$labels)
mynicelevels<-NULL
mynicelevels[i-1]<-paste0(mylevels$labels[i-1],"-",mylevels$labels[i])
##Take all the "v" data and create nice groups for it.
##Apply ranges as defined above and add to the existing data
geo_df$vcut<-findInterval(geo_df$v,vec=mylevels$breaks)
geo_df$vcut<-factor(geo_df$vcut,
levels=(i-1),
labels=mynicelevels,
ordered=TRUE)
## Create palette, might want to match with plot above
pal<- (brewer.pal(length(mylevels$breaks), 'RdYlGn'))
fpal <- colorFactor(pal = pal,
domain = geo_df$vcut,
ordered = TRUE)
## Create a label for each state that links to
## external plots
state_pop<-paste0(
geo_df$name,"</a><b>",
'<br/> ',
v,
": ",
prettyNum((geo_df$v),digits=1)
)
## Set line weights
myweight=1
myopacity=.5
## Set projection
epsg2163 <- leafletCRS(
crsClass = "L.Proj.CRS",
code = "EPSG:2163",
proj4def = "+proj=laea +lat_0=45 +lon_0=-100 +x_0=0 +y_0=0 +a=6370997 +b=6370997 +units=m +no_defs",
resolutions = 2^(16:7))
## Create leaflet map
out_map<-leaflet(data = geo_df,
options = leafletOptions(crs = epsg2163)) %>%
addPolygons(
color = 'black',
weight = myweight,
opacity=myopacity,
fillColor = fpal(geo_df$vcut),
fillOpacity = 0.75,
popup=state_pop)%>%
addLegend('bottomright',
title=legend_label,
pal = fpal,
values = geo_df$vcut
)%>%
setView(lng = -98.35, lat = 39.50, zoom = 3)
} # End function
|
a7138c23616b1c712c244cb1b72541038f972650 | 13aa67012a9cb7f3d5bb0f0eb30b8088e8e33203 | /R/plotprofile.R | 84fa2304a878d9e5c10ef0b133466e59abf9a11e | [] | no_license | yaomin/DESnowball | 0db4e7afd8693e42c0c61870a032f227f4affc69 | cb053e96615d71b7e761a90fc25392b5f02c82a1 | refs/heads/master | 2020-05-21T06:59:33.352890 | 2014-09-22T16:52:21 | 2014-09-22T16:52:21 | 14,316,645 | 1 | 0 | null | 2014-09-26T20:55:29 | 2013-11-11T23:27:39 | R | UTF-8 | R | false | false | 56 | r | plotprofile.R | plotprofile <-
function(x,...) UseMethod('plotprofile')
|
bb5bbd975a0964d2ed07fca450f006021fab64f5 | ee84363f1dbc3c0dfc28f8a385730b1b1ae0a016 | /2021/10-08-2021/10082021.R | 322113df9b41b923922e64477b7ae05ae40e1f81 | [] | no_license | Rohan4201/tidytuesday-2 | 15455f4b7c1c214a105974ebca5a77cea7acdc8a | 08841bf71f571af945f36dc0cb3fc289c55e804b | refs/heads/main | 2023-07-19T05:52:02.701163 | 2021-08-31T18:51:01 | 2021-08-31T18:51:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,238 | r | 10082021.R | library(tidyverse)
library(extrafont)
#library(patchwork)
tuesdata <- tidytuesdayR::tt_load('2021-08-10')
investment <- tuesdata$investment
ipd <- tuesdata$ipd
chain <- tuesdata$chain_investment
#president data
new_presidential <- presidential %>% add_row(name="Truman", start=as.Date("1947-01-01"), end=as.Date("1953-01-20"), party="Democratic", .before = 1) %>%
add_row(name="Trump", start=as.Date("2017-01-20"), end=as.Date("2021-01-20"), party="Republican")
#prep plot data
plot_data <- chain %>% filter(meta_cat %in% c("Health","Digital","Education","Conservation and development","Power")) %>% group_by(year, meta_cat) %>% summarise(total=sum(gross_inv_chain))
plot_data$new_date = as.Date(paste(plot_data$year, 1, 1, sep = "-"))
plot_data$meta_cat <- recode(plot_data$meta_cat, "Conservation and development" = "Conservation")
#make plot
p <- ggplot() +
geom_rect(data=new_presidential, mapping=aes(xmin=start, xmax=end, ymin=-Inf, ymax=Inf, fill=party, alpha=party)) +
scale_fill_manual("", values=c("Democratic"="#0000ff", "Republican"="#e50000")) +
scale_alpha_manual("", values=c("Democratic"=0.2, "Republican"=0.2)) +
geom_line(data=plot_data, mapping=aes(x=new_date, y=total)) +
facet_grid(meta_cat~., scales='free_y') +
scale_x_date(limits=c(as.Date("1947-01-01"), as.Date("2022-01-01")), breaks=as.Date(c("1960-01-01", "1980-01-01", "2000-01-01", "2020-01-01")), labels=c("1960", "1980", "2000", "2020")) +
coord_cartesian(expand=F) +
labs(x="", y="Gross investment in millions of USD\n(adjusted for inflation)\n",
caption= "N. Rennie | Data: Bureau of Economic Analysis",
title="INFRASTRUCTURE\nINVESTMENT",
subtitle="Whilst investment in digital, education, health, and power have\ncontinued to increase, investment in conservation has dropped\nsince the mid 1960s.\n") +
theme(plot.background = element_rect(fill = "#FAF9F6", colour="#FAF9F6"),
panel.background = element_rect(fill = "#FAF9F6", colour="#FAF9F6"),
legend.background = element_rect(fill = "#FAF9F6", colour="#FAF9F6"),
legend.key = element_rect(fill = NA),
strip.background =element_rect(fill=alpha("#50487a",0.2)),
strip.text = element_text(colour = '#50487a', family="Gill Sans MT", size=12),
legend.position="bottom",
panel.spacing = unit(1.5, "lines"),
legend.margin=margin(t = 0, unit='cm'),
axis.text = element_text(colour = "#50487a", size=12, hjust = 0.5, family="Bodoni MT"),
axis.title = element_text(colour = "#50487a", size=14, hjust = 0.5, family="Bodoni MT"),
legend.text = element_text(colour = "#50487a", size=12, hjust = 0.5, family="Bodoni MT"),
plot.title = element_text(colour = "#50487a", size=28, hjust = 0, family="Bodoni MT Black"),
plot.subtitle = element_text(colour = "#50487a", size=16, hjust = 0, family="Bodoni MT"),
plot.caption = element_text(colour = "#50487a", size=12, hjust = 0.5, family="Bodoni MT"),
plot.margin = unit(c(0.5, 0.5, 0.5, 0.5), "cm"), #top, right, bottom, left
panel.grid.major.x = element_blank(),
panel.grid.major.y = element_blank(),
panel.grid.minor = element_blank())
p
|
26bf993879659685bea9dda38a3964a695b7c279 | 7e7bb7bfdf62c24b7fecf78f5247d28839728710 | /MAP Analysis/2014-2015/src/7th_grade_KAMS_reading.R | 1e2a9235a6d54ad8e7b37dba2789de80cf989b3a | [] | no_license | kippchicago/Data_Analysis | 1ad042d24c7a1e11e364f39c694692f5829363a4 | 8854db83e5c60bc7941654d22cbe6b9c63613a7f | refs/heads/master | 2022-04-09T10:10:43.355762 | 2020-02-20T18:03:40 | 2020-02-20T18:03:40 | 5,903,341 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,225 | r | 7th_grade_KAMS_reading.R | map_mv$cdf %>%
left_join(map_mv$roster %>%
select(termname,
studentid,
studentlastname,
studentfirstname,
school),
by=c("termname", "studentid")) %>%
filter(grade == 7,
school == "KACP",
measurementscale == "Reading",
termname == "Spring 2014-2015") %>%
mutate(on_grade_level = "below",
on_grade_level = ifelse(testpercentile >=25,
"on",
on_grade_level),
on_grade_level = ifelse(testpercentile >= 75,
"above",
on_grade_level),
duration_hours = round(testdurationminutes/60,1)) %>%
select(school,
grade,
studentid,
last = studentlastname,
first =studentfirstname,
RIT_score = testritscore,
percentile = testpercentile,
quartile = testquartile,
on_grade_level,
duration_mins = testdurationminutes,
duration_hours
) %>%
arrange(desc(RIT_score), last, first) %>%
readr::write_csv("reports/Harrison_7th_KACP_Reading_Scores.csv")
|
54be4aec664f1770c8c821de74b787842b93bbc4 | 92f9c71721dbd2b4d6af9afc5ce6ce9db6840d3f | /i686-pc-linux-gnu/arm-xilinx-eabi/mep/bin/arm/arm7ejs.rd | 6b6390a22ffe4a32f77aae947f75f31ea3c60ac2 | [
"Apache-2.0"
] | permissive | Kayuii/arm_xilinx_linux_gnueabi | a845c9c8542b41786dca6f3652510936d367ec9e | a2348d16d170149716c9a2e27e798f306a0c48e7 | refs/heads/master | 2023-02-10T03:57:47.262945 | 2021-01-07T03:40:17 | 2021-01-07T03:40:17 | 327,490,208 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 79 | rd | arm7ejs.rd | // ARM7EJS CP15 Register Definition File
//
INCLUDE "arm926ejs.rd"
//
// <eof>
|
169dabb04c78483cc015bd0609a012a5b0b289b3 | 60b3cc53f8f94ea1fa6b903c2bdb90748f2002a0 | /R/batches-utils.R | 6dae4f3543c2718cd3ea0fef9fbc8f4774825440 | [
"MIT"
] | permissive | news-r/artm | 5835329f64e4031393b7a5fdee8bf0c529bda35b | 610340fa56aa0c19ec1fcacb4886665219487bdd | refs/heads/master | 2022-01-12T23:32:05.863580 | 2019-08-01T16:18:36 | 2019-08-01T16:18:36 | 198,298,264 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,196 | r | batches-utils.R | #' Batch Vectorizer
#'
#' Build an ARTM BatchVectorizer from a preprocessed corpus file.
#'
#' @export
batch_vectorizer <- function(preprocessed_file, batches = NULL, collection_name = NULL,
  data_format = c("vowpal_wabbit", "batches", "bow_uci", "bow_n_wd"), target_folder = NULL,
  batch_size = 1000, batch_name_type = "code",
  data_weight = 1.0, n_wd = NULL, vocabulary = NULL, gather_dictionary = TRUE,
  class_ids = NULL, process_in_memory_model = NULL){
  # Input must be a `preprocessed` object carrying $file (path on disk) and
  # $temp (whether that path is a temporary file owned by us).
  assert_that(!missing(preprocessed_file), msg = "Missing `preprocessed_file`")
  assert_that(inherits(preprocessed_file, "preprocessed"), msg = "`preprocessed_file` is not of class `preprocessed`, see `as_preprocessed`")
  output <- artm$BatchVectorizer(
    batches = batches,
    collection_name = collection_name,
    data_path = preprocessed_file$file,
    data_format = match.arg(data_format),
    target_folder = target_folder,
    batch_size = batch_size,
    batch_name_type = batch_name_type,
    data_weight = data_weight,
    n_wd = n_wd,
    vocabulary = vocabulary,
    gather_dictionary = gather_dictionary,
    class_ids = class_ids,
    process_in_memory_model = process_in_memory_model
  )
  # BUG FIX: the original referenced the undefined object `file`, which
  # resolves to base R's file() function and errors with "object of type
  # 'closure' is not subsettable". The temp-file flag and path live on
  # `preprocessed_file`; clean the temporary input up after vectorizing.
  if (preprocessed_file$temp)
    unlink(preprocessed_file$file, recursive = TRUE)
  return(output)
}
0b76969d7e72656c83aa3b8e785638f5d2b4db1e | c9f874e7b459735d7808ce0964da891c5ba8ac9e | /R/utils.R | 125f5ac49b3bd7e5d7bde258be1afdc22d6e72a6 | [
"MIT"
] | permissive | Covid19R/covid19nytimes | 606f0cdf3903481bf18ac7d3b8c85b5fb2456a7f | 024299451b8fb004ee3897599ee176a39cebc704 | refs/heads/master | 2021-05-17T18:31:52.168215 | 2020-05-15T13:56:15 | 2020-05-15T13:56:15 | 250,919,721 | 27 | 6 | NOASSERTION | 2020-04-07T20:28:57 | 2020-03-29T00:09:38 | R | UTF-8 | R | false | false | 1,159 | r | utils.R | reshape_raw <- function(tbl, location_col) {
tbl %>%
dplyr::rename(
cases_total = cases,
deaths_total = deaths,
fips_code = fips
) %>%
tidyr::pivot_longer(
cols = c(cases_total, deaths_total),
names_to = "data_type",
values_to = "value"
) %>%
tidyr::pivot_longer(
cols = {{ location_col }},
names_to = "location_type",
values_to = "location"
) %>%
tidyr::pivot_longer(
cols = fips_code,
names_to = "location_code_type",
values_to = "location_code"
) %>%
dplyr::select(
date,
location, location_type,
location_code, location_code_type,
data_type, value
) %>%
dplyr::arrange(
dplyr::desc(date), location
)
}
pull_unique <- function(tbl, col) {
q_col <- rlang::enquo(col)
tbl %>%
tidyr::drop_na(!!q_col) %>%
dplyr::pull(!!q_col) %>%
unique() %>%
stringr::str_c(collapse = ", ")
}
#' Pipe operator
#'
#' See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details.
#'
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom magrittr %>%
#' @usage lhs \%>\% rhs
NULL
|
98ddc99dd3158d39acc18bf5d055115415b237d9 | c0bce42fcea5993c3d9976248c157f4a4433db0b | /figure_CLL/code/26_zscore_CNV_PT1.R | ab698464aaef6189095e438937d008cfb1103649 | [] | no_license | ChenPeizhan/mtscATACpaper_reproducibility | a01b22f43c6222a56e04e731d68de7440c3cfc76 | e543610bf29dbac1094994c54d3e7edd41609d5a | refs/heads/master | 2022-12-11T11:31:24.877462 | 2020-08-29T18:40:36 | 2020-08-29T18:40:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,170 | r | 26_zscore_CNV_PT1.R | library(SummarizedExperiment)
library(BuenColors)
library(dplyr)
library(Matrix)
library(BuenColors)
library(data.table)
library(ggbeeswarm)
library(dplyr)
library(matrixStats)
library(ComplexHeatmap)
library(circlize)
library(stringr)
# Get the typical profile of a fragment distribution
# Baseline: public PBMC scATAC CNV bin counts; NA bins treated as zero coverage.
base_pbmcs <- readRDS("../../cnv_compute/output/atac_public_pbmcs_cnv.rds"); base_pbmcs[is.na(base_pbmcs)] <- 0
# Counts-per-100k normalization per cell, then per-bin mean / SD across the
# baseline cells (used below to z-score the patient cells).
cpm_norm <- (t(t(base_pbmcs)/colSums(base_pbmcs)) * 100000)
row_means <- rowMeans(cpm_norm)
row_std <- sqrt(rowVars(cpm_norm))
# CLL patient 1 CD19+ cells, restricted to barcodes with a clone assignment.
mat <- readRDS("../../cnv_compute/output/CLL_PT1_CD19pos_cnv.rds"); mat[is.na(mat)] <- 0
clone_definition <- readRDS("../output/PT1_clone_definition.rds")
bcs <- as.character(clone_definition$cell_id)
mat <- mat[,bcs]
# Z-score each CNV bin of every cell against a baseline: cells are CPM-style
# normalized (counts per 100k per cell), then centered and scaled by the
# per-bin baseline mean and standard deviation.
# Generalized: `means` and `stds` default (lazily) to the script-level
# `row_means` / `row_std`, preserving the original behavior of reading them
# from the calling environment, but an explicit baseline can now be supplied.
makeZscoreMat <- function(mat, means = row_means, stds = row_std){
  mat_cpm_norm <- (t(t(mat)/colSums(mat)) * 100000)
  (mat_cpm_norm - means)/stds
}
# Log2 ratio of each cell's CPM-normalized profile over a per-bin baseline
# median (with a pseudocount of 1).
# NOTE(review): `row_medians` is never defined in this script (only
# `row_means` / `row_std` are computed above), so calling this function will
# error -- presumably matrixStats::rowMedians(cpm_norm) was intended. The
# function is currently unused, which is why the bug never fires.
makeLog2Mat <- function(mat){
  mat_cpm_norm <- (t(t(mat)/colSums(mat)) * 100000)
  zscore_mat <- log2(mat_cpm_norm/(row_medians + 1))
  zscore_mat
}
# Per-cell z-score matrices for the patient cells and for the baseline itself.
zscore <- makeZscoreMat(mat)
score_bs <- makeZscoreMat(base_pbmcs)
# Bin names look like "chr_start_end_*": split so bins can be selected by chromosome.
region_meta <- str_split_fixed(rownames(zscore), "_", 4)
# Mean chr12 z-score per cell (trisomy-12 readout), attached to the clone table.
clone_definition$zscore <- colMeans(zscore[region_meta[,1] == "chr12",])
# Baseline PBMCs appended as pseudo-clone "b" for comparison in the plot.
baseline_df <- data.frame(
  cell_id = "whatever",
  cluster_id = c("b"),
  zscore = colMeans(score_bs[region_meta[,1] == "chr12",])
)
vec_go <- c("firebrick", jdb_palettes[["flame_light"]], "grey")
# Boxplots of chr12 CNV z-scores by clone, baseline included.
p1 <- ggplot(rbind(clone_definition, baseline_df), aes(x = 1, y = zscore, color = cluster_id)) +
  geom_boxplot(outlier.shape = NA, width = 0.1, fill = NA) +
  scale_y_continuous(limits = c(-1, 3)) + labs(x = "Cell clone", y = "chr12 CNV z-score") +
  pretty_plot() + L_border() + scale_color_manual(values = vec_go) +
  theme(axis.text.x=element_blank(),
        axis.ticks.x=element_blank()) +
  theme(legend.position = "none")
cowplot::ggsave2(p1, file = "../plots/PT1_CNVplot.pdf", width = 2, height = 1.7)
# Compute fraction of chromosome 12 reads
# patient ("normal" path) vs. baseline, reported as a ratio of means
f12n <- colSums(mat[region_meta[,1] == "chr12",])/colSums(mat)
f12c <- colSums(base_pbmcs[region_meta[,1] == "chr12",])/colSums(base_pbmcs)
mean(f12n)/mean(f12c)
|
a15f87c6976b4c9c1fc12e467069d556a1cfb19c | a6fce712325cd8ba786d36be33fe8d4402b97a4b | /Analyses with full sample/2 Behavioral/2b Eval_RT_Supplemental material.R | a837d2ddc5b50ebe62b2399eee837cd89f1734ff | [] | no_license | hivolpertes/ERP-fix-analyses | 57c6c61bef15376e3f9fb8d9c120cda5a7fe5293 | a5df46905694e211d208c3dccc98680a44249ecb | refs/heads/master | 2021-09-29T15:02:06.857877 | 2018-11-25T23:57:53 | 2018-11-25T23:57:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,691 | r | 2b Eval_RT_Supplemental material.R | require(dplyr)
require(dplyr)
require(tidyr)
require(ggplot2)

evalDat = read.delim("./Analyses with full sample/2 Behavioral/evalDat.txt")
# includes practice trials
# 512 trials per subject
# Practice: Trial = 1, SubTrial = 1...16
# First half: Trial = 2, SubTrial = 1...256
# Second half: Trial = 3, SubTrial = 256...512
# take out bad subs
badsubs = read.delim("./Analyses with full sample/Eval_badsubs.txt")
evalDat = evalDat[!(evalDat$Subject %in% badsubs$Subject),]
expEvalDat = evalDat[evalDat$Trial != 1,] # takes out practice trials
expEvalDat$Subject = factor(expEvalDat$Subject)
# just look at correct trials (doesn't include misses)
corTrials = expEvalDat[expEvalDat$responseAccDat == 2,]

# Look across individuals -------------------------------------------------

# create performance bias score for each subject: B and W contrast the two
# word valences within each face race. Positional indexing of avgRT assumes
# exactly four rows per subject in the faceRace x wordVal order produced by
# the grouped summary -- TODO confirm the factor ordering against the data.
perf = group_by(corTrials, Subject, faceRace, wordVal) %>%
  summarise(avgRT = mean(TargetWord.RT)) %>%
  as.data.frame()
perf$Condition = paste(perf$faceRace, perf$wordVal, sep = "_")

# One row per subject. The original grew `perfBias` with rbind() inside the
# loop (quadratic copying); build the rows in a list and bind them once.
perfBias = do.call(rbind, lapply(unique(perf$Subject), function(i) {
  temp = perf[perf$Subject == i,]
  B = temp$avgRT[2] - temp$avgRT[1]
  W = temp$avgRT[3] - temp$avgRT[4]
  data.frame(Subject = i, perfBias = B + W)
}))

plot(perfBias$Subject, perfBias$perfBias)
hist(perfBias$perfBias, breaks = 25,
     xlab = "Performance Bias", main = "")
perfBias[order(perfBias$perfBias),]
# Helper: bar plot of mean reaction time by face-prime race and word valence
# for a single subject, with normal-theory CI errorbars. The three original
# per-subject plots were verbatim copies differing only in subject id and the
# y-axis window, so they are factored into one function; `guides(fill=F)` is
# replaced by the safe `FALSE` spelling. Called at top level, the returned
# ggplot auto-prints exactly as the original expressions did.
plotSubjectRT = function(subj, rt_ylim) {
  ggplot(corTrials[corTrials$Subject == subj,], aes(faceRace, TargetWord.RT, fill = wordVal)) +
    stat_summary(fun.y = mean, geom = "bar", position = "dodge") +
    stat_summary(fun.data = mean_cl_normal, geom = "errorbar", position = position_dodge(width=.9), width = .2) +
    facet_wrap(~Subject) +
    coord_cartesian(ylim = rt_ylim) +
    labs(x="Race of face prime", y="Reaction Time (ms)") +
    theme_bw() +
    theme(axis.title.x = element_text(size=20),
          axis.title.y = element_text(size=20),
          legend.title = element_blank(),
          legend.text = element_text(size=16),
          strip.text = element_text(size=20),
          axis.text.x = element_text(size=16),
          axis.text.y = element_text(size=14)) +
    scale_fill_manual(values=c("grey55", "grey75")) +
    guides(fill = FALSE)
}

# most biased
plotSubjectRT(28, c(400, 500))
plotSubjectRT(58, c(475, 575))
# least biased
plotSubjectRT(4, c(440, 540))
# middle
ggplot(corTrials[corTrials$Subject ==63,], aes(faceRace, TargetWord.RT, fill = wordVal)) +
stat_summary(fun.y = mean, geom = "bar", position = "dodge") +
stat_summary(fun.data = mean_cl_normal, geom = "errorbar", position = position_dodge(width=.9), width = .2) +
facet_wrap(~Subject) +
coord_cartesian(ylim=c(500, 600)) +
labs(x="Race of face prime", y="Reaction Time (ms)") +
theme_bw() +
theme(axis.title.x = element_text(size=20),
axis.title.y = element_text(size=20),
legend.title = element_blank(),
legend.text = element_text(size=16),
strip.text = element_text(size=20),
axis.text.x = element_text(size=16),
axis.text.y = element_text(size=14)) +
scale_fill_manual(values=c("grey55", "grey75"))+
guides(fill=F)
# Look at effect across task ----------------------------------------------
# plot over course of task
corTrials$Condition = paste(corTrials$faceRace, corTrials$wordVal, sep = "_")
condense = select(corTrials, Condition, TargetWord.RT, SubTrial) %>%
group_by(SubTrial, Condition) %>%
summarise(avgRT = mean(TargetWord.RT)) %>%
as.data.frame()
ggplot(condense, aes(SubTrial, avgRT, alpha = Condition, color = Condition, shape = Condition, linetype = Condition)) +
geom_point() +
geom_smooth(method = "lm", se=F, lwd = 1.3) +
labs(x = "Trial", y = "Reaction Time (ms)") +
scale_shape_manual(values=c(1,19,1,19)) +
scale_alpha_manual(values=c(.7,.5,.7,.5)) +
scale_linetype_manual(values = c("solid", "longdash", "solid", "longdash")) +
scale_color_manual(values=c("black", "black", "gray65", "gray65")) +
theme_bw() +
scale_y_continuous(limits=c(420,600)) +
guides(fill=F)
m1 = lmer(TargetWord.RT ~ faceRace * wordVal * SubTrial +
(wordVal|Subject) +
(1|TargetWord), data=corTrials)
summary(m1)
|
66b1f6e54785f5035b3cc6465a2b2fe6bdffbbd1 | f995cb1d7908397cfe56492fa65419914ee9a334 | /R/OptSig.Chisq.R | 415b02987a1219a64205e268b0283849ec1c09bc | [] | no_license | cran/OptSig | 2c0f15382ba344787e898cd4675589e2bc4ab863 | 2fbf7b2e65234a14d88d32245210faeb8c5164d2 | refs/heads/master | 2022-07-12T19:53:34.684022 | 2022-07-03T11:30:14 | 2022-07-03T11:30:14 | 114,993,984 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 518 | r | OptSig.Chisq.R | OptSig.Chisq <-
function(w = NULL, N = NULL, ncp = NULL, df, p = 0.5, k = 1, Figure = TRUE)
{
  # Grid of candidate significance levels to search over.
  alpha_grid <- seq(0, 1, 1e-05)
  # Type II error for each candidate alpha. Which branch runs depends on
  # whether the effect is supplied as a noncentrality parameter (ncp) or as
  # an effect size w together with a sample size N.
  if (!is.null(ncp) & is.null(w)) beta_grid <- 1 - Power.Chisq(df, ncp, alpha_grid, Figure = FALSE)$Power
  if (is.null(ncp) & !is.null(w)) beta_grid <- 1 - pwr.chisq.test(w = w, N = N, df = df, sig.level = alpha_grid, power = NULL)$power
  # Minimise expected loss over the (alpha, beta) pairs.
  loss <- E.loss(alpha_grid, beta_grid, p, k, Figure)
  opt_alpha <- loss$alpha.opt
  # Chi-square critical value at the optimal significance level.
  opt_crit <- qchisq(1 - opt_alpha, df = df)
  list(alpha.opt = opt_alpha, crit.opt = opt_crit, beta.opt = loss$beta.opt)
}
834bd78ed6c1fcaee345288b86e3be4751deb09d | 8a1f07759a8a75cf37d7a0e962544c8ab3fa3c96 | /other-cases/04a-rebuild-unemp.R | 9caa759bb1db8421e7c9f4a8a9f730fb90819f77 | [] | no_license | amoran57/thesis | 6b2938cc9ca8ea0262a942e32057eeb1592ab537 | af2073c4135bb89929ca56dcdfa210979e40c099 | refs/heads/master | 2023-03-29T17:20:52.014132 | 2021-03-28T21:56:42 | 2021-03-28T21:56:42 | 287,563,573 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,449 | r | 04a-rebuild-unemp.R | # Header ---------------------------------------
rm(list=ls())
header <- source("header.R")
#Code ------------------------------------------
us_df <- read_rds(paste0(export, "master_data.rds"))

#Review the transformation process ----------------
us_values <- us_df %>%
  select(date, unemp, rate3month)
unemp_ts <- ts(us_values$unemp[-c(1:12)], start = c(1948, 1), frequency = 12)
this_unemp_ts <- window(unemp_ts, start = c(1989, 12), end = c(1999, 12))

# Previously, I had detrended the time series with a linear trend model.
unemp_df <- us_values[-c(1:12),] %>% select(date, unemp)
rownames(unemp_df) <- seq(1, nrow(unemp_df))
unemp_df$trend <- seq(1, nrow(unemp_df))
trend_model <- glm(unemp ~ trend, data = unemp_df)
summary(trend_model)

# To retrend, all I have to do is add the residuals to the fitted values.
# (renamed from `fitted`/`residuals` so stats::fitted()/residuals() are not
# masked by plain vectors)
trend_fitted <- trend_model$fitted.values
trend_residuals <- trend_model$residuals
real_values <- trend_fitted + trend_residuals  # sanity check: reconstructs the raw series
detrend_unemp <- trend_model$residuals
detrend_unemp_ts <- ts(detrend_unemp, start = c(1948, 1), frequency = 12)

# Next I took the difference of the detrended residuals
diff_unemp_ts <- diff(detrend_unemp_ts)
plot(diff_unemp_ts)

# I just have to undo this to get the original values back.
# NOTE(review): `lag()` here is presumably dplyr::lag (attached via header.R);
# stats::lag would not shift a plain vector -- verify.
original_ts <- data.frame(date = seq(as.Date("1948/1/1"), as.Date("2020/8/1"), "month"),
                          core = detrend_unemp_ts,
                          diff = c(NA_real_, diff_unemp_ts),
                          naive_diff = lag(c(NA_real_, diff_unemp_ts)))
original_ts$lag_original <- original_ts$core - original_ts$diff
original_ts$lag_naive <- original_ts$core - original_ts$naive_diff

# Import and undifference -----------------------------------------
# Bring in the (differenced) predictions and run them backwards through the
# same transformation: undifference first, then retrend.
arima_forecast <- read_rds(paste0(export, "other_cases/unemp/arima_forecast.rds"))
forest_forecast <- read_rds(paste0(export, "other_cases/unemp/forecast.rds"))
ar1_forecast <- read_rds(paste0(export, "other_cases/unemp/ar1_forecast.rds"))
base_forecast <- read_rds(paste0(export, "other_cases/unemp/base_forecast.rds"))
forecast_df <- data.frame(date = seq(as.Date("1990/1/1"), as.Date("2000/1/1"), "month"),
                          arima = arima_forecast,
                          forest = forest_forecast,
                          ar1 = ar1_forecast,
                          base = base_forecast)

# Find the lags of the differenced forecasts
full_df <- left_join(original_ts, forecast_df, by = "date")
full_df$lag_arima_forecast <- full_df$core - full_df$arima
full_df$lag_forest_forecast <- full_df$core - full_df$forest
full_df$lag_ar1_forecast <- full_df$core - full_df$ar1
full_df$lag_base_forecast <- full_df$core - full_df$base

# Undifference. The first 504 rows are 1948-01 through 1989-12, i.e.
# everything before the forecast window begins.
n_pre_forecast <- 504
detrend_arima_forecast <- ts(full_df$lag_arima_forecast[-seq_len(n_pre_forecast)], start = c(1989, 12), frequency = 12)
detrend_forest_forecast <- ts(full_df$lag_forest_forecast[-seq_len(n_pre_forecast)], start = c(1989, 12), frequency = 12)
detrend_ar1_forecast <- ts(full_df$lag_ar1_forecast[-seq_len(n_pre_forecast)], start = c(1989, 12), frequency = 12)
detrend_base_forecast <- ts(full_df$lag_base_forecast[-seq_len(n_pre_forecast)], start = c(1989, 12), frequency = 12)
detrend_naive_forecast <- ts(original_ts$lag_naive[-c(1:2)], start = c(1948, 2), frequency = 12)

# Retrend them to get the forecasts on the original scale
# (trend_fitted was already extracted from trend_model above)
fitted_ts <- ts(trend_fitted, start = c(1948, 1), frequency = 12)
this_fitted_ts <- window(fitted_ts, start = c(1989, 12), end = c(1999, 12))
arima_forecast <- this_fitted_ts + detrend_arima_forecast
forest_forecast <- this_fitted_ts + detrend_forest_forecast
ar1_forecast <- this_fitted_ts + detrend_ar1_forecast
base_forecast <- this_fitted_ts + detrend_base_forecast
naive_forecast <- this_fitted_ts + detrend_naive_forecast

# Compare values ---------------------------
accuracy(unemp_ts, arima_forecast)
accuracy(unemp_ts, forest_forecast)
accuracy(unemp_ts, ar1_forecast)
accuracy(unemp_ts, base_forecast)
accuracy(unemp_ts, naive_forecast)

# Export ------------------------------
write_rds(arima_forecast, paste0(export, "other_cases/unemp/retransformed_forecasts/arima_forecast.rds"))
write_rds(forest_forecast, paste0(export, "other_cases/unemp/retransformed_forecasts/forest_forecast.rds"))
write_rds(ar1_forecast, paste0(export, "other_cases/unemp/retransformed_forecasts/ar1_forecast.rds"))
write_rds(base_forecast, paste0(export, "other_cases/unemp/retransformed_forecasts/base_forecast.rds"))
write_rds(naive_forecast, paste0(export, "other_cases/unemp/retransformed_forecasts/naive_forecast.rds"))
|
cb366ba5536502c705da90ae3ba261340f3d61fa | bcb827857c741281b6a99616c8244d02a0d0bfc6 | /R/proper.R | 7a5cab9ac79588d8c967aab9b2ed522794cdf330 | [] | no_license | ellyck/upgraded-waffle | d6cd301b7af6941ec523f0000e994118e476c8b1 | f1fd6c8f160d6cc182123c50b62181b02c79a8e0 | refs/heads/master | 2022-02-22T00:52:38.209064 | 2017-11-21T22:21:58 | 2017-11-21T22:21:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 534 | r | proper.R | #' Proper function
#'
#' Capitalize the first letter of each word and lowercase the remaining
#' letters. Equivalent to the PROPER() function in Excel.
#' Credit: Matthew Plourde
#' https://stackoverflow.com/questions/24956546/capitalizing-letters-r-equivalent-of-excel-proper-function
#' @param x a character string or character vector.
#' @return a character vector the same length as \code{x} in proper case.
#' @keywords proper
#' @export
#' @examples
#' proper("moose") # returns "Moose"
proper <- function(x) {
  # Lowercase everything, then uppercase any letter that follows a word
  # boundary; perl = TRUE enables the \U case-conversion escape.
  gsub("(?<=\\b)([a-z])", "\\U\\1", tolower(x), perl=TRUE)
}
a8705bfea6b13fa914a9d207a8219a2d5192292e | 319ddd88db39ec4bf32532de30048bfb894a34d7 | /PL/R/Requirements.R | a5c85470ca19fc5eb897058b7386e88cd6344e06 | [
"Apache-2.0"
] | permissive | gkantsidis/Utils | 52d82ff50b14b936655348cb7851a23dae36da05 | 47b415dcd578d328c85e2bc2167fb3ddfbcab4c8 | refs/heads/master | 2021-06-10T20:49:42.584416 | 2021-03-15T18:39:18 | 2021-03-15T18:39:18 | 142,796,162 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 759 | r | Requirements.R | packages = c(
"data.table",
"filehash", # key-value database
"foreach",
"ggrepel", # extension for ggplot2, with the purpose of adding labels to plots.
"jsonlite", # processing json files
"plyr", # tools for splitting, applying, and combining data
"psych", # tools for psychological, psychometric, and personality research,
# including read.clipboard.csv()
"ProjectTemplate", # automate creation of new projects
"scales", # map data to aesthetics
"tidyverse",
"tikzDevice", # graphics output in LaTeX format
"varhandle"
)
install.packages(packages, repos="http://cran.rstudio.com/")
|
31d0d8281b79ce85a6691e45f79255d18a66d302 | 613b31a6ab86690a46756605e551c58408b53d65 | /code/20210510_second_stage_modeling_2019.R | befbe7473f31d8381a615800c455c01832748337 | [] | no_license | mike-z-he/no2_model_mexico_city | c02b53a9e27fa77a629894038b37cb628463e0af | ebd106229fa5a5b6b1eeab9fd602555469437a9e | refs/heads/main | 2023-06-19T06:19:38.791913 | 2021-07-19T23:21:14 | 2021-07-19T23:21:14 | 330,858,908 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,733 | r | 20210510_second_stage_modeling_2019.R | #### Second Stage NO2 Modeling with 2019 data ####
#### May 10, 2021 ####
## 5/14: updated to change outcome to non z-scores

options(mc.cores=parallel::detectCores())

#### Load packages ####
library(tidyverse)
library(lubridate)
library(ranger)
library(lme4)
library(reshape)
library(foreign)
library(ggplot2)
library(plyr)
library(data.table)
library(Hmisc)
library(mgcv)
library(gdata)
library(car)
library(dplyr)
library(ggmap)
library(broom)
library(splines)
library(DataCombine)
library(readr)
library(bit64)
library(FNN)
library(splitstackshape)
library(lattice)
library(datasets)
library(foreach)
library(doParallel)
library(ModelMetrics)
# (duplicate library(foreign) removed; attach order otherwise preserved)

#### Helpers ####
# Fit a linear mixed model, print its summary, and print the R^2 from
# regressing the z-scored monitor values on the in-sample predictions -- the
# diagnostic that was repeated verbatim for every candidate model below.
# NOTE(review): the R^2 is computed against monitor_no2_z even though the
# model outcome is monitor_no2; kept as-is so the recorded numbers remain
# reproducible -- confirm this is intended.
fit_lmm <- function(model_formula, data) {
  mod <- lmer(model_formula, data = data)
  print(summary(mod))
  preds <- predict(mod)
  print(summary(lm(data$monitor_no2_z ~ preds))$r.squared)
  invisible(mod)
}

# Fit a ranger random forest and print its OOB prediction error and R^2.
fit_rf <- function(model_formula, data, n_trees, m_try) {
  mod <- ranger(model_formula, data = data, num.trees = n_trees, mtry = m_try)
  print(mod$prediction.error)
  print(mod$r.squared)
  invisible(mod)
}

#### Load Dataset ####
setwd("D:/Users/profu/Documents/Schoolwork/Postdoc/Research Projects/no2_model_mexico_city/data/processed_data")
dta <- read_csv("second_stage_monitors_2019.csv")

## Adding Julian date
dta$jd <- julian(dta$date)
dta$yday <- yday(dta$date)

#### Rescaling Variables ####
# (renamed from `names` so base::names is not masked)
scale_vars <- c("lat", "long", "omi_no2", "tropomi_no2", "wind_u", "wind_v",
                "temp_2m", "albedo", "pressure", "precip", "blh", "cloud",
                "elevation", "road_length", "population", "ndvi",
                "monitor_no2", "rf_no2")
scaled <- dta[, scale_vars] %>%
  mutate_all(scale)  # funs() is deprecated; passing the function is equivalent
colnames(scaled) <- paste0(colnames(scaled), "_z")
dta <- cbind(dta, scaled)
names(dta)

#### Preliminary Mixed Effect Modeling
# Simplest model (renamed from `try` so base::try is not masked)
lm_fit <- lm(monitor_no2 ~ rf_no2_z, data=dta)
summary(lm_fit)
#Multiple R-squared: 0.1216, Adjusted R-squared: 0.1215

# Include lat/long
lm_fit <- lm(monitor_no2 ~ rf_no2_z + lat_z + long_z, data=dta)
summary(lm_fit)
#Multiple R-squared: 0.1753, Adjusted R-squared: 0.175

# Random intercept and rf_no2_z slope by day: mixed effects model
fit_lmm(monitor_no2 ~ rf_no2_z + (1 + rf_no2_z | date), dta)
# [1] 0.5309184

# Add lat/long
fit_lmm(monitor_no2 ~ rf_no2_z + lat_z + long_z + (1 + rf_no2_z | date), dta)
# [1] 0.5606047

# Add wind
fit_lmm(monitor_no2 ~ rf_no2_z + lat_z + long_z + wind_u_z + wind_v_z + (1 + rf_no2_z | date), dta)
# [1] 0.5818571

# Add temperature
fit_lmm(monitor_no2 ~ rf_no2_z + lat_z + long_z + wind_u_z + wind_v_z + temp_2m_z + (1 + rf_no2_z | date), dta)
# [1] 0.7320502 quite a large jump...does this have to do with the fact that temperature was used in stage 1 modeling?

# Add albedo
fit_lmm(monitor_no2 ~ rf_no2_z + lat_z + long_z + wind_u_z + wind_v_z + temp_2m_z + albedo_z + (1 + rf_no2_z | date), dta)
# [1] 0.7320561 #doesn't really help, won't add

# Add pressure
fit_lmm(monitor_no2 ~ rf_no2_z + lat_z + long_z + wind_u_z + wind_v_z + temp_2m_z + pressure_z + (1 + rf_no2_z | date), dta)
# [1] 0.7290074 actually decreases R2? won't add

# Add precipitation
fit_lmm(monitor_no2 ~ rf_no2_z + lat_z + long_z + wind_u_z + wind_v_z + temp_2m_z + precip_z + (1 + rf_no2_z | date), dta)
# [1] 0.7320782 doesn't really help, won't add

# Add blh
fit_lmm(monitor_no2 ~ rf_no2_z + lat_z + long_z + wind_u_z + wind_v_z + temp_2m_z + blh_z + (1 + rf_no2_z | date), dta)
# [1] 0.7325641

# Add cloud
fit_lmm(monitor_no2 ~ rf_no2_z + lat_z + long_z + wind_u_z + wind_v_z + temp_2m_z + blh_z + cloud_z + (1 + rf_no2_z | date), dta)
# [1] 0.7332009

# Add elevation
fit_lmm(monitor_no2 ~ rf_no2_z + lat_z + long_z + wind_u_z + wind_v_z + temp_2m_z + blh_z + cloud_z + elevation_z + (1 + rf_no2_z | date), dta)
# [1] 0.7351303

# Add road_length (final model for now)
fit_lmm(monitor_no2 ~ rf_no2_z + lat_z + long_z + wind_u_z + wind_v_z + temp_2m_z + blh_z + cloud_z + elevation_z + road_length_z + (1 + rf_no2_z | date), dta)
# [1] 0.7366251

# Add population
fit_lmm(monitor_no2 ~ rf_no2_z + lat_z + long_z + wind_u_z + wind_v_z + temp_2m_z + blh_z + cloud_z + elevation_z + road_length_z + population_z + (1 + rf_no2_z | date), dta)
# [1] 0.7362246 doesn't really help, won't add

# Add NDVI (NA = 93)
dta2 <- dta[!is.na(dta$ndvi), ]
fit_lmm(monitor_no2 ~ rf_no2_z + lat_z + long_z + wind_u_z + wind_v_z + temp_2m_z + blh_z + cloud_z + elevation_z + road_length_z + population_z + ndvi_z + (1 + rf_no2_z | date), dta2)
# [1] 0.7508423 does seem to help

# Add TROPOMI (NA = 3970)
dta3 <- dta2[!is.na(dta2$tropomi_no2), ]
fit_lmm(monitor_no2 ~ rf_no2_z + lat_z + long_z + wind_u_z + wind_v_z + temp_2m_z + blh_z + cloud_z + elevation_z + road_length_z + population_z + ndvi_z + tropomi_no2_z + (1 + rf_no2_z | date), dta3)
# [1] 0.7843405 helps slightly

#### Preliminary Random Forest Modeling ####
## Default model (for now)
## (renamed from `formula`/`formula2` so stats::formula is not masked)
rf_formula <- as.formula(monitor_no2 ~ rf_no2 + lat + long + wind_u + wind_v + temp_2m + blh + cloud + elevation + road_length)
rf_formula_z <- as.formula(monitor_no2 ~ rf_no2_z + lat_z + long_z + wind_u_z + wind_v_z + temp_2m_z + blh_z + cloud_z + elevation_z + road_length_z)
fit_rf(rf_formula, dta, 50, 9)
#OOB.RMSE= 17.49538 OOB.R2= 0.762499 RUN in ~ 1 minute.
fit_rf(rf_formula_z, dta, 50, 9)
#OOB.RMSE= 17.65446 OOB.R2= 0.7603395 RUN in ~ 1 minute.
## Seems similar...I'll just use non z-scored version then
fit_rf(rf_formula, dta, 100, 9)
#OOB.RMSE= 17.11238 OOB.R2= 0.7676983 RUN in ~ 1 minute.
fit_rf(rf_formula, dta, 150, 9)
#OOB.RMSE= 16.85277 OOB.R2= 0.7712226 RUN in ~ 1 minute.
fit_rf(rf_formula, dta, 500, 9)
#OOB.RMSE= 16.51723 OOB.R2= 0.7757775 RUN in ~ 1 minute.
## Will use 500 for now, since dataset is small

## Adding Julian date
fit_rf(monitor_no2 ~ rf_no2 + lat + long + wind_u + wind_v + temp_2m + blh + cloud + elevation + road_length + jd, dta, 500, 9)
#OOB.RMSE= 12.29542 OOB.R2= 0.8330888 RUN in ~ 1 minute.

## Adding yday
fit_rf(monitor_no2 ~ rf_no2 + lat + long + wind_u + wind_v + temp_2m + blh + cloud + elevation + road_length + yday, dta, 500, 9)
#OOB.RMSE= 12.28647 OOB.R2= 0.8332104 RUN in ~ 1 minute.
## Seems to be fine this time...and both perform similarly

## Adding TROPOMI (NA = 3970)
fit_rf(monitor_no2 ~ rf_no2 + lat + long + wind_u + wind_v + temp_2m + blh + cloud + elevation + road_length + yday + tropomi_no2, dta3, 500, 10)
#OOB.RMSE= 13.35159 OOB.R2= 0.8315512 RUN in ~ 1 minute.
## Doesn't add much here
0626bbca7d3a5ce77bfd0d5ca364daccc432c73e | 3b1c82ecb7622ce0030470c19732c17f6fda89ff | /SC2019Lab-1-刘方宇-16081127.R | a20445b835347896b9041c2f8d1d0517ef37ad51 | [] | no_license | anhnguyendepocen/SC2019-assignments | 64bbb7a8d82afba4cc636122ed89268db8aca25e | a4cc348c40c4dc4cb373cbbde2cf92acb71cd69b | refs/heads/master | 2020-09-02T03:41:35.656473 | 2019-04-12T12:48:48 | 2019-04-12T12:48:48 | null | 0 | 0 | null | null | null | null | GB18030 | R | false | false | 502 | r | SC2019Lab-1-刘方宇-16081127.R | library(dplyr)
# Read in the data (same file via two readers and two path styles)
pools1 <- read.table("E:\\swimming_pools.csv",sep = ",",header = T);pools1
pools2 <- read.csv("E:/swimming_pools.csv",sep=",");pools2
# Write the data out in three formats: text table, dput source, and RData
write.table(pools1, "E:/file1", sep = ",")
dput(pools1, "E:/file2")
save(pools2, file = "E:/file3")
# Re-read the saved data
read.csv("E:/file1")
dget("E:/file2")
# Remove pools2 from the workspace
rm(pools2)
# Reload pools2 from the RData file
load("E:/file3");pools2
# Single brackets keep the data.frame class; double brackets extract a column
pools1[1]
pools1[c(1,2)];class(pools1[1])
pools1[[1]];class(pools1[[1]])
|
d661feb41949010a6e14db934dad9edc7d5e96f6 | 7a2fa7a3ea14fc702ab000f37655a123129a0391 | /cachematrix.R | 06642197278c50fe2443e37d24b0519a94b83c35 | [] | no_license | RBelson/ProgrammingAssignment2 | 320f3c63b2370571feb0617556eb91c49149d4d1 | 119ad7ddfdbef0c93178f0218f59a1e3ca4af923 | refs/heads/master | 2020-03-28T21:23:53.067685 | 2018-09-17T20:36:27 | 2018-09-17T20:36:27 | 147,934,830 | 0 | 0 | null | 2018-09-08T13:15:23 | 2018-09-08T13:15:23 | null | UTF-8 | R | false | false | 2,039 | r | cachematrix.R | ## Coursera Assignment - R Programming - Assignment 2: Lexical Scoping
# These 2 functions are useful because they allow the user to calculate the inverse of their matrix and cache the results.
# This can help avoid recomputing the same inverse matrix repeatedly and allow the user to quickly access the stored result.
# This first function, makeCacheMatrix() helps facilitate the caching by:
# setting the value of the matrix, getting the value of the matrix,
# setting the value of the inverse, and getting the value of the inverse
# Build a "cache matrix": a list of closures that wrap a matrix together with
# a lazily filled cache for its inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed yet".
  cached_inverse <- NULL
  # Replace the stored matrix and invalidate any stale cached inverse.
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  # Accessors for the matrix and its cached inverse.
  get <- function() x
  setinverse <- function(inverse) cached_inverse <<- inverse
  getinverse <- function() cached_inverse
  list(set = set, get = get,
       setinverse = setinverse, getinverse = getinverse)
}
# This 2nd function cacheSolve(), returns the inverse of the matrix from the cache if it has already been calculated. If not,
# then it will calculate the inverse and set the value in the cache.
cacheSolve <- function(x, ...) {
  # Serve the inverse from the cache when one is already stored.
  cached <- x$getinverse()
  if(!is.null(cached)) {
    message("getting cached matrix")
    return(cached)
  }
  # Cache miss: compute the inverse, store it for next time, and return it.
  mat <- x$get()
  inverse <- solve(mat, ...)
  x$setinverse(inverse)
  inverse
}
###################################################################################################
# Smoke test of the two functions above.
test_matrix <- matrix(c(1:4), 2, 2)
# test_matrix is:
#      [,1] [,2]
# [1,]    1    3
# [2,]    2    4
test_cachematrix <- makeCacheMatrix(test_matrix)
# First call: nothing cached yet, so the inverse is computed from scratch.
# Expected output:
#      [,1] [,2]
# [1,]   -2  1.5
# [2,]    1 -0.5
cacheSolve(test_cachematrix)
# Second call: same result, but preceded by the "getting cached matrix"
# message because it is served from the cache.
cacheSolve(test_cachematrix)
# Success!
|
15c20771014094a8030a64b18448dc3bfded7b4c | 07194315eb96fa85bb0bc0eda24900f1f571fa6d | /plot2.R | 2af3b8f48f7e0760ed56b590aaa99135442347a0 | [] | no_license | srajasek/ExData_Plotting1 | 0bc8113abbd4ea7185239085280ff64775edf6de | a802c58a101fad4f7944e874bf25c60a67c44824 | refs/heads/master | 2021-01-12T21:45:07.083007 | 2015-10-11T18:38:46 | 2015-10-11T18:38:46 | 43,988,295 | 0 | 0 | null | 2015-10-10T01:35:38 | 2015-10-10T01:35:38 | null | UTF-8 | R | false | false | 1,049 | r | plot2.R | ## Sets detination directory where data file exists.
## Location of the raw data file (semicolon-separated).
destdir <- "household_power_consumption.txt"
## Read the data into a data frame.
powerData <- read.csv(destdir, header = TRUE, sep = ";")
## Subset to the two days of interest (1-2 Feb 2007).
reqData <- subset(powerData, Date == "1/2/2007" | Date == "2/2/2007",
                  select=Date: Sub_metering_3)
## Weekday labels for the x axis (start of day 1, start of day 2, end of day 2).
dates <- c("2007-02-01","2007-02-02","2007-02-03")
d <- as.Date(dates) # format="%Y-%m-%d"
x <- weekdays(d)
## Convert the target variable to numeric (it may be read as character/factor
## because of "?" missing-value markers in the raw file).
y <- as.numeric(as.character(reqData$Global_active_power))
## Code to save the file as plot2.png
pngFilelocation <- ("C://XXX//Data Science - Coursera//Exploratory Data Analysis//Project 1//plot2.png")
png(file = pngFilelocation ,width = 480, height = 480, units = "px", bg = "transparent")
## Generates the Line Plot. The day labels are placed at the first, middle,
## and last observation of the series; the previous code put them at x = 1:3,
## bunching all three labels at the far left of a multi-thousand-point axis.
plot(y, type="l",xaxt="n", xlab = "", ylab = "Global Active Power(kilowatts)")
axis(1, at = c(1, (length(y) + 1) / 2, length(y)), labels = x, las = 1)
## Close the device -- without dev.off() the PNG file is never finalized.
dev.off()
|
fd39874c558ae0ffdc72b8b1aac77a442192a06f | ecf1aa864dfc40840f5b0c98965f7d55875e135f | /MODULES/MORE/modelCode.R | 320950e5430878d8ee39a746b2d2f77c8905c60d | [] | no_license | VeenDuco/shinyforcmdstan | 78b03ec5cd2378ab594ab1a7552a655f70ca3462 | 74da0751f7958d08a969d05c17d168e91a2ecd18 | refs/heads/main | 2023-02-09T22:56:25.189305 | 2021-01-04T19:16:23 | 2021-01-04T19:16:23 | 325,535,205 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 594 | r | modelCode.R | modelCodeUI <- function(id){
ns <- NS(id)
tagList(
div(div(
img(
src = "wide_ensemble.png",
class = "wide-ensemble",
width = "100%"
)
),
div(
style = "margin-top: 25px",
img(src = "stan_logo.png", class = "stan-logo"),
div(id = "shinystan-title", "ShinyStan")
)),
fluidRow(
align="center",
br(), br(),
tags$textarea(
id = "user_model_code",
wrap = "off",
cols = 80,
rows = 20,
# SSO()@model_code
)
)
)
}
modelCode <- function(input, output, session){
}
|
05dab13eadb618091f68c6ed92eb436d1e7db96e | b8723b94da48a1e39bb7b9b2495f6eb68c9dd7e8 | /R/readwrite.R | 9f5026b5042b57a40e70621a691e221e70b7a046 | [] | no_license | cran/kstMatrix | 50d9bdc9f8ebfb0b3fddb135a04e8fbba39df578 | b5fbab8ae6e34ce0461bd1b0eeb47b068ce62347 | refs/heads/master | 2023-01-28T00:44:11.723822 | 2023-01-23T14:30:02 | 2023-01-23T14:30:02 | 148,498,260 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,398 | r | readwrite.R | #' Knowledge spaces on reading and writing abilities
#'
#' Bases of knowledge spaces on reading/writing abilities obtained from querying
#' experts.
#'
#' Three experts were queried about prerequisite relationships between 48 items
#' on reading and writing abilities (Dowling, 1991; 1993). A forth basis represents
#' those prerequisite relationships on which the majority of the experts agree
#' (Dowling & Hockemeyer, 1998).
#'
#' @docType data
#'
#' @usage readwrite
#'
#' @format A list containing four bases (rw1 to rw3, and rwmaj) in binary
#' matrix form. Each matrix has 48 columns representing the different knowledge
#' items and a varying number of rows containing the basis elements.
#'
#' @references Dowling, C. E. (1991). Constructing Knowledge Structures from
#' the Judgements of Experts. Habilitationsschrift, Technische Universität
#' Carolo-Wilhelmina, Braunschweig, Germany.
#'
#' Dowling, C. E. (1993). Applying the basis of a knowledge space for controlling
#' the questioning of an expert. Journal of Mathematical Psychology, 37, 21–48.
#'
#' Dowling, C. E. & Hockemeyer, C. (1998). Computing the intersection of knowledge
#' spaces using only their basis. In Cornelia E. Dowling, Fred S. Roberts, & Peter
#' Theuns, editors, Recent Progress in Mathematical Psychology, pp. 133–141.
#' Lawrence Erlbaum Associates Ltd., Mahwah, NJ.
#'
#' @keywords data
#'
"readwrite"
|
105de425c9140630f92bcb4baa3a31dab557de8e | 9108f2123a3ab44b45701a3e045c8194ed4b147a | /R/save_plot_template.R | 2b5780a185cc103c8d65257f031bc961b9c6e3fc | [] | no_license | RStelzle/rstfuns | 79c9f47cd76b6d49be033b25ba9dfede7b414332 | 5cdc8767d7a70d6576a2d37c86bddab2ecfc0399 | refs/heads/master | 2020-12-10T01:21:11.526890 | 2020-09-30T11:11:16 | 2020-09-30T11:11:16 | 233,469,518 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,232 | r | save_plot_template.R | #' Exporting Plots for use in LaTeX
#'
#'
#' @param plot_object Plot to save. Defaults to last_plot()
#' @param filenames name of the .tex and .pdf files
#' @param pdfwidth Width of the .pdf file in inches
#' @param pdfheight Height of the .pdf file in inches
#' @param latexwidth Proportion of the linewidth to use in LaTeX. Defaults to 1
#' @param latexlabel LaTeX label for referencing the plot in the LaTeX document
#' @param latexcaption Figure Caption in LaTeX
#' @keywords strings string utility
#' @export
save_plot_template <- function(plot_object = ggplot2::last_plot(), filenames, pdfwidth, pdfheight, latexwidth=1, latexlabel, latexcaption) {
  # Render the plot to PDF. The device is closed via on.exit so it is not
  # left open if print(plot_object) fails.
  grDevices::cairo_pdf(filename = here::here("output", "figures", paste0(filenames, ".pdf")), width = pdfwidth, height = pdfheight)
  on.exit(grDevices::dev.off(), add = TRUE)
  print(plot_object)
  # LaTeX figure snippet that embeds the exported PDF.
  figure_lines <- c("\\begin{figure}[!t]",
                    "\\centering",
                    paste0("\\includegraphics[width=", latexwidth, "\\linewidth]{../output/figures/", filenames, ".pdf}"),
                    paste0("\\caption{", latexcaption, "\\label{", latexlabel, "}}"),
                    "\\end{figure}"
  )
  readr::write_lines(figure_lines, here::here("output", "figures", paste0(filenames, ".tex")))
}
|
3a8b0381e3198a56eda7784bfb939693594dff2c | 27c44597a702f993af2c5ccf2608999903e1ac69 | /Fall_18/ESL506_StatisticalDataMiningI/HW1/HW1_Sol.R | 6469e6274b33b6c68bddc73a9989b35eb4ec580a | [] | no_license | mybinaryworld/UB-MS-Data-Sciences | d56e55b0dcda13e0bafaa0a39172243daaed50cc | 87fe1b8a5752991a97e061d25c3603b51e2035fd | refs/heads/master | 2020-04-16T17:33:36.017567 | 2019-02-17T17:06:02 | 2019-02-17T17:06:02 | 165,780,090 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 21,774 | r | HW1_Sol.R | setwd('/Users/abhishekkumar/Documents/MS_UB/Fall_18/ESL506_StatisticalDataMiningI/HW1/student')
library(ggplot2)
d1=read.table("student-mat.csv",sep=";",header=TRUE)
d2=read.table("student-por.csv",sep=";",header=TRUE)
d3=merge(d1,d2, all = TRUE)
print(nrow(d3)) # 382 students
################################################################################################
## ***SCHOOL
##Inference : Students at GP perform better in portuguese while perform similarly in Maths
################################################################################################
ggplot(d3, aes(x = school, y = G1.y))+ geom_boxplot()+ labs(title("Boxplot"),
x = "School", y = "Grades in Portuguese")
ggplot(d3, aes(x = school, y = G1.x))+ geom_boxplot()+ labs(title("Boxplot"), x = "School", y = "Grades in Math")
################################################################################################
## SEX (can remove)
##Inference : Female students performed well in portuguese but not in maths
################################################################################################
ggplot(d3, aes(x = G1.x))+ geom_density(aes(colour = as.factor(d3$sex)))
ggplot(d3, aes(x = sex, y = G1.x))+ geom_boxplot()+ labs(title = "First Period Grade", x = "Sex", y = "Grades in Math")
ggplot(d3, aes(x = sex, y = G1.y))+ geom_boxplot()+ labs(title = "First Period Grade", x = "Sex", y = "Grades in Portuguese")
by(d3$G1, d3$sex, summary)
################################################################################################
## *AGE
##Inference : There are very few students whose age is >= 20 and seems to be outliers
################################################################################################
# FIX: removed statements that were duplicated verbatim (the scatter plot,
# the boxplot, and the by() summary each appeared twice)
ggplot(aes(x = age, y = G1.x), data = d3) + geom_point()
ggplot(d3, aes(x = age, y = G1.x))+ geom_boxplot(aes(colour = as.factor(age)),alpha = 0.5)
ggplot(d3, aes(x = age, color = school,fill = school)) + geom_histogram() +
  scale_y_continuous(breaks = seq(0, 300, 50)) + scale_x_continuous(breaks = seq(0, 22, 1))
by(d3$G1.x, d3$age, summary)
#--------------------------------------------------------------------------------------
#Defining a new subset with age < 19, Calculating only for G1
#Inference : Students with age 15,16 have good grades and low variance in Portuguese but
#            in Maths age 16,17 seems to perform better
general_age_subgroup <- subset(d3, age <19)
ggplot(general_age_subgroup, aes(x = age, y = G1.x))+ geom_boxplot(aes(colour = as.factor(age)))
ggplot(general_age_subgroup, aes(x = G1.x))+ geom_density()+ facet_wrap(~age)
################################################################################################
## ***ADDRESS
##Inference : Students in Urban Area perform better in both the subjects (boxplot)
################################################################################################
# FIX: bare column names inside aes() instead of d3$address
ggplot(d3, aes(x = G1.x))+ geom_density(aes(colour = as.factor(address)))
ggplot(d3, aes(x = G1.y))+ geom_density(aes(colour = as.factor(address)))
ggplot(d3, aes(x = address, y = G1.x))+ geom_boxplot(aes(colour = as.factor(address)),alpha = 0.5)
ggplot(d3, aes(x = address, y = G1.y))+ geom_boxplot(aes(colour = as.factor(address)),alpha = 0.5)
################################################################################################
## **FAMSIZE
##Inference : The students with family size less than 3 has higher G1 mean(boxplot)
################################################################################################
# TODO confirm column names: elsewhere this script uses merge suffixes
# (G1.x / G1.y), but these two lines use unsuffixed G1 / G3 / famsize
ggplot(d3, aes(x = G1))+ geom_density(aes(colour = as.factor(famsize)))
ggplot(d3, aes(x =famsize, y = G3))+ geom_boxplot(aes(colour = as.factor(famsize)),alpha = 0.5)
################################################################################################
## PSTATUS
##Inference : It seems students whose parents are alone perform better in both the subjects
################################################################################################
# Density and boxplot of first-period Maths grade (G1.x) by parents' cohabitation status
ggplot(d3, aes(x = G1.x))+ geom_density(aes(colour = as.factor(Pstatus)))
ggplot(d3, aes(x = Pstatus, y = G1.x))+ geom_boxplot(aes(colour = as.factor(Pstatus)),alpha = 0.5)
################################################################################################
## MEDU
##Inference : With density plot we don't see any info but boxplot gives an interesting
## inference; children with mothers having higher education have higher mean of G1 in
#maths but not so in Portuguese. In portuguese students with higher mother's edu performed better
################################################################################################
# Medu is an ordinal education level; as.factor() turns it into discrete groups
ggplot(d3, aes(x = G1.x))+ geom_density(aes(colour = as.factor(Medu)))
ggplot(d3, aes(x = Medu, y = G1.x))+ geom_boxplot(aes(colour = as.factor(Medu)),alpha = 0.5)
################################################################################################
## FEDU
#Inference : There are very few students with fathers education 0 and seems to be an outlier
#In portuguese and maths students having higher father's education perform better
################################################################################################
ggplot(d3, aes(x = G1.x))+ geom_density(aes(colour = as.factor(Fedu)))
ggplot(d3, aes(x = Fedu, y = G1.x))+ geom_boxplot(aes(colour = as.factor(Fedu)),alpha = 0.5)
################################################################################################
## MJOB
#Inference : Students whose mothers are working in service sector perform better in Maths than students
#whose mothers are at_home. while this difference is not very visible in Portuguese
################################################################################################
# Recode mother's job as an ordered factor so boxplots appear in this order
d3$Mjob <- factor(d3$Mjob,levels = c('at_home','other','services','teacher','health'),ordered = TRUE)
# NOTE(review): 'G1' here vs 'G1.x' elsewhere -- confirm the merged column name
ggplot(d3, aes(x = Mjob, y = G1))+ geom_boxplot(aes(colour = as.factor(Mjob)),alpha = 0.5)
################################################################################################
## FJOB
#Inference : Students whose fathers are teachers tends to do exceptionally better than other
#students in Maths. This is not very visible in Portuguese
################################################################################################
# Same ordered recoding for father's job
d3$Fjob <- factor(d3$Fjob,levels = c('at_home','other','services','teacher','health'),ordered = TRUE)
ggplot(d3, aes(x = Fjob, y = G1.x))+ geom_boxplot(aes(colour = as.factor(Fjob)),alpha = 0.5)
################################################################################################
## REASON
#Inference : We can clearly infer from the histogram that the reputation of school GP is quite
#good and most students have enrolled considering this. But the major factor for considering
#the school is course in both the schools. In GP, its reputation as well as distance from home
#is also considered but in MS, it seems that students have only enrolled due to course and
#probably because they were not able to enroll into a school with better reputation.
################################################################################################
# FIX: geom_bar() is the idiomatic geom for counting a discrete variable;
# geom_histogram(stat = "count") is a deprecated workaround
ggplot(d3, aes(x = reason))+ geom_bar()+ facet_wrap(~school)
ggplot(d3, aes(x = reason, y = G1.x))+ geom_boxplot(aes(colour = as.factor(reason)),alpha = 0.5) + facet_wrap(~school)
################################################################################################
## ***GUARDIAN
#Inference : From boxplot we can infer that students whose guardian are other than father
#or mother struggle in both the subjects, predominantly more in Maths
################################################################################################
# FIX: bare column name inside aes() instead of d3$guardian.x
ggplot(d3, aes(x = G1.x))+ geom_density(aes(colour = as.factor(guardian.x)))
ggplot(d3, aes(x = guardian.x, y = G1.x))+ geom_boxplot(alpha=0.2)
################################################################################################
## TRAVELTIME
#Inference : Students with traveltime > 30min seem to suffer more in Maths less in Portuguese
################################################################################################
# traveltime is an ordinal band (1-4); one box per band
ggplot(d3, aes(x = traveltime.x, y = G1.x))+ geom_boxplot(aes(colour = as.factor(traveltime.x)),alpha = 0.5)
################################################################################################
## STUDYTIME
#Inference : Students with studytime = 5-10 hours/week seem to do preety good(boxplot)
#When we look at the zoomed picture of histogram, we see that some students study less than 2 hours
#weekly and score good grades
################################################################################################
ggplot(d3, aes(x = studytime.x, y = G1.x))+ geom_boxplot(aes(colour = as.factor(studytime.x)),alpha = 0.5)
ggplot(d3, aes(x = G1.x, color = as.factor(studytime.x),fill = as.factor(studytime.x))) + geom_histogram()
# coord_cartesian zooms to the top-grade range without dropping data from the bins
ggplot(d3, aes(x = G1.x, color = as.factor(studytime.x),fill = as.factor(studytime.x))) + geom_histogram() + coord_cartesian(xlim=c(15, 20))
#----------------------------------------------------------------------------------------
#FINDING EINSTEINS <- Selecting students who study 2-5 hours per week and score G1>=18
# NOTE(review): studytime.x<=2 includes bands 1 AND 2 (i.e. up to 5 h/week), not
# just the 2-5 h band -- confirm which selection was intended
einsteins.x <- subset(d3, studytime.x<=2)
einsteins.x <- subset(einsteins.x, G1.x>=18)
#We find 6 such students in Maths and two in Portuguese, and guess what! Either one of their parents is teacher or both of them!!
#Need to study more about students whose parents are teachers
einsteins.y <- subset(d3, studytime.y<=2)
einsteins.y <- subset(einsteins.y, G1.y>=18)
################################################################################################
## FAILURES
##Inference : As expected, people who never failed performed distinctly well in both the subjects
################################################################################################
# TODO confirm: 'G1' may need the merge suffix (G1.x / G1.y)
ggplot(d3, aes(x = as.factor(failures.x), y = G1))+ geom_boxplot()
################################################################################################
## SCHOOLSUP
#Inference : Very few students took school support in Maths, almost 50, similar in portuguese,
#But those who took don't seem to perform better than those who didn't.
################################################################################################
# FIX: geom_bar() for discrete counts instead of geom_histogram(stat = "count")
ggplot(d3, aes(x = schoolsup.x))+ geom_bar() + scale_y_continuous(breaks = seq(0, 1000, 100))
ggplot(d3, aes(x = as.factor(schoolsup.x), y = G1.x))+ geom_boxplot()
#---------------------------------------------------------------------------------
#We made a new subset of students who have failed earlier and made a box plot to see the
#performance of the students who took schoolsup Vs those who didn't.
# FIX: was 'failed_stud$x <- subset(...)', which errors because 'failed_stud'
# does not exist yet; the lines below expect a data frame named 'failed_stud'
failed_stud <- subset(d3, failures.x>0)
ggplot(failed_stud, aes(x = as.factor(schoolsup.x), y = G1.x))+ geom_boxplot()
by(failed_stud$G1.x, failed_stud$schoolsup.x, summary) #check median
#We found that of 66 took Maths schoolsup and 34 postuguese schoolsup
#Most who took Maths performed better while in portuguese not visible improvement is seen
################################################################################################
## FAMSUP(can delete)
##Inference : More students took family support, close to 232 in Math and Portuguese. Overall they
#did similar to those who didn't take family support.
#Even when considering just failed students, it didn't matter if student took family support; both
#performed even. In both the subjects.
# FIX (this section and the two below): geom_bar() for discrete counts instead
# of geom_histogram(stat = "count")
ggplot(d3, aes(x = famsup.x))+ geom_bar() + scale_y_continuous(breaks = seq(0, 1000, 100))
ggplot(d3, aes(x = as.factor(famsup.x), y = G1.x))+ geom_boxplot()
ggplot(failed_stud, aes(x = as.factor(famsup.x), y = G1.x))+ geom_boxplot()
################################################################################################
## PAID (can delete)
#Inference : 220 students took paid extra classes. But they performed same as those who didn't
#even among those who have failed earlier. Negative performance for students who have failed earlier
# in Maths and portugues .
#took school support
ggplot(d3, aes(x = paid.x))+ geom_bar() + scale_y_continuous(breaks = seq(0, 1000, 100))
ggplot(d3, aes(x = as.factor(paid.x), y = G1.x))+ geom_boxplot()
ggplot(failed_stud, aes(x = as.factor(paid.x), y = G1.x))+ geom_boxplot()
################################################################################################
## ACTIVITIES
#Inference : Students who were involved in extra activities performed better in Portuguese but
#this
ggplot(d3, aes(x = activities.x))+ geom_bar()
ggplot(d3, aes(x = as.factor(activities.x), y = G1.x))+ geom_boxplot()
ggplot(failed_stud, aes(x = as.factor(activities.x), y = G1.x))+ geom_boxplot()
################################################################################################
## **NURSERY
##Inference : Don't seem to make any difference
################################################################################################
# FIX: bare column names inside aes() instead of d3$nursery / d3$G3
# TODO confirm: 'G3' may need the merge suffix (G3.x / G3.y)
ggplot(d3, aes(x = G3))+ geom_density(aes(colour = as.factor(nursery)))
ggplot(d3, aes(x = as.factor(nursery), y = G1.x))+ geom_boxplot()
ggplot(d3, aes(x = as.factor(nursery), y = G1.y))+ geom_boxplot()
################################################################################################
## DALC
#Inference : Students who drink less on weekday perform better in Portuguese while no significant
#difference in Maths
################################################################################################
# Flag heavy weekday drinkers (Dalc > 3 on the 1-5 scale)
# TODO confirm: 'Dalc' may need the merge suffix (Dalc.x / Dalc.y)
d3$ddrinker <- ifelse(d3$Dalc > 3, 1, 0)
ggplot(d3, aes(x = G3))+ geom_density(aes(colour = as.factor(ddrinker)))
ggplot(d3, aes(x = as.factor(Dalc.y), y = G1.y))+ geom_boxplot()
ggplot(d3, aes(x = as.factor(Dalc.x), y = G1.x))+ geom_boxplot()
################################################################################################
## WALC
#Inference : Students who drink less on weekend perform better in Portuguese while no significant
#difference in Maths
################################################################################################
# Same flag, but for weekend consumption (note: this overwrites 'ddrinker')
d3$ddrinker <- ifelse(d3$Walc > 3, 1, 0)
ggplot(d3, aes(x = G3))+ geom_density(aes(colour = as.factor(ddrinker)))
ggplot(d3, aes(x = as.factor(Walc.y), y = G1.y))+ geom_boxplot()
ggplot(d3, aes(x = as.factor(Walc.x), y = G1.x))+ geom_boxplot()
################################################################################################
## HIGHER
#Inference : Remarkable difference in grades in both the subjects. Students who want to go for
#higher education has higher grades
################################################################################################
ggplot(d3, aes(x = as.factor(higher.y), y = G1.y))+ geom_boxplot()
ggplot(d3, aes(x = as.factor(higher.x), y = G1.x))+ geom_boxplot()
################################################################################################
## GOOUT
# NOTE(review): the original inference text here was copy-pasted from HIGHER;
# these plots show grades by how often students go out (goout, 1-5 scale)
################################################################################################
ggplot(d3, aes(x = as.factor(goout.y), y = G1.y))+ geom_boxplot()
ggplot(d3, aes(x = as.factor(goout.x), y = G1.x))+ geom_boxplot()
################################################################################################
## INTERNET(can delete)
#Inference : (none recorded)
################################################################################################
ggplot(d3, aes(x = as.factor(internet), y = G1.x))+ geom_boxplot()
################################################################################################
## HEALTH (can delete)
#Inference : Not a factor, irrelevant
################################################################################################
ggplot(d3, aes(x = as.factor(health.y), y = G1.y))+ geom_boxplot()
################################################################################################
## ROMANTIC (can delete, Not a factor)
# NOTE(review): inference text AND plot below were copy-pasted from DALC --
# the plot uses Dalc.x, not the 'romantic' column; confirm intent before fixing
################################################################################################
ggplot(d3, aes(x = as.factor(Dalc.x), y = G1.x))+ geom_boxplot()
################################################################################################
## FAMREL
#Inference : Didn't effect grades in Portuguese, but in maths students who reported worst
#family relations suffered
################################################################################################
ggplot(d3, aes(x = as.factor(famrel.y), y = G1.y))+ geom_boxplot()
ggplot(d3, aes(x = as.factor(famrel.x), y = G1.x))+ geom_boxplot()
################################################################################################
## FREETIME
#Inference : can't find pattern in maths, didn't effect grades in Portuguese
################################################################################################
ggplot(aes(x = freetime.x, y = G1.x), data = d3) + geom_point(alpha= 0.1)
ggplot(aes(x = freetime.y, y = G1.y), data = d3) + geom_point(alpha= 0.1)
################################################################################################
## ABSENCES
# NOTE(review): the original inference text here was copy-pasted from DALC,
# and the two trailing boxplots below also plot Dalc, not absences -- confirm
################################################################################################
# z-score the absences column; assumes column 30 of d3 is absences.x -- TODO confirm
d3$absences_norm.x <- scale(d3[30:30])
#OR: min-max normalize absences on the fly inside the plot
ggplot(aes(x = G1.x, y = ((absences.x-min(absences.x)))/(max(absences.x)-min(absences.x))),
       data = d3) + geom_point()
ggplot(d3, aes(x = as.factor(Dalc.y), y = G1.y))+ geom_boxplot()
ggplot(d3, aes(x = as.factor(Dalc.x), y = G1.x))+ geom_boxplot()
############################################################################################
# Histogram for 1D plot
qplot(x = age, data = d3)
# Faceting according to gender
qplot(x = G3, data = d3, binwidth = 2) + facet_wrap(~sex)
# Study-time distribution for average / good / top scorers
qplot(x = studytime, data = subset(d3, G3==10) , binwidth = 0.5, ylab = 'No of students with G3=10')
qplot(x = studytime, data = subset(d3, G3>=15) , binwidth = 0.5, ylab = 'No of students with G3>=15')
qplot(x = studytime, data = subset(d3, G3>=18) , binwidth = 0.5, ylab = 'No of students with G3>=18')
# FIX: spell out 'limits' (was the partially-matched 'lim')
qplot(x = G3, data = d3, binwidth = 2) + scale_x_continuous(limits = c(0, 22),
                                                            breaks = seq(0, 20, 2)) + facet_wrap(~sex)
#Plot histogram for health vs grade
# FIX: binwidth is a geom/stat parameter; passing it to ggplot() was silently ignored
ggplot(aes(x = G1), data = d3) + geom_histogram(binwidth = 2) + facet_wrap(~health_grade)
ggplot(aes(x = G2), data = d3) + geom_histogram(binwidth = 1) + facet_wrap(~health_grade)
ggplot(aes(x = G3), data = d3) + geom_histogram() + facet_wrap(~health_grade)
# NOTE: binwidth does not apply to geom_density(), so it was dropped here
ggplot(aes(x = G3), data = d3) + geom_density() + facet_wrap(~health_grade)
#
# Side-by-side comparison of study time for weak vs strong students
library(gridExtra)
plot_low <- qplot(x = studytime, data = subset(d3, G3<=9) , binwidth = 0.5, ylab = 'No of students with G3<=9')
plot_high <- qplot(x = studytime, data = subset(d3, G3>=15) , binwidth = 0.5, ylab = 'No of students with G3>=15')
grid.arrange(plot_low , plot_high, ncol=2)
# Derive grade bands: 0 = fail (<9), 1 = pass (9-14), 2 = distinction (>14)
# Making new column G1_Grade(0,1,2)
d3$G1_Grade <- ifelse(d3$G1 < 9, 0, 1)
d3$G1_Grade <- ifelse( d3$G1 >14 , d3$G1_Grade+1 , d3$G1_Grade)
qplot(x =G1_Grade, data = d3, binwidth = 0.5 )
# Making new column G2_Grade(0,1,2)
d3$G2_Grade <- ifelse(d3$G2 < 9, 0, 1)
d3$G2_Grade <- ifelse( d3$G2 >14 , d3$G2_Grade+1 , d3$G2_Grade)
# FIX: axis labels were copy-pasted from an unrelated exercise ("Friend Count")
qplot(x =G2_Grade, data = d3, xlab = 'G2 grade band',
      ylab = 'No of students in that band', binwidth = 0.5 )
# Making new column G3_Grade(0,1,2)
d3$G3_Grade <- ifelse(d3$G3 < 9, 0, 1)
d3$G3_Grade <- ifelse( d3$G3 >14 , d3$G3_Grade+1 , d3$G3_Grade)
qplot(x =G3_Grade, data = d3, binwidth = 0.5 )
#
# Counts and summaries by sex
table(d3$sex)
table(d3$G3 ,d3$sex)
by(d3$G3_Grade, d3$sex, summary)
qplot(x = G3, data = d3, binwidth = 2, geom = 'freqpoly')
qplot(x = G3, data = d3, binwidth = 2, geom = 'freqpoly', xlab = 'G3 Grades',
      ylab = 'No of students with that Grade', color = sex)
qplot(x = G3, y = ..count.., data = d3, binwidth = 2, geom = 'freqpoly', color = sex)
qplot(x = G3, y = ..count../sum(..count..), data = d3, binwidth = 2, geom = 'freqpoly', color = sex)
#DENSITY PLOT
a <- ggplot(d3, aes(x = G3))
a + geom_density() + geom_vline(aes(xintercept = mean(G3)), linetype = "dashed", size = 0.6)
#DENSITY PLOT WITH COUNT
# FIX: removed the stray leading '+' on the continuation line; the original
# parsed as '... + (+geom_vline(...))', which errors at run time
a + geom_density(aes(y = ..count..), fill = "lightgray") +
  geom_vline(aes(xintercept = mean(G3)), linetype = "dashed", size = 0.6, color = "#FC4E07")
#FREQUENCY POLYGON
a <- ggplot(d3, aes(x = G3))
a + geom_freqpoly(binwidth = 1)
#BOXPLOT(Check boxplot for health Vs G3), inference : health not a factor for failure
a <- ggplot(d3, aes(x = sex, y = G3))
a + geom_boxplot()
a + geom_boxplot() + coord_cartesian(ylim = c(5, 15))
#scatterplot
#forms a positive incline with studytime implying that students could improve grades by
#studying more hours but its still very difficult to get very good grades.
ggplot(aes(x = G3, y = studytime), data = d3) + geom_point()
ggplot(aes(x = studytime, y = G3), data = d3) + geom_point(alpha= 0.05)
#Getting outliers in this
ggplot(aes(x = G1, y = G2), data = d3) + geom_point(aes(colour = as.factor(paid)), alpha = 0.1)
ggsave("healthVsG3_scatterplot.png")
|
dba0a06ccf3dfa9175969fdde5ee322da4e39d09 | 0a0268f255e13dee6264a0e7c24aacbad87461c6 | /R/matter_fct.R | c1f57d35172996b3ec9df6b35b4ae5a2bf003ab0 | [] | no_license | kuwisdelu/matter | 45fd08a00618236a53af254e32dcf97609025649 | 9d2055cc9e753aded6bc2736de641140289ac7fb | refs/heads/devel | 2023-08-25T10:13:18.152860 | 2023-08-21T22:49:38 | 2023-08-21T22:49:38 | 64,353,260 | 53 | 7 | null | 2023-06-27T17:41:27 | 2016-07-28T01:18:22 | R | UTF-8 | R | false | false | 3,196 | r | matter_fct.R |
#### 'matter_fct' class for file-based character vectors ####
## ----------------------------------------------------------
# S4 class for an out-of-memory factor: the coded values live on disk via the
# inherited 'matter_vec' machinery, while 'levels' holds the stored code
# values and 'labels' holds the parallel human-readable level names.
setClass("matter_fct",
	slots = c(
		levels = "ANY",
		labels = "character"),
	contains = "matter_vec",
	# Validity: 'levels' and 'labels' must be parallel vectors, and the level
	# codes must have the same storage type as the backing data.
	validity = function(object) {
		errors <- NULL
		if ( !is.null(object@levels) ) {
			if ( length(object@levels) != length(object@labels) )
				errors <- c(errors, "'levels' and 'labels' must be the same length")
			if ( typeof(object@levels) != object@type )
				errors <- c(errors, "type of 'levels' must match object data type")
		}
		if ( is.null(errors) ) TRUE else errors
	})
# Construct an out-of-memory factor backed by a file.
#
# data:   optional in-memory values to encode and write; when given, they are
#         converted to a factor and the resulting codes are stored on disk
# levels: stored code values; defaults to sort(unique(data)) when data is given
# labels: display labels; default as.character(levels) is evaluated lazily, so
#         it picks up the levels derived from 'data' inside the body below
# Remaining arguments configure the backing file via matter_vec().
matter_fct <- function(data, levels, path = NULL,
	length = NA_integer_, names = NULL, offset = 0, extent = NA_real_,
	readonly = NA, append = FALSE, labels = as.character(levels), ...)
{
	if ( !missing(data) && !is.null(data) ) {
		if ( is.na(length) )
			length <- length(data)
		if ( is.null(names) )
			names <- names(data)
		if ( missing(levels) )
			levels <- sort(unique(data))
		# re-encode: store integer codes 1..nlevels with the factor's labels
		data <- factor(data, levels=levels, labels=labels)
		levels <- seq_len(nlevels(data))
		labels <- levels(data)
	}
	# allocate the backing vector with the storage type of the level codes
	x <- matter_vec(NULL, type=typeof(levels), path=path, length=length,
		names=names, offset=offset, extent=extent,
		readonly=readonly, append=append, rowMaj=FALSE, ...)
	x <- as(x, "matter_fct")
	x@levels <- levels
	x@labels <- labels
	# write the encoded data (if any) through the "[<-" method
	if ( !missing(data) && !is.null(data) )
		x[] <- data
	if ( validObject(x) )
		x
}
# Pull the whole factor into memory; optionally return an ALTREP wrapper
# instead of a materialized factor (controlled by a global option).
setMethod("as.factor", "matter_fct",
	function(x) {
		names(x) <- NULL
		dimnames(x) <- NULL
		if ( getOption("matter.coerce.altrep") ) {
			as.altrep(x)
		} else {
			x[]
		}
	})
# One-line summary string used by show()/print machinery.
setMethod("describe_for_display", "matter_fct", function(x) {
	desc1 <- paste0("<", length(x), " length> ", class(x))
	desc2 <- paste0("out-of-memory factor")
	paste0(desc1, " :: ", desc2)
})
# Print a preview of the data followed by the (possibly truncated) level labels.
setMethod("preview_for_display", "matter_fct", function(x) {
	preview_vector(x)
	cat("Levels(", nlevels(x), "): ", sep="")
	cat(paste_head(x@labels), "\n")
})
# Read elements i from the backing store and re-encode them as an R factor
# using this object's level codes and labels.
get_matter_fct_elts <- function(x, i = NULL) {
	raw <- get_matter_arr_elts(x, i)
	factor(raw, levels=x@levels, labels=x@labels)
}
# Write 'value' (a factor, or a vector of label strings) into the backing
# store as the corresponding stored level codes.
set_matter_fct_elts <- function(x, i = NULL, value = NULL) {
	if ( is.factor(value) ) {
		# map the factor's integer codes onto this object's labels
		value <- x@labels[value]
	}
	codes <- x@levels[match(value, x@labels)]
	set_matter_arr_elts(x, i, codes)
}
# Extraction: with drop unset (nil) return a subsetted on-disk object;
# otherwise materialize the selected elements as an in-memory factor.
setMethod("[", c(x = "matter_fct"),
	function(x, i, ..., drop = TRUE) {
		i <- as_subscripts(i, x)
		if ( is_nil(drop) ) {
			subset_matter_arr_elts(x, i)
		} else {
			get_matter_fct_elts(x, i)
		}
	})
# Replacement: encode 'value' via the factor labels and write it to disk.
setReplaceMethod("[", c(x = "matter_fct"),
	function(x, i, ..., value) {
		i <- as_subscripts(i, x)
		set_matter_fct_elts(x, i, value)
	})
# Concatenate two matter_fct objects; they must share the exact same
# levels/labels encoding for the stored codes to remain meaningful.
setMethod("combine", "matter_fct",
	function(x, y, ...) {
		# FIX: use identical() rather than any(... != ...) so that level
		# vectors of different lengths are rejected cleanly instead of
		# triggering recycling warnings/errors from the comparison itself
		if ( !identical(x@levels, y@levels) )
			stop("factor levels must match")
		if ( !identical(x@labels, y@labels) )
			stop("factor labels must match")
		new("matter_fct", callNextMethod(),
			levels=x@levels, labels=x@labels)
	})
# levels() returns the display labels (mirrors base factor semantics).
setMethod("levels", "matter_fct", function(x) x@labels)
# levels<-: an unnamed value replaces only the labels; a named value sets both
# the stored codes (the vector values) and the labels (the names).
setReplaceMethod("levels", "matter_fct",
	function(x, value) {
		if ( is.null(names(value)) ) {
			x@labels <- value
		} else {
			x@levels <- as.vector(value)
			x@labels <- names(value)
		}
		# validity re-checks that levels/labels stay parallel and well-typed
		if ( validObject(x) )
			x
	})
|
9cca73a747ad118880769fe1f32bc4064f7a6e53 | f997169854672f36810e793a2932313f11b52139 | /data/florida.R | ec34fa9d737a3dceefe474f43dde904a5faf6481 | [] | no_license | jverzani/UsingR | 7e3fcbddae97a0ecd0268a9068af7a70ecc82907 | d1cd49622b6e85cf26710c5747423b4ba0721ef6 | refs/heads/master | 2021-01-09T20:53:56.202763 | 2020-07-29T16:53:55 | 2020-07-29T16:53:55 | 57,312,995 | 1 | 3 | null | null | null | null | UTF-8 | R | false | false | 6,091 | r | florida.R | "florida" <-
structure(list(County = structure(c(1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
58, 59, 60, 61, 62, 63, 64, 65, 66, 67), .Label = c("ALACHUA",
"BAKER", "BAY", "BRADFORD", "BREVARD", "BROWARD", "CALHOUN",
"CHARLOTTE", "CITRUS", "CLAY", "COLLIER", "COLUMBIA", "DADE",
"DE SOTO", "DIXIE", "DUVAL", "ESCAMBIA", "FLAGLER", "FRANKLIN",
"GADSDEN", "GILCHRIST", "GLADES", "GULF", "HAMILTON", "HARDEE",
"HENDRY", "HERNANDO", "HIGHLANDS", "HILLSBOROUGH", "HOLMES",
"INDIAN RIVER", "JACKSON", "JEFFERSON", "LAFAYETTE", "LAKE",
"LEE", "LEON", "LEVY", "LIBERTY", "MADISON", "MANATEE", "MARION",
"MARTIN", "MONROE", "NASSAU", "OKALOOSA", "OKEECHOBEE", "ORANGE",
"OSCEOLA", "PALM BEACH", "PASCO", "PINELLAS", "POLK", "PUTNAM",
"SANTA ROSA", "SARASOTA", "SEMINOLE", "ST. JOHNS", "ST. LUCIE",
"SUMTER", "SUWANNEE", "TAYLOR", "UNION", "VOLUSIA", "WAKULLA",
"WALTON", "WASHINGTON"), class = "factor", .Names = c("ALACHUA",
"BAKER", "BAY", "BRADFORD", "BREVARD", "BROWARD", "CALHOUN",
"CHARLOTTE", "CITRUS", "CLAY", "COLLIER", "COLUMBIA", "DADE",
"DE SOTO", "DIXIE", "DUVAL", "ESCAMBIA", "FLAGLER", "FRANKLIN",
"GADSDEN", "GILCHRIST", "GLADES", "GULF", "HAMILTON", "HARDEE",
"HENDRY", "HERNANDO", "HIGHLANDS", "HILLSBOROUGH", "HOLMES",
"INDIAN RIVER", "JACKSON", "JEFFERSON", "LAFAYETTE", "LAKE",
"LEE", "LEON", "LEVY", "LIBERTY", "MADISON", "MANATEE", "MARION",
"MARTIN", "MONROE", "NASSAU", "OKALOOSA", "OKEECHOBEE", "ORANGE",
"OSCEOLA", "PALM BEACH", "PASCO", "PINELLAS", "POLK", "PUTNAM",
"SANTA ROSA", "SARASOTA", "SEMINOLE", "ST. JOHNS", "ST. LUCIE",
"SUMTER", "SUWANNEE", "TAYLOR", "UNION", "VOLUSIA", "WAKULLA",
"WALTON", "WASHINGTON")), GORE = c(47300, 2392, 18850, 3072,
97318, 386518, 2155, 29641, 25501, 14630, 29905, 7047, 328702,
3322, 1825, 107680, 40958, 13891, 2042, 9565, 1910, 1420, 2389,
1718, 2341, 3239, 32644, 14152, 166581, 2154, 19769, 6868, 3038,
788, 36555, 73560, 61425, 5403, 1011, 3011, 49169, 44648, 26619,
16483, 6952, 16924, 4588, 140115, 28177, 268945, 69550, 199660,
74977, 12091, 19482, 41559, 12795, 72854, 58888, 9634, 4084,
2647, 1399, 97063, 3835, 5637, 2796), BUSH = c(34062, 5610, 38637,
5413, 115185, 177279, 2873, 35419, 29744, 41745, 60426, 10964,
289456, 4256, 2698, 152082, 73029, 12608, 2448, 4750, 3300, 1840,
3546, 2153, 3764, 4743, 30646, 20196, 176967, 4985, 28627, 9138,
2481, 1669, 49963, 106141, 39053, 6860, 1316, 3038, 57948, 55135,
33864, 16059, 16404, 52043, 5058, 134476, 26216, 152846, 68581,
184312, 90101, 13439, 39497, 34705, 36248, 83100, 75293, 12126,
8014, 4051, 2326, 82214, 4511, 12176, 4983), BUCHANAN = c(262,
73, 248, 65, 570, 789, 90, 182, 270, 186, 122, 89, 561, 36, 29,
650, 504, 83, 33, 39, 29, 9, 71, 24, 30, 22, 242, 99, 836, 76,
105, 102, 29, 10, 289, 305, 282, 67, 39, 29, 272, 563, 108, 47,
90, 267, 43, 446, 145, 3407, 570, 1010, 538, 147, 229, 124, 311,
305, 194, 114, 108, 27, 26, 396, 46, 120, 88), NADER = c(3215,
53, 828, 84, 4470, 7099, 39, 1461, 1378, 562, 1400, 258, 5355,
157, 75, 2752, 1729, 435, 85, 139, 97, 56, 86, 39, 75, 103, 1501,
409, 7348, 91, 950, 138, 76, 26, 1459, 3587, 1932, 284, 19, 54,
2489, 1810, 1075, 1090, 255, 984, 131, 3881, 732, 5564, 3392,
9986, 2060, 379, 1214, 1368, 723, 4066, 1940, 307, 182, 59, 29,
2436, 149, 265, 93), BROWN = c(658, 17, 171, 28, 643, 1212,
10, 127, 194, 204, 185, 127, 759, 23, 32, 954, 297, 60, 17, 24,
52, 12, 21, 11, 17, 11, 116, 51, 1104, 18, 123, 40, 14, 6, 203,
538, 330, 92, 12, 18, 243, 361, 105, 162, 63, 313, 21, 892, 309,
743, 412, 1222, 365, 114, 210, 165, 131, 431, 551, 53, 53, 4,
13, 3211, 30, 68, 32), HAGELIN = c(42, 3, 18, 2, 39, 128, 1,
15, 16, 14, 34, 7, 119, 0, 2, 160, 24, 4, 3, 4, 1, 3, 4, 2, 2,
1, 26, 14, 215, 7, 13, 2, 1, 0, 36, 81, 28, 1, 3, 2, 35, 26,
29, 26, 8, 15, 4, 65, 21, 143, 82, 444, 59, 7, 11, 12, 13, 94,
38, 2, 4, 3, 0, 33, 3, 11, 20), HARRIS = c(4, 0, 5, 0, 11, 49,
0, 6, 5, 1, 7, 1, 88, 0, 0, 36, 6, 1, 1, 7, 0, 0, 2, 6, 0, 3,
8, 5, 35, 1, 4, 0, 2, 3, 4, 30, 9, 1, 0, 0, 5, 14, 13, 1, 0,
4, 1, 13, 10, 45, 18, 40, 8, 2, 4, 4, 1, 11, 38, 2, 2, 0, 1,
9888, 2, 3, 0), MCREYNOLDS = c(658, 0, 3, 0, 11, 35, 1, 3, 0,
3, 4, 2, 36, 3, 0, 16, 3, 3, 0, 3, 0, 1, 2, 9, 0, 2, 4, 3, 29,
3, 2, 1, 1, 1, 1, 5, 7, 1, 0, 1, 3, 6, 8, 0, 4, 2, 1, 7, 5, 302,
14, 27, 5, 4, 2, 10, 1, 5, 5, 0, 0, 1, 0, 3, 1, 2, 0), MOOREHEAD = c(21,
3, 37, 3, 76, 123, 3, 12, 28, 9, 29, 5, 124, 2, 2, 41, 20, 12,
2, 12, 4, 1, 9, 4, 3, 2, 22, 7, 150, 2, 10, 7, 0, 0, 14, 96,
31, 11, 2, 5, 26, 49, 12, 7, 3, 20, 4, 45, 33, 103, 77, 167,
36, 12, 13, 29, 19, 59, 70, 17, 5, 1, 0, 59, 6, 18, 5), PHILLIPS = c(20,
3, 18, 2, 72, 74, 2, 19, 18, 6, 10, 8, 69, 8, 3, 57, 110, 3,
3, 6, 2, 0, 2, 7, 2, 7, 10, 5, 66, 6, 13, 4, 0, 1, 21, 34, 16,
10, 1, 1, 19, 22, 19, 3, 3, 33, 3, 41, 10, 188, 17, 70, 46, 10,
12, 13, 43, 15, 27, 3, 9, 8, 0, 2927, 0, 7, 9), Total = c(86242,
8154, 58815, 8669, 218395, 573306, 5174, 66885, 57154, 57360,
92122, 18508, 625269, 7807, 4666, 264428, 116680, 27100, 4634,
14549, 5395, 3342, 6132, 3973, 6234, 8133, 65219, 34941, 353331,
7343, 49616, 16300, 5642, 2504, 88545, 184377, 103113, 12730,
2403, 6159, 110209, 102634, 61852, 33878, 23782, 70605, 9854,
279981, 55658, 432286, 142713, 396938, 168195, 26205, 60674,
77989, 50285, 160940, 137044, 22258, 12461, 6801, 3794, 198230,
8583, 18307, 8026)), .Names = c("County", "GORE", "BUSH", "BUCHANAN",
"NADER", "BROWN", "HAGELIN", "HARRIS", "MCREYNOLDS", "MOOREHEAD",
"PHILLIPS", "Total"), row.names = c("1", "2", "3", "4", "5",
"6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16",
"17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27",
"28", "29", "30", "31", "32", "33", "34", "35", "36", "37", "38",
"39", "40", "41", "42", "43", "44", "45", "46", "47", "48", "49",
"50", "51", "52", "53", "54", "55", "56", "57", "58", "59", "60",
"61", "62", "63", "64", "65", "66", "67"), class = "data.frame")
|
07b444b4a17aca620a7b79b777810b9cd84bd215 | 852da529709d8baace2d7666bd985456784b8ffd | /Scripts/Vectors.R | 41af565a7cfec16ff547afbffd1af540705c5f87 | [] | no_license | kristulr/2017-06-06_R_workshop | 21338695199236acdc59271a4b8674c6a7598482 | 9ba2ce2859e199d008a4ae74d01ebf150484c4da | refs/heads/master | 2021-01-25T04:49:46.603800 | 2017-06-07T12:51:17 | 2017-06-07T12:51:17 | 93,488,772 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,834 | r | Vectors.R |
# Scalars in R are just length-one vectors
x <- 5*6
x
# how to check if x is really a vector
is.vector(x)
length(x)
# Assigning past the end grows the vector
x[2]<-31
# FIX: removed a stray 'X' (capital) on its own line -- R is case-sensitive,
# so 'X' is an undefined object and the line errored
x
# Skipped positions are filled in with NA
x[5]<-44
x
# [ -> shift + alt + (
x[11]   # reading past the end returns NA
x[0]    # index 0 returns an empty vector
x <- 1:4
x
# Arithmetic is vectorized element-wise
y <- x^2
y
x <- 1:5
y <- 3:7
x
y
x+y
# Negative indices drop elements
z <- y[-5]
x+z
# Recycling: the shorter vector is repeated to match the longer one
z <- 1:10
z^x
# c() combines values; all elements are coerced to one common type
c("hello", "workshop", "participants!")
str(c("hello", "workshop", "participants!"))
c(9:11, 200, x)
str(c(9:11, 200, x))
c("something", pi, 2:4, pi > 3)
str(c("something", pi, 2:4, pi > 3))
w <- rnorm(10) # creates a vector of ten random draws from a normal distribution
seq_along(w)
w
which(w<0) # positions of the elements that are negative
w[which(w<0)] # the values at those positions
w[w<10] # logical indexing (here every element passes, since all draws are < 10)
w[-c(2,5)]
# in a vector, all data need to be of the same type, or R will coerce data into the same type
# in lists, data is allowed to be of different types
list("something", pi, 2:4, pi >3)
str(list("something", pi, 2:4, pi >3))
list (vegetable = "cabbage",
      number = pi,
      series = 2:4,
      telling = pi>3)
# assign this list to the variable x
x <- list (vegetable = "cabbage",
           number = pi,
           series = 2:4,
           telling = pi>3)
str(x)
# $ retrieves a named element of the list
x$vegetable
x[1]        # single bracket: returns a (sub)list
str(x[1])
str(x$vegetable)
x[3]
x[[3]]      # double bracket: returns the element itself
x <- list(vegetable = list("cabbage", "carrot", "spinach"),
          number = list(c(pi, 0, 2.14, NA)),
          series = list(list(2:4, 3:5)),
          telling = pi > 3)
str(x)
# lists: to get the elements out of the list/extract elements out of the list: double brackets or dollar signs
# can always use str to check the structure of the list
# NOTE: requires the 'gapminder_plus' data frame to be loaded beforehand
mod <- lm(lifeExp ~ gdpPercap, data=gapminder_plus)
mod
str(mod)
mod[[8]]
str(mod[[8]])
# or, equivalently, extract by name:
str(mod[["df.residual"]])
str(mod[["qr"]])
mod$qr$qr[1]
|
19bfe85664b21bbc003829276b857cfd5318e6b2 | bbc3943cfd57260dfc3c0603a48a80eadfd19220 | /man/bipartNetwork.Rd | 2e58d33da439d443fb174621ac6114f9940dd497 | [] | no_license | umatter/netensembleR | cef47df9c67c139bba8a5238aa2deecb605249b1 | 6f5f62a62d3d25f7ec330a1df0086809af8a208c | refs/heads/master | 2021-01-19T08:41:20.719220 | 2016-07-19T07:25:19 | 2016-07-19T07:25:19 | 87,662,883 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 822 | rd | bipartNetwork.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bipartNetwork.R
\name{bipartNetwork}
\alias{bipartNetwork}
\title{Build bipartite network}
\usage{
bipartNetwork(m)
}
\arguments{
\item{m}{a matrix with one type of observation (e.g., legislators) in rows and the other type of observation (e.g., bills) in columns (see Details)}
}
\value{
a bipartite network (object of class igraph)
}
\description{
Returns a bipartite network (e.g., of bill sponsorship)
}
\details{
Example of bill (co-)sponsorship: m is a matrix with legislators in rows and bills in columns.
Cell values in m are equal to 1 if legislator i sponsored bill j.
}
\examples{
# graph example
library(igraph)
m <- random_Matrix("binary", 10)
g <- bipartNetwork(m)
plot(g)
}
\author{
Ulrich Matter <ulrich.matter-at-unibas.ch>
}
|
be7b238080843d84c729f438f743126f442c6b8b | 2c47584f81ed8e2dc987923b1d4981badd4381d0 | /ui.R | 26e125ffe1c34f825be69a99dd6d555c71691d03 | [] | no_license | kevinbgunn/Developing-Data-Products | ea6c8cab2a1533d05322abbc7345e5505a798f9a | 6ab337ad9f1f56dbeda893af2aab8c423c25bc12 | refs/heads/master | 2020-07-02T03:58:08.532721 | 2016-11-21T05:27:08 | 2016-11-21T05:27:08 | 74,328,407 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,418 | r | ui.R | library(shiny)
# Shiny UI for a BMI calculator: a sidebar form collects weight/height and a
# main panel explains BMI and renders the outputs computed in server.R
# ("inputweightvalue", "inputheightvalue", "estimation", "diagnosis").
shinyUI(
  pageWithSidebar(
    # Application title
    headerPanel("Body Mass Index (BMI) Calculator"),
    # Input form; submitButton means outputs refresh only when Submit is clicked
    sidebarPanel(
      numericInput('weight', 'Insert your weight in pounds', 150, min = 50, max = 400, step = 1) ,
      numericInput('height', 'Insert your height in inches', 60, min = 20, max = 96, step = 1),
      submitButton('Submit')
    ),
    # Explanatory text plus the server-rendered results
    mainPanel(
      p('The Body mass index (BMI) is a measure of body fat based on the height and the weight of adult men and women.
        '),
      p('The BMI is designed to assess an individuals body weight depending on his/her height. '),
      p('The formula for calculating BMI is: BMI = (Weight in Pounds / (Height in inches x Height in inches)) x 703'),
      p('The classification for BMI values is the following:'),
      tags$div(
        tags$ul(
          tags$li('BMI <18.5 : Underweight'),
          tags$li('BMI (18.5-24.9) : Normal weight'),
          tags$li('BMI (25-29.9) : Overweight'),
          tags$li('BMI >=30 : Obesity')
        )
      ),
      p('The values you entered:'),
      p('Weight:'), verbatimTextOutput("inputweightvalue"),
      p('Height:'), verbatimTextOutput("inputheightvalue"),
      p('Your BMI is :'),
      verbatimTextOutput("estimation"),
      p('You are:'),(verbatimTextOutput("diagnosis"))
    )
  )
) |
9dea8232c7584564fe1b39692702da53898b3da9 | 66295ea19b0485b4fe0d8bcfb7054bb7fbb578d0 | /Step05_Boxplots_without_tissues.R | 59732f083a2dbb4602589d8af80e81155a9980b6 | [] | no_license | NicoPillon/Muscle_Models_Profiling | 717baeda0d68ac1e1a35f15b2a5a5eb33313011b | cc325ce01c0164b97eccd1be765bf22034c7608c | refs/heads/master | 2022-03-16T04:03:15.813054 | 2019-12-12T18:41:15 | 2019-12-12T18:41:15 | 192,915,604 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,352 | r | Step05_Boxplots_without_tissues.R | ##############################################################################################################################
# Expression data in rat, mouse and human muscle, L6, C2C12 and human myotubes ========
##############################################################################################################################
library(ggplot2)
library(ggrepel)
library(ggfortify)
library(gplots)
library(stringr)
library(grid)
library(gridExtra)
library(here)
cbPalette <- c("#56B4E9", "#D3C839", "#CC79A7", "#0072B2", "#E69F00", "#D55E00") #color palette for colorblind people
# light blue yellow pink dark blue orange red
cbShapes <- c( 21 , 21 , 24 , 24 , 22 , 22 , 23 , 23 )
cbLines <- c( 'a' , 'b' , 'c' , 'd' , 'e' , 'f' , 'g' , 'h' )
#load data and define samples
res <- readRDS(here("Data_Processed", "GENENAME_norm.Rds"))
res <- cbind(res[grep('HumanCell', colnames(res))],
res[grep('MouseC2C12', colnames(res))],
res[grep('RatL6', colnames(res))])
Sample <- c(rep("HSMC", length(grep('HumanCell', colnames(res)))),
rep("C2C12", length(grep('MouseC2C12', colnames(res)))),
rep("L6", length(grep('RatL6', colnames(res)))))
#======================================================================================================
# Function to make boxplots
#======================================================================================================
PlotFunction <- function(genename) {
  # Draw expression boxplots for one or more genes across the three muscle
  # cell models (HSMC, C2C12, L6).
  #
  # Args:
  #   genename: character vector of one or more gene symbols; each must be a
  #             rowname of the global expression table `res`.
  # Returns:
  #   A ggplot object (boxplots of normalized expression per cell model).
  # NOTE: relies on the globals `res`, `Sample` and `cbPalette` defined above.
  #
  # Build one long-format chunk per gene, then bind once at the end
  # (avoids the quadratic cost of rbind-ing a data frame inside a loop).
  per.gene <- lapply(genename, function(gene) {
    data.frame(x = Sample,
               y = as.numeric(res[gene, ]),  # expression values for this gene
               Gene = gene,
               stringsAsFactors = FALSE)
  })
  data <- do.call(rbind, per.gene)
  data$x <- factor(data$x, levels = c("HSMC", "C2C12", "L6")) # fix x-axis order
  ggplot(data, aes(x = x, y = y, fill = x)) +
    geom_boxplot() +
    labs(x = "",
         # collapse so the axis label is a single string even for several genes
         # (paste(genename) alone returned a character vector in that case)
         y = paste(genename, collapse = ", ")) +
    theme_bw() +
    theme(plot.title = element_text(face="bold", color="black", size=7, angle=0),
          axis.text.x = element_text(color="black", size=6, angle=45, hjust=1),
          axis.text.y = element_text(color="black", size=6, angle=0),
          axis.title = element_text(face="bold", color="black", size=7, angle=0),
          legend.text = element_text(face="bold", color="black", size=6, angle=0),
          legend.position="none", legend.title = element_blank()) +
    scale_fill_manual(values=cbPalette) +
    scale_color_manual(values=cbPalette)
}
# Interactive sanity-check plots for single genes
PlotFunction('MYH4')
PlotFunction('CAMK2A')
PlotFunction('MYOC')
#======================================================================================================
# Gene markers of glycolysis, contraction and beta-oxidation
#======================================================================================================
# Glycolysis: phosphofructokinase (PFKM) step is the rate-limiting step
# Beta-oxidation: Carnitine Palmitoyltransferase (CPT1A) is the rate limiting step
# Contraction response: myosin heavy chain (MYH1) is specific to adult striated muscle
# Insulin response: glucose transporter system (GLUT4) is rate limiting
# NOTE(review): grid/gridExtra are already attached at the top of this script;
# these two calls are redundant (harmless).
library(grid)
library(gridExtra)
# Figure: myosin heavy chain isoforms, 2x3 panel grid, shared y-axis limits
tiff(filename=here("Figures", "Contraction.tiff"), #print graph
     units="cm", width=12, height=10,
     pointsize=12, res=1200)
# NOTE(review): `matrix` shadows base::matrix for the rest of the session
matrix <- rbind(c(1,2,3), c(4,5,6))
grid.arrange(PlotFunction('MYH1') + ylim(-4, 7.5),
             PlotFunction('MYH3') + ylim(-4, 7.5),
             PlotFunction('MYH4') + ylim(-4, 7.5),
             PlotFunction('MYH6') + ylim(-4, 7.5),
             PlotFunction('MYH7')+ ylim(-4, 7.5),
             PlotFunction('MYH9')+ ylim(-4, 7.5),
             layout_matrix=matrix)
dev.off()
# Figure: basal glucose transporters (GLUT1/GLUT3), two panels side by side
tiff(filename=here("Figures", "Glc_Uptake_basal.tiff"), #print graph
     units="cm", width=(3*1.6), height=4,
     pointsize=12, res=1200)
matrix <- rbind(c(1,2))
grid.arrange(PlotFunction('SLC2A1'),
             PlotFunction('SLC2A3'),
             layout_matrix=matrix)
dev.off()
# Figure: insulin-responsive glucose uptake (GLUT4, PI3K)
tiff(filename=here("Figures", "Glc_Uptake_insulin.tiff"), #print graph
     units="cm", width=(3*1.6), height=4,
     pointsize=12, res=1200)
matrix <- rbind(c(1,2))
grid.arrange(PlotFunction('SLC2A4'),
             PlotFunction('PIK3CD'),
             layout_matrix=matrix)
dev.off()
# Figure: glycogen synthases (basal glycogen synthesis)
tiff(filename=here("Figures", "Glyg_Synthesis_basal.tiff"), #print graph
     units="cm", width=(3*1.6), height=4,
     pointsize=12, res=1200)
matrix <- rbind(c(1,2))
grid.arrange(PlotFunction('GYS1'),
             PlotFunction('GYS2'),
             layout_matrix=matrix)
dev.off()
# Figure: GSK3 isoforms (insulin-regulated glycogen synthesis)
tiff(filename=here("Figures", "Glyg_Synthesis_insulin.tiff"), #print graph
     units="cm", width=(3*1.6), height=4,
     pointsize=12, res=1200)
matrix <- rbind(c(1,2))
grid.arrange(PlotFunction('GSK3A'),
             PlotFunction('GSK3B'),
             layout_matrix=matrix)
dev.off()
# Figure: fatty-acid oxidation markers (carnitine palmitoyltransferases)
tiff(filename=here("Figures", "FAOx.tiff"), #print graph
     units="cm", width=6.5, height=4.3,
     pointsize=12, res=1200)
matrix <- rbind(c(1,2))
grid.arrange(PlotFunction('CPT1B'),
             PlotFunction('CPT2'),
             layout_matrix=matrix)
dev.off()
# Figure: glucose oxidation markers (pyruvate dehydrogenase subunits)
tiff(filename=here("Figures", "GlcOx.tiff"), #print graph
     units="cm", width=6.5, height=4.3,
     pointsize=12, res=1200)
matrix <- rbind(c(1,2))
grid.arrange(PlotFunction('PDHA1'),
             PlotFunction('PDHB'),
             layout_matrix=matrix)
dev.off()
# Figure: glycolysis markers
tiff(filename=here("Figures", "Glycolysis.tiff"), #print graph
     units="cm", width=(3*2), height=6,
     pointsize=12, res=1200)
matrix <- rbind(c(1,2))
grid.arrange(PlotFunction('PFKM'),
             PlotFunction('LDHA'),
             layout_matrix=matrix)
dev.off()
# Figure: proliferation markers, three panels
tiff(filename=here("Figures", "Proliferation.tiff"), #print graph
     units="cm", width=9.5, height=5,
     pointsize=12, res=1200)
matrix <- rbind(c(1,2,3))
grid.arrange(PlotFunction('MKI67'),
             PlotFunction('PLK1'),
             PlotFunction('CCNB1'),
             layout_matrix=matrix)
dev.off()
# Figure: lactate dehydrogenase isoforms, shared y-axis limits
tiff(filename=here("Figures", "Lactate.tiff"), #print graph
     units="cm", width=6, height=4.5,
     pointsize=12, res=1200)
matrix <- rbind(c(1,2))
grid.arrange(PlotFunction('LDHA') + ylim(-1.5,7),
             PlotFunction('LDHB') + ylim(-1.5,7),
             layout_matrix=matrix)
dev.off()
# Figure: citrate synthase
# NOTE(review): 'CS' is arranged twice in the same figure -- possibly a
# deliberate layout trick, but it looks like a copy-paste slip; confirm.
tiff(filename=here("Figures", "CS.tiff"), #print graph
     units="cm", width=5, height=4.5,
     pointsize=12, res=1200)
matrix <- rbind(c(1,2))
grid.arrange(PlotFunction('CS') + ylim(-0.5,5),
             PlotFunction('CS') + ylim(-0.5,5),
             layout_matrix=matrix)
dev.off()
# Figure: contraction/EPS-responsive glucose uptake signaling
tiff(filename=here("Figures", "EPS_GlcUptake.tiff"), #print graph
     units="cm", width=7.5, height=4.25,
     pointsize=12, res=1200)
matrix <- rbind(c(1,2,3))
grid.arrange(PlotFunction('TBC1D1'),
             PlotFunction('CAMK2A'),
             PlotFunction('RAC1'),
             layout_matrix=matrix)
dev.off()
|
b31e427835827c1ec31e3d00cbebd76bd2f865a0 | c6f1c615abb4f9c8a9a3ebe17f14a7c67d01d0bd | /02-building-blocks-of-data/sampling-from-vectors.R | c4476e4fb757d3365768872bc906c13081f3729d | [] | no_license | melff/dataman-r | 05b903ace706d41ef91e7a113adcedc92c0be879 | 4e919fd9be46acc243f702889fdb88ed8c4429d7 | refs/heads/main | 2023-06-23T03:24:28.475559 | 2021-07-15T19:53:32 | 2021-07-15T19:53:32 | 324,203,840 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 111 | r | sampling-from-vectors.R | #' # Sampling from vectors
# Fix the RNG seed so the sampling results below are reproducible
set.seed(143)
# with only a vector argument, sample() returns a random permutation
sample(1:9)
# draw 20 values from 1..1000 without replacement
sample(1:1000,size=20)
# a single integer first argument is shorthand for 1:6; replace=TRUE allows repeats
sample(6,size=10,replace=TRUE)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.