blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a2f49d3c44fb9e79c02e1ad2d64ea424b0ef785a | 50140c9993cd0182cf187fde9042c390c753a362 | /Compound_Alignment.R | 6fc31dc33a51f2ae0eb374589435c034cb96a0ca | [] | no_license | zhengfj1994/Liquid | 9932e26d25dd22a7177d94dab00acb44eae32f72 | 578ba389b56049b89dd3a906bfed76767fbeb4df | refs/heads/master | 2020-07-27T20:36:46.446352 | 2015-03-05T15:39:39 | 2015-03-05T15:39:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,865 | r | Compound_Alignment.R | #This files contain all of the function modules related to Compound Alignment Algorithm
# Loads every per-sample peak list from FileFolderPath and stacks them into
# one table, tagging each row with the 1-based FileID of its source file.
#
# Args:
#   FileFolderPath: directory containing the
#                   "*Group_Annotate_Isotope_Annotate_Adduct_Refinement_RtMap.csv" files.
# Returns: one data frame with all peak rows plus a FileID column, or NULL if
#          no matching file is found (the original errored on an empty folder).
AlignmentFile_Load <- function(FileFolderPath)
{
  Peak_FileName_List <- list.files(path = FileFolderPath, pattern = "Group_Annotate_Isotope_Annotate_Adduct_Refinement_RtMap.csv", full.names = TRUE)
  # One read per file (the original duplicated the read/tag logic for the
  # first file and grew the result with rbind() inside a loop, which is O(n^2);
  # it also spelled `head=TRUE`, relying on partial matching of `header`).
  Compound_Tables <- lapply(seq_along(Peak_FileName_List), function(File_ID) {
    Peak_List <- read.csv(file = Peak_FileName_List[File_ID], header = TRUE, sep = ",")
    Peak_List$FileID <- rep(File_ID, nrow(Peak_List))
    Peak_List
  })
  # Single rbind over the list; returns NULL when the list is empty.
  do.call(rbind, Compound_Tables)
}
#This function can get the Compound's Retention time by LUT of the scan_retention_time map for compound's scan value.
#The scan value of Compound_Apex, Compound_Left_Boundary and Compound_Right_Boundary can be got by the median scan value of each compound's annaotation peaks scan value.
# Here, we think the annotation peaks have more confidence than fragment peaks to a compound.
# Maps each compound's scan positions to retention times (minutes) via the
# scan -> retention-time lookup table.  The compound-level apex/boundary scan
# is the ceiled median over the compound's annotated peaks
# (Compound_MolecularMass > 0), which are trusted more than fragment peaks;
# the compound intensity is the max peak intensity among those peaks.
#
# Args:
#   Compound_List_In       : peak table with CompoundID, Compound_MolecularMass,
#                            Apex_Pos, Left_Boundary, Right_Boundary, Peak_Intensity.
#   Retention_Time_Scan_Map: lookup table; column `scan_num` plus a second
#                            column holding the retention time for that scan.
# Returns: Compound_List_In with Compound_Apex / Compound_Left_Boundary /
#          Compound_Right_Boundary / Compound_Intensity columns filled in
#          (rows of compounds without annotated peaks keep the 0 defaults).
Acquire_Compound_RetentionIme_Map <- function(Compound_List_In, Retention_Time_Scan_Map)
{
  Compound_List_Out <- Compound_List_In
  # CompoundID == 0 marks peaks that could not be grouped into any compound
  # cluster (and were not identified from the HMDB library), so start at 1.
  CompoundID_Low  <- max(1, min(Compound_List_Out$CompoundID))
  CompoundID_High <- max(Compound_List_Out$CompoundID)
  n_peaks <- nrow(Compound_List_Out)
  Compound_List_Out$Compound_Apex           <- rep(0, n_peaks)
  Compound_List_Out$Compound_Left_Boundary  <- rep(0, n_peaks)
  Compound_List_Out$Compound_Right_Boundary <- rep(0, n_peaks)
  Compound_List_Out$Compound_Intensity      <- rep(0, n_peaks)
  for (Id_index in CompoundID_Low:CompoundID_High)
  {
    # Hoisted: the original recomputed which(CompoundID == Id_index) five
    # times per compound; compute the row set once.
    compound_rows <- which(Compound_List_Out$CompoundID == Id_index)
    One_Compound <- Compound_List_Out[compound_rows, ]
    One_Compound_Valid <- One_Compound[which(One_Compound$Compound_MolecularMass > 0), ]
    # Guard (new): a gap in the id range or a compound with no annotated
    # peaks made median() return NA, so the scan lookup came back empty and
    # the assignment failed with "replacement has length zero".
    if (nrow(One_Compound_Valid) == 0) next
    # ceiling() keeps fractional (fragment-derived) medians on a real scan.
    Compound_Apex_Scan  <- ceiling(median(One_Compound_Valid$Apex_Pos))
    Compound_Left_Scan  <- ceiling(median(One_Compound_Valid$Left_Boundary))
    Compound_Right_Scan <- ceiling(median(One_Compound_Valid$Right_Boundary))
    Compound_Intensity  <- max(One_Compound_Valid$Peak_Intensity)
    Compound_List_Out$Compound_Apex[compound_rows] <-
      Retention_Time_Scan_Map[which(Retention_Time_Scan_Map$scan_num == Compound_Apex_Scan), 2]
    Compound_List_Out$Compound_Left_Boundary[compound_rows] <-
      Retention_Time_Scan_Map[which(Retention_Time_Scan_Map$scan_num == Compound_Left_Scan), 2]
    Compound_List_Out$Compound_Right_Boundary[compound_rows] <-
      Retention_Time_Scan_Map[which(Retention_Time_Scan_Map$scan_num == Compound_Right_Scan), 2]
    Compound_List_Out$Compound_Intensity[compound_rows] <- Compound_Intensity
  }
  Compound_List_Out
}
# Alignment process is for aligning the retention time across samples. So, we need convert the scan number into rention time for each sample.
# Aligns compounds across samples on retention time (RT) and molecular mass.
# Greedy clustering: repeatedly pick the most intense unaligned compound that
# has a valid mass (the "align target"), collect every file/compound pair
# whose median RT and mass fall inside the window around the target,
# optionally re-center the window once on the cluster medians (Two_Phase),
# then stamp the whole cluster with a shared Align_ID and Align_RT.  Loops
# until every compound with CompoundID > 0 is aligned, then writes the
# annotated table to CSV.
#
# Args:
#   Compound_List       : combined peak table (needs FileID, PeakID, CompoundID,
#                         Compound_MolecularMass, Compound_Apex, Compound_Intensity).
#   Align_Window_Phase1 : full RT window width (minutes) for the first pass.
#   Align_Window_Phase2 : full RT window width for the re-centered second pass.
#   Mass_Tol            : absolute molecular-mass tolerance for cluster membership.
#   Two_Phase           : if TRUE, re-center on the phase-1 cluster medians and re-collect.
#   Aligned_FileName    : path of the output CSV.
# Returns: the result of write.csv (the useful output is the CSV on disk).
#
# NOTE(review): each while-iteration rescans all remaining compounds and grows
# Compound_Align_List with rbind(), so runtime is roughly quadratic in the
# number of clusters.  Also, if the Two_Phase re-centering ever left the
# window empty, Compound_Align_List would stay NULL and nrow(NULL) > 0 below
# would error -- confirm phase 2 always recaptures at least one compound.
Compound_Alignment <- function(Compound_List, Align_Window_Phase1, Align_Window_Phase2, Mass_Tol, Two_Phase, Aligned_FileName)
{
Compound_List$Align_ID <-rep(0, nrow(Compound_List)) # Align_ID is the final unique ID across samples.
Compound_List$Align_RT <-rep(0, nrow(Compound_List)) # Align_RT is the final aligned Retention time across samples.
# Work queue: peaks not yet aligned that belong to a real compound cluster.
Compound_List_ForAlign <-Compound_List[which((Compound_List$Align_ID==0)&(Compound_List$CompoundID>0)),]
Align_ID <- 1
while(nrow(Compound_List_ForAlign)>0) #&(Compound_List$Compound_MolecularMass>0)
{
cat(paste("Align_ID=",Align_ID, "\r", sep=""))
Compound_Intensity_Array <- Compound_List_ForAlign$Compound_Intensity
Compound_MolecularMass_Array <- Compound_List_ForAlign$Compound_MolecularMass
# Calculate the index of Align Target Peak:
# the most intense remaining peak among those with a valid molecular mass.
Index_Peak_MeetMass_Larger_Zero <- which(Compound_MolecularMass_Array>0)
Index_Peak_MaxIntensity_Mass_Larger_Zero<- Index_Peak_MeetMass_Larger_Zero[which.max(Compound_Intensity_Array[Index_Peak_MeetMass_Larger_Zero])]
# Get the Compound_Mass and Retention time of the Align Target Compound
# (medians over all mass-annotated peaks of that file/compound pair).
FileID_Max_Intensity <- Compound_List_ForAlign[Index_Peak_MaxIntensity_Mass_Larger_Zero,]$FileID
CompoundID_Max_Intensity <- Compound_List_ForAlign[Index_Peak_MaxIntensity_Mass_Larger_Zero,]$CompoundID
Target_Compound_index <- which((Compound_List_ForAlign$FileID==FileID_Max_Intensity)&(Compound_List_ForAlign$CompoundID==CompoundID_Max_Intensity)&(Compound_List_ForAlign$Compound_MolecularMass>0))
AlignTarget_Compound_Mass <- median(Compound_List_ForAlign[Target_Compound_index,]$Compound_MolecularMass)
AlignTarget_RT <- median(Compound_List_ForAlign[Target_Compound_index,]$Compound_Apex)
# AlignTarget_Compound_Mass <- Compound_List_ForAlign[Index_Peak_MaxIntensity_Mass_Larger_Zero,]$Compound_MolecularMass
# AlignTarget_RT <- Compound_List_ForAlign[Index_Peak_MaxIntensity_Mass_Larger_Zero,]$Compound_Apex
cat(paste("AlignTarget_Compound_Mass=",AlignTarget_Compound_Mass, "\r", sep=""))
cat(paste("AlignTarget_RT=",AlignTarget_RT, "\r", sep=""))
FileID_Min <- min(Compound_List_ForAlign$FileID)
FileID_Max <- max(Compound_List_ForAlign$FileID)
# Use Compound_Align_List to store the compounds that should be aligned to the group of AlignTarget
Compound_Align_List <- NULL
Compound_Mass_Align_Array <- NULL
RT_Align_Array <- NULL
# Phase 1: visit every remaining file/compound pair and collect those whose
# median RT/mass fall inside the phase-1 window around the target.
for(index_FileID in FileID_Min:FileID_Max)
{
Compound_List_ForAlign_File <- Compound_List_ForAlign[which(Compound_List_ForAlign$FileID==index_FileID),]
CompoundID_Min <- min(Compound_List_ForAlign_File$CompoundID)
CompoundID_Max <- max(Compound_List_ForAlign_File$CompoundID)
for(index_CompoundID in CompoundID_Min:CompoundID_Max)
{
Compound_List_ForAlign_File_Compound <- Compound_List_ForAlign_File[which(Compound_List_ForAlign_File$CompoundID==index_CompoundID),]
Compound_List_ForAlign_File_Compound_Mass <- Compound_List_ForAlign_File_Compound[which(Compound_List_ForAlign_File_Compound$Compound_MolecularMass>0),]
# If the compound has been aligned already, we need neglect it
if(nrow(Compound_List_ForAlign_File_Compound_Mass)>0)
{
Compound_MolecularMass_Temp <- median(Compound_List_ForAlign_File_Compound_Mass$Compound_MolecularMass)
Compound_RT_Temp <- median(Compound_List_ForAlign_File_Compound_Mass$Compound_Apex)
#Compare the compound's retention time and molecular mass fall in the setting align window of the align target compound or not
if((abs(Compound_RT_Temp-AlignTarget_RT)<(Align_Window_Phase1/2.0))&(abs(Compound_MolecularMass_Temp-AlignTarget_Compound_Mass)<Mass_Tol))
{
Compound_Align_List <- rbind(Compound_Align_List, Compound_List_ForAlign_File_Compound)
Compound_Mass_Align_Array <- c(Compound_Mass_Align_Array, Compound_MolecularMass_Temp)
RT_Align_Array <- c(RT_Align_Array, Compound_RT_Temp)
}
}
}
}
if(Two_Phase)
{
# According to the Compound list that can be aligned to the target compound, calculate the statistical center of the cluster compounds,then align the compounds to the statistical center
AlignTarget_RT <- median(RT_Align_Array) #(Compound_Align_List$Compound_Apex)
AlignTarget_Compound_Mass<- median(Compound_Mass_Align_Array)
# Phase 2: discard the phase-1 cluster and re-collect around the
# re-centered target using the (usually tighter) phase-2 window.
Compound_Align_List <- NULL
Compound_Mass_Align_Array<- NULL
RT_Align_Array <- NULL
for(index_FileID in FileID_Min:FileID_Max)
{
Compound_List_ForAlign_File <- Compound_List_ForAlign[which(Compound_List_ForAlign$FileID==index_FileID),]
CompoundID_Min <- min(Compound_List_ForAlign_File$CompoundID)
CompoundID_Max <- max(Compound_List_ForAlign_File$CompoundID)
for(index_CompoundID in CompoundID_Min:CompoundID_Max)
{
Compound_List_ForAlign_File_Compound <- Compound_List_ForAlign_File[which(Compound_List_ForAlign_File$CompoundID==index_CompoundID),]
Compound_List_ForAlign_File_Compound_Mass <- Compound_List_ForAlign_File_Compound[which(Compound_List_ForAlign_File_Compound$Compound_MolecularMass>0),]
if(nrow(Compound_List_ForAlign_File_Compound_Mass)>0)
{
Compound_MolecularMass_Temp <- median(Compound_List_ForAlign_File_Compound_Mass$Compound_MolecularMass)
Compound_RT_Temp <- median(Compound_List_ForAlign_File_Compound_Mass$Compound_Apex)
#Compare the compound's retention time and molecular mass fall in the setting align window of the align target compound or not
if((abs(Compound_RT_Temp-AlignTarget_RT)<Align_Window_Phase2/2.0)&(abs(Compound_MolecularMass_Temp-AlignTarget_Compound_Mass)<Mass_Tol))
{
Compound_Align_List <- rbind(Compound_Align_List, Compound_List_ForAlign_File_Compound)
Compound_Mass_Align_Array <- c(Compound_Mass_Align_Array, Compound_MolecularMass_Temp)
RT_Align_Array <- c(RT_Align_Array, Compound_RT_Temp)
}
}
}
}
}
# Update the align result to Compound_List:
# every peak row of every clustered compound gets this cluster's Align_ID
# and the cluster's median RT as its aligned retention time.
if(nrow(Compound_Align_List)>0)
{
for(index_Align in 1:nrow(Compound_Align_List))
{
FileID_Temp <- Compound_Align_List[index_Align, ]$FileID
PeakID_Temp <- Compound_Align_List[index_Align, ]$PeakID
CompoundID_Temp <- Compound_Align_List[index_Align, ]$CompoundID
index_Peak <- which((Compound_List$FileID == FileID_Temp)&(Compound_List$PeakID == PeakID_Temp)&(Compound_List$CompoundID ==CompoundID_Temp))
Compound_List[index_Peak,]$Align_ID <-Align_ID
Compound_List[index_Peak,]$Align_RT <-median(RT_Align_Array)
}
Align_ID <- Align_ID+1
}
cat(paste("Num_Compound_List_ForAlign=",nrow(Compound_List_ForAlign), "\r", sep=""))
# Rebuild the work queue; the loop ends when nothing is left unaligned.
Compound_List_ForAlign <-Compound_List[which((Compound_List$Align_ID==0)&(Compound_List$CompoundID>0)),]
}
write.csv(Compound_List, file = Aligned_FileName) #Peaklist_w2_07_filter_Group.csv
}
|
d39b0be6f62cf09b26f1b46e08193e5510fe24c5 | 04a7c98ebecf2db764395c90455e8058711d8443 | /data-raw/phenotypes.R | 845e049dc151f544944989b84cf9768a2daa8592 | [] | no_license | Alice-MacQueen/switchgrassGWAS | f9be4830957952c7bba26be4f953082c6979fdf2 | 33264dc7ba0b54aff031620af171aeedb4d8a82d | refs/heads/master | 2022-02-01T01:12:40.807451 | 2022-01-17T20:56:20 | 2022-01-17T20:56:20 | 198,465,914 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,202 | r | phenotypes.R | # code to create phenotypes.rda goes here
# These phenotypes were merged and then carefully checked for errors and
# outliers in the `pvdiv-phenotypes` git repository.
# Thereafter, specific phenotypes were published as part of the switchgrass
# genome paper - seven environmental phenotypes, overwinter survival at three
# northern sites, and biomass in 2019 at the three core common gardens.
# Additionally, we include GWAS_CT, the number of times that genotype was
# clonally propagated and planted at these common gardens.
#
# NOTE(review): this data-raw script reads machine-specific paths in private
# sibling repositories (~/Github/pvdiv-*), so it can only be re-run on the
# author's machine; the committed phenotypes.rda is the shipped artifact.
library(tidyverse)
# Full pre-2019-replant phenotype table; source of the planting counts below.
phe_ex <- readRDS("~/Github/pvdiv-phenotypes/data/pre_replant/Phenotypes_all_pre_2019_replant.rds")
# GWAS_CT: drop the reference/unknown genotypes, count rows per
# (PLANT_ID, PLOT_GL); the second tally() then sums those counts per
# PLANT_ID, giving the number of clonal plantings of each genotype.
exampleGWAS <- phe_ex %>%
filter(!(PLANT_ID %in% c("AP13", "UNK"))) %>%
group_by(PLANT_ID, PLOT_GL) %>%
tally() %>% tally(name = "GWAS_CT")
# Seven environmental (climate) phenotypes from the genome paper.
envGWAS <- readRDS("~/Github/pvdiv-fitness-2019/data/Seven_climate_gwas.rds")
# Overwinter survival plus 2019 biomass at the three core common gardens.
fitnessGWAS <- readRDS("~/Github/pvdiv-fitness-2019/data/Phenotypes_fitness_linear.rds") %>%
dplyr::select(PLANT_ID, FRAC_SRV_THREE, CLMB_BIOMASS, KBSM_BIOMASS, PKLE_BIOMASS)
# Full joins keep genotypes present in any of the three sources
# (joined on the shared PLANT_ID key, announced by dplyr at run time).
phenotypes <- exampleGWAS %>%
full_join(envGWAS) %>%
full_join(fitnessGWAS)
# Ship as a gzip-compressed .rda in data/ for the package.
usethis::use_data(phenotypes, compress = "gzip", overwrite = TRUE)
|
ca79e121f88cda7411333becc96fd366986831d8 | 7222a65ac496bf8e7b6ddcdd3e836765f6d9eb87 | /Batch_Future/Proposed_F.R | d356acd2d692d7f28cb089dfb98da52252a42f04 | [] | no_license | liangdai/online-experiment | 34ffc9259c03bab66cbd909ffc86b758ba297764 | 60f0cad1cf0187ddc0c15a8d4249b0b79ba18e72 | refs/heads/master | 2016-09-05T22:55:23.585430 | 2014-03-01T21:20:29 | 2014-03-01T21:20:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,842 | r | Proposed_F.R | inf = function(n,a,b){
x=0:n
#ans = sum(beta(a+x,b+n-x)*a(x,n,a,b)/(n+1)/beta(1+x,1+n-x)/beta(a,b))
ans = sum(exp(lbeta(a+x,b+n-x)-lbeta(1+x,1+n-x)-lbeta(a,b)-log(n+1)))
return(ans)
}
# Integrates, over theta1 ~ Beta(alpha1, beta1), the quantity
# theta1 * P(theta2 < theta1) - E[theta2 * 1(theta2 < theta1)]
# where theta2 ~ Beta(alpha2, beta2) independently.
compute.prbopt3 <- function(alpha1, beta1, alpha2, beta2) {
  integrand <- function(theta) {
    win_term  <- theta * pbeta(theta, alpha2, beta2)
    # E[theta2 * 1(theta2 < theta)] via the Beta(alpha2 + 1, beta2) identity.
    mean_term <- pbeta(theta, alpha2 + 1, beta2) * alpha2 / (beta2 + alpha2)
    (win_term - mean_term) * dbeta(theta, alpha1, beta1)
  }
  integrate(integrand, 0, 1)$value
}
library(cubature)
# Over X ~ Binom(n1, p1) and Y ~ Binom(n2, p2), returns the probability that
# Y stays below the sample-size-scaled threshold derived from X, with R1/R2
# prior successes and N1/N2 prior totals offsetting the counts.
binomial_comp <- function(p1, p2, n1, n2, N1 = 0, N2 = 0, R1 = 0, R2 = 0) {
  successes  <- 0:n1
  thresholds <- ceiling((successes + R1) * (n2 + N2 + 1) / (n1 + N1 + 1) - 1) - R2
  sum(dbinom(successes, n1, p1) * pbinom(thresholds, n2, p2))
}
# Expected loss of assigning fraction p1 of the next T trials to arm 1,
# integrated over the joint Beta posterior of the two arm rates (x[1], x[2])
# on the unit square via cubature::adaptIntegrate.  For each rate pair the
# integrand adds (a) the in-batch regret, trials-on-worse-arm * rate gap, and
# (b) the expected post-experiment regret over the F remaining trials when
# the observed counts would rank the arms wrongly (via binomial_comp).
# NOTE(review): parameters F and T shadow R's TRUE/FALSE shorthands -- legal
# but fragile.  dbeta(x, R1, N1 - R1) treats successes/failures directly as
# Beta shape parameters, which assumes 0 < R1 < N1 (likewise for arm 2) --
# confirm callers always pass posterior counts satisfying that.
compute.ereward = function(R1,N1,R2,N2,F,p1,T){
myfun <- function(x){
if(x[1]<x[2])
ans = dbeta(x[1],R1,N1-R1)*dbeta(x[2],R2,N2-R2)*((1-binomial_comp(x[2],x[1],floor(p1*T),T-floor(p1*T),N1,N2,R1,R2))*(x[2]-x[1])*F+floor(p1*T)*(x[2]-x[1]))
else
ans = dbeta(x[1],R1,N1-R1)*dbeta(x[2],R2,N2-R2)*((1-binomial_comp(x[1],x[2],T-floor(p1*T),floor(p1*T),N2,N1,R2,R1))*(x[1]-x[2])*F+(T-floor(p1*T))*(x[1]-x[2]))
return(ans)
}
adaptIntegrate(myfun,rep(0,2), rep(1,2),tol =0.001)
}
#compute.ereward(12,beta1_store[iter,i],alpha2_store[iter,i],beta2_store[iter,i],pos_time*num_batch,probm,(num_time-i+1)*num_batch)
# --- Run the proposed batched allocation policy over all simulated runs ---
# Globals expected from earlier setup: num_iter, num_time, num_batch,
# pos_time, num_actual_batch, data1/data2 (pre-drawn Bernoulli outcomes per
# arm), p1/p2 (true arm rates), alpha*/beta*_store (posterior matrices),
# loss_rpm, post_loss, cum_rew, post_cum_rew, and compute.ereward() above.
correct_rpm <- 0  # runs whose final posterior ranks the arms correctly
for (iter in seq_len(num_iter)) {
  print(iter)
  # Uniform Beta(1, 1) priors for both arms.
  alpha1_store[iter, 1] <- 1
  beta1_store[iter, 1]  <- 1
  alpha2_store[iter, 1] <- 1
  beta2_store[iter, 1]  <- 1
  # Draws consumed so far from each arm's pre-generated outcome stream.
  idx_1 <- 0
  idx_2 <- 0
  for (i in seq_len(num_time)) {
    # Grid-search (step 0.1) the arm-1 allocation probability minimising
    # the expected loss of the next batch under the current posterior.
    min_r  <- Inf
    prob_1 <- 0
    for (probm in seq(0, 1, by = 0.1)) {
      exp_l <- compute.ereward(alpha1_store[iter, i], alpha1_store[iter, i] + beta1_store[iter, i],
                               alpha2_store[iter, i], alpha2_store[iter, i] + beta2_store[iter, i],
                               (num_time - i) * num_batch, probm, num_batch)$integral
      if (exp_l < min_r) {
        prob_1 <- probm
        min_r  <- exp_l
      }
    }
    # Split this batch between the arms according to prob_1.
    n_p1 <- rbinom(1, size = num_actual_batch[i], prob = prob_1)
    n_p2 <- num_actual_batch[i] - n_p1
    # Consume the next n_p1 / n_p2 pre-drawn outcomes and update posteriors.
    # Fixed off-by-one: the original indexed idx:(idx + n - 1); with idx == 0
    # that starts at index 0, which R silently drops, so the first batch
    # counted one draw too few and all later batches were shifted.
    s <- if (n_p1 > 0) sum(data1[iter, (idx_1 + 1):(idx_1 + n_p1)]) else 0
    alpha1_store[iter, i + 1] <- alpha1_store[iter, i] + s
    beta1_store[iter, i + 1]  <- beta1_store[iter, i] + n_p1 - s
    idx_1 <- idx_1 + n_p1
    s <- if (n_p2 > 0) sum(data2[iter, (idx_2 + 1):(idx_2 + n_p2)]) else 0
    alpha2_store[iter, i + 1] <- alpha2_store[iter, i] + s
    beta2_store[iter, i + 1]  <- beta2_store[iter, i] + n_p2 - s
    idx_2 <- idx_2 + n_p2
    # Per-batch regret: traffic sent to the inferior arm times the rate gap.
    if (p1[iter] >= p2[iter]) {
      loss_rpm[iter, i] <- n_p2 * (p1[iter] - p2[iter])
    } else {
      loss_rpm[iter, i] <- n_p1 * (p2[iter] - p1[iter])
    }
  }
  # Post-experiment regret: the full rate gap for pos_time * num_batch
  # deployment trials, zeroed below when the final posterior means (compared
  # via the equivalent alpha/beta odds ratio) rank the arms correctly.
  post_loss[iter] <- pos_time * num_batch * abs(p1[iter] - p2[iter])
  if (((alpha1_store[iter, num_time + 1] / beta1_store[iter, num_time + 1]) >
       (alpha2_store[iter, num_time + 1] / beta2_store[iter, num_time + 1])) == (p1[iter] >= p2[iter])) {
    correct_rpm <- correct_rpm + 1
    post_loss[iter] <- 0
  }
  # Reward accrued during the experiment.  Guarded: 1:0 would be c(1, 0),
  # silently summing the first element when an arm received no traffic.
  cum_rew[iter] <- (if (idx_1 > 0) sum(data1[iter, 1:idx_1]) else 0) +
    (if (idx_2 > 0) sum(data2[iter, 1:idx_2]) else 0)
  # NOTE(review): the deployment arm is chosen from the *last batch's*
  # allocation probability rather than the posterior means -- confirm intended.
  if (prob_1 >= 0.5) {
    post_cum_rew[iter] <- rbinom(1, size = pos_time * num_batch, prob = p1[iter])
  } else {
    post_cum_rew[iter] <- rbinom(1, size = pos_time * num_batch, prob = p2[iter])
  }
}
# Summary of the simulation study.
print(correct_rpm)
print(mean(cum_rew))
print(mean(post_cum_rew))
print(mean(cum_rew + post_cum_rew))
print(mean(rowSums(loss_rpm) + post_loss))
print(mean(rowSums(loss_rpm)))
print(mean(post_loss))
# Mean (over runs) of the summed end-of-experiment posterior standard
# deviations of both arms.  Renamed from `ci` to avoid clashing with the
# ci() function defined below; also removed the unused local `stop`.
ci_total <- 0
for (i in seq_len(num_iter)) {
  ci_total <- ci_total +
    sqrt(alpha1_store[i, num_time + 1] * beta1_store[i, num_time + 1] / (alpha1_store[i, num_time + 1] + beta1_store[i, num_time + 1])^3 +
         alpha2_store[i, num_time + 1] * beta2_store[i, num_time + 1] / (alpha2_store[i, num_time + 1] + beta2_store[i, num_time + 1])^3)
}
print(ci_total / num_iter)
# Approximate power of the two-sided two-proportion z-test at level z, with
# the proportions and sample sizes read off the two Beta posteriors
# (n = alpha + beta).  Vectorized: matrices in, matrix out.
pow <- function(alpha1, beta1, alpha2, beta2, z) {
  prop1 <- alpha1 / (alpha1 + beta1)
  prop2 <- alpha2 / (alpha2 + beta2)
  # Unpooled SE of the difference, and pooled SE under H0.
  diff_sd   <- sqrt(prop1 * (1 - prop1) / (alpha1 + beta1) + prop2 * (1 - prop2) / (alpha2 + beta2))
  pooled    <- (alpha1 + alpha2) / (alpha1 + beta1 + alpha2 + beta2)
  pooled_sd <- sqrt(pooled * (1 - pooled) / (alpha1 + beta1) + pooled * (1 - pooled) / (alpha2 + beta2))
  crit <- qnorm(1 - z / 2)
  upper_q <- (crit * pooled_sd - (prop1 - prop2)) / diff_sd
  lower_q <- (-crit * pooled_sd - (prop1 - prop2)) / diff_sd
  # P(reject) = upper tail beyond q1 plus lower tail below q2.
  1 - pnorm(upper_q) + pnorm(lower_q)
}
# Standard error of the difference between the two posterior-mean
# proportions (n = alpha + beta per arm).  Vectorized over matrices.
ci <- function(alpha1, beta1, alpha2, beta2) {
  prop1 <- alpha1 / (alpha1 + beta1)
  prop2 <- alpha2 / (alpha2 + beta2)
  sqrt(prop1 * (1 - prop1) / (alpha1 + beta1) + prop2 * (1 - prop2) / (alpha2 + beta2))
}
# --- Post-hoc summaries of the stored posteriors ---
# pow()/ci() are vectorized, so the whole (num_iter x num_time+1) matrices go
# in at once: one value per (run, batch) cell.
pro_exp_power = pow(alpha1_store[,],beta1_store[,],alpha2_store[,],beta2_store[,],0.05)
pro_exp_ci = ci(alpha1_store[,],beta1_store[,],alpha2_store[,],beta2_store[,])
# Posterior probability that the better arm wins, via compute.win.prob()
# (defined elsewhere; presumably 1000 is its sample/grid size -- TODO confirm
# against that function's definition).  Note the alpha - 1 / alpha + beta - 1
# arguments pass raw success/total counts with the Beta(1, 1) prior removed.
pro_winprob = matrix(0,num_iter,num_time+1)
for(i in 1:num_iter)
for(j in 1:(num_time+1)){
pro_winprob[i,j] = max(compute.win.prob(c(alpha1_store[i,j]-1,alpha2_store[i,j]-1),c(alpha1_store[i,j]+beta1_store[i,j]-1,alpha2_store[i,j]+beta2_store[i,j]-1),1000))
}
# Trajectories over batches (columns), averaged across runs ...
PRO_Power = colMeans(pro_exp_power)
PRO_CI = colMeans(pro_exp_ci)
PRO_Winprob = colMeans(pro_winprob)
# ... and end-of-experiment mean/sd across runs.
PRO_Power_mean = mean(pro_exp_power[,(num_time+1)])
PRO_Power_sd = sd(pro_exp_power[,(num_time+1)])
PRO_CI_mean = mean(pro_exp_ci[,(num_time+1)])
PRO_CI_sd = sd(pro_exp_ci[,(num_time+1)])
PRO_Winprob_mean = mean(pro_winprob[,(num_time+1)])
PRO_Winprob_sd = sd(pro_winprob[,(num_time+1)])
# Mean cumulative sample size per arm over time (alpha + beta = prior + trials).
PRO_SIZE1 = colMeans(alpha1_store+beta1_store)
PRO_SIZE2 = colMeans(alpha2_store+beta2_store)
|
96919403b90f62139ed73686148e73cfb5c1e973 | 4f3fd97558d140c15dfd166eb9b2df8573a84466 | /run_analysis.R | 395497884022d3a9c7c20ab8a8046dee486fc2a1 | [] | no_license | maxreimerson/getting_and_cleaning_data | cee756d656d956176272b093aacce41f18d7bc78 | d2c0c223edd9d98005ec32f391100e2b33d6c46f | refs/heads/master | 2020-08-29T12:36:45.416195 | 2019-10-30T14:28:30 | 2019-10-30T14:28:30 | 218,033,275 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,904 | r | run_analysis.R | # 1. Merges the training and the test sets to create one data set.
# Getting & Cleaning Data course project:
#  1. merge the UCI HAR training and test sets,
#  2. keep only the mean/std measurements,
#  3. attach descriptive activity names,
#  4. give the variables descriptive names,
#  5. write a tidy per-activity summary table.
# Side effects: downloads/unzips the dataset under base_dir and writes
# clean_UCI_HAR.txt there.
library(dplyr)  # fixed: %>%, select, rename, group_by, summarise_all were used without loading dplyr

base_dir <- "~/coursera/getting_cleaning_data/"
file_name <- paste0(base_dir, "FUCI_HAR_Dataset.zip")
# Download the archive only once.
if (!file.exists(file_name)) {
  download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", file_name)
}
# fixed: the original wrote "UCI\ HAR\ Dataset/" -- '\ ' is not a valid
# escape sequence in R string literals, so the script failed to parse.
data_dir_name <- paste0(base_dir, "UCI HAR Dataset/")
if (!dir.exists(data_dir_name)) {
  unzip(file_name, exdir = base_dir)
}

train_X_file <- paste0(data_dir_name, "train/X_train.txt")
test_X_file <- paste0(data_dir_name, "test/X_test.txt")
train_y_file <- paste0(data_dir_name, "train/y_train.txt")
test_y_file <- paste0(data_dir_name, "test/y_test.txt")

# 1. Merge the training and the test sets to create one data set.
test_df <- read.table(test_X_file, sep = "", header = FALSE, na.strings = "", stringsAsFactors = FALSE)
# fixed: the original read from `train_file`, an undefined variable.
train_df <- read.table(train_X_file, sep = "", header = FALSE, na.strings = "", stringsAsFactors = FALSE)

# Attach the numeric activity code to each measurement row.
test_y_df <- read.table(test_y_file, sep = "", header = FALSE, na.strings = "", stringsAsFactors = FALSE)
test_df$Activity <- test_y_df$V1
train_y_df <- read.table(train_y_file, sep = "", header = FALSE, na.strings = "", stringsAsFactors = FALSE)
train_df$Activity <- train_y_df$V1

# NOTE(review): merge() performs an outer join on all shared columns;
# rbind(test_df, train_df) expresses "stack the two sets" more directly --
# kept as-is to preserve the original's output ordering.
merge_df <- merge(test_df, train_df, all = TRUE)

# 2. Extract only the mean and standard deviation measurements.
feature_names_file <- paste0(data_dir_name, "features.txt")
names_df <- read.table(feature_names_file, sep = " ", header = FALSE, stringsAsFactors = FALSE)
names(merge_df) <- c(names_df$V2, "Activity_ID")
cols <- grep("-[Mm][Ee][Aa][Nn]|-[Ss][Tt][Dd]|Activity_ID", names(merge_df))
mean_std_df <- merge_df[, cols]

# 3. Use descriptive activity names for the activities in the data set.
activity_labels_file <- paste0(data_dir_name, "activity_labels.txt")
activity_labels_df <- read.table(activity_labels_file, sep = " ", header = FALSE, stringsAsFactors = FALSE)
names(activity_labels_df) <- c("Activity_ID", "Activity_Label")
with_activity_df <- merge(mean_std_df, activity_labels_df, by = "Activity_ID", all.x = TRUE, all.y = FALSE) %>%
  select(-Activity_ID) %>%
  rename(Activity = Activity_Label)

# 4. Label the data set with descriptive variable names:
#    lower-case, '-' -> '_', drop '()', expand the acc/std abbreviations.
new_names <- tolower(gsub("\\(\\)", "", gsub("-", "_", names(with_activity_df))))
new_names <- gsub("acc", "acceleration", gsub("std", "standarddeviation", new_names))
names(with_activity_df) <- new_names

# 5. Independent tidy data set: average of each variable per activity.
summarized_df <- as.data.frame(with_activity_df %>% group_by(activity) %>% summarise_all(list(mean)))

output_file <- paste0(base_dir, "clean_UCI_HAR.txt")
write.table(summarized_df, output_file, row.names = FALSE)
|
6ace7ffb219f5b95bfff622ad50c99baa78f840d | ae61d13331aac97b5d5f1306300b254eab3f5292 | /plot2.R | f85141e462b9f120dd4fa480c5c91c5ef90d7541 | [] | no_license | KseniaCadaques/ExData_Plotting1 | 17de3f29677bd3b8beee717fa50dbb4102508f98 | 8fcd26d3d9ba13aa5806e5d09d0803302a576d99 | refs/heads/master | 2021-01-22T21:22:20.301037 | 2017-03-19T16:42:37 | 2017-03-19T16:42:37 | 85,417,344 | 0 | 0 | null | 2017-03-18T16:54:17 | 2017-03-18T16:54:17 | null | UTF-8 | R | false | false | 1,459 | r | plot2.R | fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
# Exploratory Data Analysis course project, plot 2: Global Active Power over
# time for 2007-02-01/02 of the UCI household power consumption data.
zipfile <- "exdata%2Fdata%2Fhousehold_power_consumption.zip"
datafile <- "household_power_consumption.txt"

# Download and/or unzip only if the extracted data file is missing.
if (!file.exists(datafile)) {
  if (!file.exists(zipfile)) {
    # fixed: the original called download.file(fileUrl, method = "curl") --
    # wrong case (the variable defined above is fileURL) and no destfile,
    # so the call errored before downloading anything.
    download.file(fileURL, destfile = zipfile, method = "curl")
  }
  unzip(zipfile)
}

# Read a few rows with the header just to recover the column names.
data <- read.table(datafile, header = TRUE, sep = ";", na.strings = "?", nrows = 5)
names <- names(data)

# Read only the two target days without loading the whole file into memory.
# NOTE(review): with header = TRUE the first line after `skip` is consumed as
# a (discarded) header row -- confirm skip = 66636 leaves the first wanted
# observation on the next line.
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                   na.strings = "?", nrows = 2880, skip = 66636, col.names = names,
                   stringsAsFactors = FALSE)

# Combine the Date and Time columns into a single POSIXct timestamp.
library(lubridate)
# fixed: paste(Date, Time) referenced undefined globals; the columns are in `data`.
data$DateTime <- dmy_hms(paste(data$Date, data$Time))

# Plot Global Active Power over time, then copy the device to plot2.png.
plot(x = data$DateTime,
     y = data$Global_active_power,
     type = "l",
     xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.copy(png, file = "plot2.png")
dev.off() |
1287c0061a8c13c2fea9d4b915d9956060ad59e7 | c12893fddae5e96acdfd8fcbc99ffde079a84857 | /slides/ml-basics/rsrc/ml-basic-riskmin-1-loss.R | 127bd8ce88641492143b46f4a564cc93d57ac2bd | [
"CC-BY-4.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-free-unknown"
] | permissive | Pizzaknoedel/lecture_i2ml | 0ddb98f02f27377bf3de73235adb5025fe6630f5 | a630c6608aa642d45439a0742511d47267a61c20 | refs/heads/master | 2023-02-24T19:10:23.075536 | 2021-01-31T11:43:20 | 2021-01-31T11:43:20 | 315,969,310 | 0 | 0 | CC-BY-4.0 | 2021-01-31T11:43:21 | 2020-11-25T14:49:45 | null | UTF-8 | R | false | false | 5,882 | r | ml-basic-riskmin-1-loss.R | ##################################################################
####################Loss in a function ###########################
##################################################################
# Aim: show loss of a single observation
#------------------------------------------------------------------
# Libraries
#------------------------------------------------------------------
library(ggplot2)
library(gridExtra)
library(grid)
#------------------------------------------------------------------
# Function: Plot model + show one loss
#------------------------------------------------------------------
#plot points + model
plot_data <- function(data){
  # Scatterplot of the observations plus the fitted line (column `model_1`),
  # on a fixed y range so the abs/sqrd panels stay comparable.
  base_plot <- ggplot(data = data, mapping = aes(x = x, y = y))
  base_plot <- base_plot + geom_point(size = 4)
  base_plot <- base_plot + geom_line(aes(x = x, y = model_1))
  base_plot <- base_plot + theme_classic()
  base_plot <- base_plot + ylim(c(-5, 15))
  # Large axis text plus a thin full border around the panel.
  base_plot + theme(axis.text = element_text(size = 25),
                    axis.title = element_text(size = 25),
                    panel.border = element_rect(colour = "black", fill = NA, size = 0.5))
}
# plot + loss
# Draws the data + model via plot_data() and highlights observation `example`:
# for "abs" a vertical blue segment of length |residual|, for "sqrd" a blue
# square whose side is the |residual|; in both cases an annotation spells out
# the loss value L(y, f(x)).
# NOTE(review): reads the global vectors model_1, y and x rather than the
# columns of `data` -- works only while the script keeps them in sync.
# Also, `if (type == "abs")` with the default length-2 `type` errors in
# R >= 4.2; callers must pass a single type string.
plot_loss <- function (data, example, type = c("abs", "sqrd")){
# Squared and absolute loss of the highlighted observation.
loss_example_sqrd <- (model_1[example]-y[example])^2
loss_example_abs <- abs(model_1[example]-y[example])
if(type=="abs"){
# Vertical segment from the fitted value to the observed value.
plot <- plot_data(data) +
geom_segment(aes(x = x[example],
y = model_1[example],
xend = x[example],
yend = y[example]),
colour = "blue",
size = 1) +
annotate("text", x = 5.5, y = -5, label = bquote(bolditalic(L)(y,f(bold(.(round(x[example],2)))))~"="~"|"~.(round(model_1[example],2))~"-"~(.(round(y[example],2)))~"|"~"="~.(round(loss_example_abs,2))), size = 7)
}
if(type=="sqrd"){
# Square with side |residual|, anchored at the fitted/observed values.
plot <- plot_data(data) +
geom_rect(aes(xmin=x[example],
xmax=x[example]+loss_example_abs,
ymin=model_1[example],
ymax=y[example]),
fill="blue",
alpha=0.01,
inherit.aes = FALSE,
color = "blue") +
annotate("text", x = 5.5, y = -5, label = bquote(bolditalic(L)(y,f(bold(.(round(x[example],2)))))~"="~"("~.(round(model_1[example],2))~"-"~(.(round(y[example],2)))~")"^2~"="~.(round(loss_example_sqrd,2))), size = 7)
}
# NOTE(review): an unrecognized `type` leaves `plot` undefined and errors here.
plot
}
#------------------------------------------------------------------
# Function: Loss function plot
#------------------------------------------------------------------
#different loss functions
# L1 ("abs") or squared L2 ("sqrd") loss of a residual vector x.
#
# Args:
#   x   : numeric vector of residuals.
#   type: "abs" or "sqrd" (partial matches accepted; defaults to "abs").
# Returns: numeric vector of per-element losses.
#
# Fixed: the original compared the default length-2 `type` with `==` (an
# error in R >= 4.2) and left the result undefined for an unknown `type`;
# match.arg() resolves the default to "abs" and rejects invalid choices.
loss_type <- function(x, type = c("abs", "sqrd")){
  type <- match.arg(type)
  switch(type,
         abs  = abs(x),
         sqrd = x^2)
}
# Draws the chosen loss function L over residuals in [-5, 5], marks every
# observation's (residual, loss) point with a drop line to zero, and
# highlights observation `example` in blue.
# NOTE(review): residuals are computed from the global vectors as
# model_1 - y, i.e. f(x) - y, while the x-axis label says "y - f(x)" -- the
# sign is flipped relative to the label (harmless for abs/sqrd losses, which
# are symmetric, but worth confirming).  Like plot_loss(), this relies on
# the globals model_1 and y staying in sync with `data`.
plot_loss_function <- function (data, example, type = c("abs", "sqrd")){
#create data for loss function (dense grid for a smooth curve)
x_seq <- seq(-5,5,0.01)
loss_function <- loss_type(x_seq, type)
data_loss_function <- data.frame(x = x_seq, y = loss_function)
#show loss data of example
loss <- loss_type(data$model_1-data$y, type)
residuals <- model_1 - y
data_examples <- data.frame(residuals = residuals, loss = loss)
#create plot: loss curve, observation points, drop lines, blue highlight
loss_function_plot <- ggplot(data = data_loss_function, mapping = aes(x = x, y = y))+
geom_line() +
geom_point(mapping = aes(x=residuals, y=loss), data = data_examples, shape =1, size = 4, color = "black")+
xlim(-5,5) +
theme_classic()+
geom_segment(mapping = aes(x = residuals,
y = loss,
xend = residuals,
yend = rep(0, length(residuals))),
colour = "black",
data = data_examples,
size= 1) +
geom_segment(mapping = aes(x = residuals[example],
y = loss[example],
xend = residuals[example],
yend = 0),
colour = "blue",
data = data_examples,
size = 1) +
xlab ( "y - f(x)")+
ylab ("L(y, f(x))") +
theme(axis.text=element_text(size=25),
axis.title=element_text(size=25),
panel.border = element_rect(colour = "black", fill=NA, size=0.5))
loss_function_plot
}
#------------------------------------------------------------------
# Function: Plot both plot
#------------------------------------------------------------------
plot_two <- function (data, example, type = c("abs", "sqrd")) {
  # Arrange the model/loss panel next to the loss-function panel, side by side.
  # (The original also had two bare-name statements, which are no-ops inside
  # a function body.)
  left_panel  <- plot_loss(data, example, type = type)
  right_panel <- plot_loss_function(data, example, type = type)
  gridExtra::grid.arrange(left_panel, right_panel, ncol = 2)
}
#------------------------------------------------------------------
# Create data
#------------------------------------------------------------------
#######
# Create sample data
# Simulate a small regression data set y = b0 + b1*x + Gaussian noise, plus
# the model line the loss plots annotate.
set.seed(1234)
#number of data points
n <- 15
# errors; note: this `sd` shadows stats::sd for the rest of the script
sd <- 2
eps <- rnorm(n = n , mean = 0, sd = sd)
# x values, evenly spaced on [1, 10]
x <- seq (1,10,length.out = n)
#linear model
b0 <- 1.25
b1 <- 0.9
y <- b0 + b1*x + eps
#####
#model 1 & 1.5
# The plotted model uses the true coefficients, i.e. the noise-free line.
model_1 <- 1.25 + 0.9*x
#####################################
# input for the functions:
#####################################
# The plot functions read columns of `data` but also the global x, y and
# model_1 vectors above -- keep them in sync.
data <- data.frame(x = x, y = y, model_1 = model_1)
#at which observation do we have a closer look (here: number 4)
example <- 4
#------------------------------------------------------------------
# Plot both plot + save them
#------------------------------------------------------------------
# plot absolute loss and write it to disk
# (assumes a "figure/" directory exists relative to the working directory)
abs_plot <- plot_two (data, example, type = "abs")
ggsave(filename = "figure/ml-basic_riskmin-1-loss_abs.png", plot = abs_plot , width = 14, height = 5, units = "in")
# plot squared loss and write it to disk
# (the original comment said "absolute" here — copy-paste slip; the call
# below clearly produces the squared-loss figure)
sqrd_plot <- plot_two (data, example, "sqrd")
ggsave(filename = "figure/ml-basic_riskmin-1-loss_sqrd.png", plot = sqrd_plot , width = 14, height = 5, units = "in")
|
e819fc0174178ee88c7c249754bdaec230c604e2 | 87b1d02bffc59280b28d211bbb62258a144c7223 | /man/theme_hmr.Rd | 1bb2acda8ac587fb6b5be2af5835a3667ad55b07 | [] | no_license | jscarlton/jscTools | 811daf35e316bf41bbc62f4a43a16496a69bd1c5 | baf1fb147f3d2db3425243f493b839e7ba0699ca | refs/heads/master | 2023-02-02T20:56:35.176730 | 2020-12-18T15:53:59 | 2020-12-18T15:53:59 | 52,059,335 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 728 | rd | theme_hmr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/theme_hmr.R
\name{theme_hmr}
\alias{theme_hmr}
\title{A theme for plots for the Half Moon Reef paper
Based on ggthemes' fivethirtyeight, but only in that I used that as a shell}
\usage{
theme_hmr(
base_size = 12,
base_family = "Whitney-Medium",
title_family = "Whitney-Semibold"
)
}
\arguments{
\item{base_size}{Base font size for the plot. Defaults to 12.}
\item{base_family}{Base font family. Defaults to Whitney Medium.}
\item{title_family}{Family for the title font. Defaults to Whitney Semibold}
}
\description{
A theme for plots for the Half Moon Reef paper
Based on ggthemes' fivethirtyeight, but only in that I used that as a shell
}
|
1f22b6250460d39a76d58a10a018b134e7b61623 | 611c6eff6033f6aa943349d485aa68b1a7116d4e | /man/getGBIFpoints.Rd | 18a2c5fe01f0466eb778b68ec5c72cc5cb5030c7 | [] | no_license | cmerow/occCite | 9051d4860c32a33e89af79f6d71851f4e6043800 | 64f839c23f4a1ea543deee796fa270265b266206 | refs/heads/master | 2020-03-31T01:42:32.518824 | 2018-10-06T00:52:55 | 2018-10-06T00:52:55 | 151,784,102 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 975 | rd | getGBIFpoints.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getGBIFpoints.R
\name{getGBIFpoints}
\alias{getGBIFpoints}
\title{Download occurrence points from GBIF}
\usage{
getGBIFpoints(taxon, GBIFLogin = GBIFLogin,
GBIFDownloadDirectory = GBIFDownloadDirectory, limit = NULL)
}
\arguments{
\item{taxon}{A single species}
\item{GBIFLogin}{An object of class \code{\link{GBIFLogin}} to log in to GBIF to begin the download.}
\item{GBIFDownloadDirectory}{An optional argument that specifies the local directory where GBIF downloads will be saved. If this is not specified, the downloads will be saved to your current working directory.}
}
\value{
A list containing (1) a dataframe of occurrence data; (2) GBIF search metadata
}
\description{
Downloads occurrence points and useful related information for processing within other occCite functions
}
\examples{
getGBIFpoints(taxon="Gadus morhua", GBIFLogin = myGBIFLogin, GBIFDownloadDirectory = NULL);
}
|
50a64a3a166a34ba09eb0c0ef79c28bd62c5ea13 | e93cf6300b21ad0e89fcfd5d2ce05029c3247952 | /astsa/R/SVfilter.R | 7282c5482558acd3a82df375588444ec3acd8eba | [
"GPL-3.0-only"
] | permissive | wilsonify/TimeSeries | 51a1b80afa512aad4512471d8031049699b890f1 | 0d67fc256e6a406c560711d448f2db3f66203a23 | refs/heads/master | 2023-01-08T17:43:00.863159 | 2023-01-07T16:57:55 | 2023-01-07T16:57:55 | 146,666,517 | 0 | 0 | MIT | 2018-09-25T12:50:18 | 2018-08-29T22:38:08 | Jupyter Notebook | UTF-8 | R | false | false | 1,329 | r | SVfilter.R | SVfilter = function(num,y,phi0,phi1,sQ,alpha,sR0,mu1,sR1){
#
# see: http://www.stat.pitt.edu/stoffer/booty.pdf section 2.2 for details
# y is log(return^2)
# model is h_t+1 = phi0 + phi1*h_t + w_t
# y_t = alpha + h_t + v_t
# v_t is a mixture, see (14) of the above reference
#
#
#
# Initialize
y=as.matrix(y)
Q=sQ^2
R0=sR0^2
R1=sR1^2
# initial values
xp=matrix(0,num,1) # = h_t+1^t
Pp=matrix(0,num,1) # = P_t+1^t
xp[1]= 0 # = h_1^0 | E(h_0) = mu 0 = 0 initial conditions
Pp[1]= phi1^2 + Q # = P_1^0 | var(h_0) = Sigma0 = 1
pi1=.5 # initial mix probs
pi0=.5
pit1=.5
pit0=.5
like=0 # -log(likelihood)
#
for (i in 2:num){
sig1 = Pp[i-1]+R1 #innov var
sig0 = Pp[i-1]+R0
k1 = phi1*Pp[i-1]/sig1
k0 = phi1*Pp[i-1]/sig0
e1 = y[i]-xp[i-1]-mu1-alpha
e0 = y[i]-xp[i-1]-alpha
den1 = (1/sqrt(sig1))*exp(-.5*e1^2/sig1)
den0 = (1/sqrt(sig0))*exp(-.5*e0^2/sig0)
denom = pi1*den1+pi0*den0
pit1 = pi1*den1/denom
pit0 = pi0*den0/denom
#
xp[i] = phi0 + phi1*xp[i-1] + pit0*k0*e0 + pit1*k1*e1
Pp[i] = (phi1^2)*Pp[i-1] + Q - pit0*(k0^2)*sig0 - pit1*(k1^2)*sig1
#
like = like - 0.5*log(pi1*den1 + pi0*den0)
}
list(xp=xp,Pp=Pp,like=like)
}
|
ab1ec03d860a70570982637bdacc3bb11215e3bd | 24be4b4c1662427892595214713eb1ffee86929f | /R Code Elementary.R | 4d1214d9a2b310bd4f99d2e047313afd0a079e13 | [] | no_license | MsRaimo/My-first-R-work | 833bb26415a384ce736fe487abd5b675f64d2d1b | 570cfd060c535ed6c4b7b47b1ae3367a6cd7a723 | refs/heads/master | 2016-09-10T08:22:03.493162 | 2015-04-21T14:40:10 | 2015-04-21T14:40:10 | 21,013,897 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,736 | r | R Code Elementary.R |
#//////////////////////////////////////////////////////////////////////////////
#/// ///
#/// Spring & Spring Medians - READING Grades 3-6 for Templates ///
#/// School, Grade, Program ///
#/// ///
#//////////////////////////////////////////////////////////////////////////////
dat<-read.table("Z:/admdata/DATA/AssessmentData/2013-2014 CSIp/CSIP Templates/Spring Background Data/ElemReadRaw.csv", header=TRUE, sep=",")
attach(dat)
ID<-factor(ID)
Grade<-factor(Grade)
School<-factor(School)
prog<-factor(prog)
library(plyr)
t<-ddply(dat, .(School, Grade, prog), summarise, Num=sum(count), Spring13Median=median(Sp13read), Spring14Median=median(Sp14read))
write.csv(t, file="Z:/admdata/DATA/AssessmentData/2013-2014 CSIp/CSIP Templates/Spring Background Data/SpringElemREAD.csv")
detach(dat)
rm(list=ls())
#//////////////////////////////////////////////////////////////////////////////
#/// ///
#/// Spring & Spring Medians - MATH Grades 3-6 for Templates ///
#/// School, Grade, Program ///
#/// ///
#//////////////////////////////////////////////////////////////////////////////
dat<-read.table("Z:/admdata/DATA/AssessmentData/2013-2014 CSIp/CSIP Templates/Spring Background Data/ElemMathRaw.csv", header=TRUE, sep=",")
attach(dat)
ID<-factor(ID)
Grade<-factor(Grade)
School<-factor(School)
prog<-factor(prog)
library(plyr)
t<-ddply(dat, .(School, Grade, prog), summarise, Num=sum(count), Spring13Median=median(Sp13ma), Spring14Median=median(Sp14ma))
write.csv(t, file="Z:/admdata/DATA/AssessmentData/2013-2014 CSIp/CSIP Templates/Spring Background Data/SpringElemMATH.csv")
detach(dat)
rm(list=ls())
#//////////////////////////////////////////////////////////////////////////////
#/// ///
#/// Spring Medians - READING Grades 3-6 for Templates ///
#/// Grade, Program ///
#/// ///
#//////////////////////////////////////////////////////////////////////////////
dat<-read.table("Z:/admdata/DATA/AssessmentData/2013-2014 CSIp/CSIP Templates/Spring Background Data/ElemReadingRawGlobal.csv", header=TRUE, sep=",")
attach(dat)
ID<-factor(ID)
Grade<-factor(Grade)
School<-factor(School)
genorELL<-factor(genorELL)
library(plyr)
t<-ddply(dat, .(Grade, genorELL), summarise, Mean=mean(Sp14read), Spring13Median=median(Sp13read), Spring14Median=median(Sp14read))
write.csv(t, file="Z:/admdata/DATA/AssessmentData/2013-2014 CSIp/CSIP Templates/Spring Background Data/SpringReadingMedians.csv")
detach(dat)
rm(list=ls())
#//////////////////////////////////////////////////////////////////////////////
#/// ///
#/// Spring Medians - MATH Grades 3-6 for Templates ///
#/// Grade, Program ///
#/// ///
#//////////////////////////////////////////////////////////////////////////////
dat<-read.table("Z:/admdata/DATA/AssessmentData/2013-2014 CSIp/CSIP Templates/Spring Background Data/ElemMathRawGlobal.csv", header=TRUE, sep=",")
attach(dat)
ID<-factor(ID)
Grade<-factor(Grade)
School<-factor(School)
genorELL<-factor(genorELL)
library(plyr)
t<-ddply(dat, .(Grade, genorELL), summarise,Mean=mean(Sp14ma), Spring13Median=median(Sp13ma), Spring14Median=median(Sp14ma))
write.csv(t, file="Z:/admdata/DATA/AssessmentData/2013-2014 CSIp/CSIP Templates/Spring Background Data/SpringMathMedians.csv")
detach(dat)
rm(list=ls())
#//////////////////////////////////////////////////////////////////////////////
#/// ///
#/// Spring Medians - READING Grade 2 for Templates ///
#/// School, Grade, Program ///
#/// ///
#//////////////////////////////////////////////////////////////////////////////
dat<-read.table("Z:/admdata/DATA/AssessmentData/2013-2014 CSIp/CSIP Templates/Spring Background Data/2ndGradeReadingRaw.csv", header=TRUE, sep=",")
attach(dat)
ID<-factor(ID)
Grade<-factor(Grade)
School<-factor(School)
Course<-factor(Program)
lang<-factor(lang)
Text.Level<-as.factor(Textlevel)
library(plyr)
t2<-ddply(dat, .(School, Grade, Program), summarise, Num=sum(n),AtRisk=sum(n[Textlevel=="At Risk"]),GtoI=sum(n[Textlevel=="G to I"]), JandAbove=sum(n[Textlevel=="J and Above"]), MedianTL=median(TLScale),SpringMedian=median(Sp14read) )
write.csv(t2, file="Z:/admdata/DATA/AssessmentData/2013-2014 CSIp/CSIP Templates/Spring Background Data/SpringGr2READING_TextLevel.csv")
detach(dat)
rm(list=ls())
#//////////////////////////////////////////////////////////////////////////////
#/// ///
#/// Spring MATH Grades 2 for Templates ///
#/// School, Grade, Program ///
#/// ///
#//////////////////////////////////////////////////////////////////////////////
dat<-read.table("Z:/admdata/DATA/AssessmentData/2013-2014 CSIp/CSIP Templates/Spring Background Data/2ndGradeMathRaw.csv", header=TRUE, sep=",")
attach(dat)
ID<-factor(ID)
Grade<-factor(Grade)
School<-factor(School)
Program<-factor(Program)
library(plyr)
t<-ddply(dat, .(School, Grade, Program), summarise, Num=sum(n), Median=median(Sp14ma))
write.csv(t, file="Z:/admdata/DATA/AssessmentData/2013-2014 CSIp/CSIP Templates/Spring Background Data/SpringGrade2_Math.csv")
detach(dat)
rm(list=ls())
#//////////////////////////////////////////////////////////////////////////////
#/// ///
#/// Spring Medians - READING Grade 2 for TOP Templates ///
#/// School, Grade, Program ///
#/// ///
#//////////////////////////////////////////////////////////////////////////////
dat<-read.table("Z:/admdata/DATA/AssessmentData/2013-2014 CSIp/CSIP Templates/Spring Background Data/2ndGradeReadingRaw.csv", header=TRUE, sep=",")
attach(dat)
ID<-factor(ID)
Grade<-factor(Grade)
School<-factor(School)
prog<-factor(prog)
lang<-factor(lang)
Text.Level<-as.factor(Textlevel)
library(plyr)
t2<-ddply(dat, .(Grade, prog), summarise, Num=sum(n),SpringMedian=median(Sp14read) )
write.csv(t2, file="Z:/admdata/DATA/AssessmentData/2013-2014 CSIp/CSIP Templates/Spring Background Data/SpringGr2READING TOP.csv")
detach(dat)
rm(list=ls())
#//////////////////////////////////////////////////////////////////////////////
#/// ///
#/// Spring MATH Grades 2 for TOP Templates ///
#/// School, Grade, Program ///
#/// ///
#//////////////////////////////////////////////////////////////////////////////
dat<-read.table("Z:/admdata/DATA/AssessmentData/2013-2014 CSIp/CSIP Templates/Spring Background Data/2ndGradeMathRaw.csv", header=TRUE, sep=",")
attach(dat)
ID<-factor(ID)
Grade<-factor(Grade)
School<-factor(School)
prog<-factor(prog)
library(plyr)
t<-ddply(dat, .(Grade, prog), summarise, Num=sum(n), Median=median(Sp14ma))
write.csv(t, file="Z:/admdata/DATA/AssessmentData/2013-2014 CSIp/CSIP Templates/Spring Background Data/SpringGrade2_Math TOP.csv")
detach(dat)
rm(list=ls())
#//////////////////////////////////////////////////////////////////////////////
#/// CHANNING ///
#/// Spring & Spring Medians - READING Grades 3-6 for Templates ///
#/// School, Grade, Program ///
#/// ///
#//////////////////////////////////////////////////////////////////////////////
dat<-read.table("Z:/admdata/DATA/AssessmentData/2013-2014 CSIp/CSIP Templates/Spring Background Data/ElemReadRaw.csv", header=TRUE, sep=",")
attach(dat)
ID<-factor(ID)
Grade<-factor(Grade)
School<-factor(School)
type<-factor(type)
library(plyr)
t<-ddply(dat, .(School, Grade, type), summarise, Num=sum(count), Spring13Median=median(Sp13read), Spring14Median=median(Sp14read))
write.csv(t, file="Z:/admdata/DATA/AssessmentData/2013-2014 CSIp/CSIP Templates/Spring Background Data/SpringElemREADCHANNING.csv")
detach(dat)
rm(list=ls())
#//////////////////////////////////////////////////////////////////////////////
#/// CHANNING ///
#/// Spring & Spring Medians - MATH Grades 3-6 for Templates ///
#/// School, Grade, Program ///
#/// ///
#//////////////////////////////////////////////////////////////////////////////
dat<-read.table("Z:/admdata/DATA/AssessmentData/2013-2014 CSIp/CSIP Templates/Spring Background Data/ElemMathRaw.csv", header=TRUE, sep=",")
attach(dat)
ID<-factor(ID)
Grade<-factor(Grade)
School<-factor(School)
type<-factor(type)
library(plyr)
t<-ddply(dat, .(School, Grade, type), summarise, Num=sum(count), Spring13Median=median(Sp13ma), Spring14Median=median(Sp14ma))
write.csv(t, file="Z:/admdata/DATA/AssessmentData/2013-2014 CSIp/CSIP Templates/Spring Background Data/SpringElemMATHCHANNING.csv")
detach(dat)
rm(list=ls())
|
ecda451595a26cee1b4e66c8265da9c042ee78bc | 0d7bc1c49cabb4b8b6225199413cd2ee8a4c1b93 | /plot1.R | fc3c5b51cb0ac386552bc06ebb36604e55dea094 | [] | no_license | jameshughe5/ExData_Plotting1 | 482ffbd5b4f688d5d36234a26aeca18586c693ef | 911b57669aba010240dcc22dba1a14e9fd85e1c7 | refs/heads/master | 2020-04-24T11:23:50.248382 | 2019-02-21T18:54:10 | 2019-02-21T18:54:10 | 171,923,539 | 0 | 0 | null | 2019-02-21T18:30:06 | 2019-02-21T18:30:06 | null | UTF-8 | R | false | false | 631 | r | plot1.R | # Load data set in R
setwd("~/R/ExData_Plotting1")
dat <- read.csv("~/R/ExData_Plotting1/household_power_consumption.txt", sep=";")
# Convert dates into date class and subset
dat$Date <- as.Date(dat$Date, format = "%d/%m/%Y")
dat2 <- dat[dat$Date >= "2007-02-01" & dat$Date <= "2007-02-02",]
rm(dat)
# Plot a histogram of global active power (Plot 1)
hist(as.numeric(as.character(dat2$Global_active_power)), breaks = 12, col = "Red",
xlab = "Global Active Power (kilowatts)", ylab = "Frequency",
main = "Global Active Power")
# Save plot as PNG
dev.copy(png, 'plot1.png')
dev.off()
rm(list = ls()) |
02cd5e71c5e1af47a5d8b00628c151edcfc8bf61 | 4f9b276d0ca918222d7a066bab4ba27336e4581a | /processDefects.R | 12d136762481cca3edf3b8ee00cdbc421db1bbb8 | [] | no_license | traghunath98/automation | 23f86dddf527607d398d5b308868e7429729ac18 | b4d72bdf6fd696927546c43ac5c1d8ef6a0a20e3 | refs/heads/master | 2021-07-19T20:47:29.177317 | 2021-02-21T04:58:45 | 2021-02-21T04:58:45 | 38,445,545 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,631 | r | processDefects.R | ## The objective of these sets of functions are to automate the process of analysis of defects
## First we will read the defects from a CSV file into a data frame. We will then transform the
## the data frame to give us the necessary critical insights.
## All graphs must be generated at the click of a button.
##
##
## Each function is documented below in detail to highlight its specific responsibility.
##
##
##
## This function - processDefects is the main function that takes the file name as input and applies transformation
##
# Main driver: reads a defect-export CSV, builds a set of cross
# tabulations (stored as GLOBALS via `<<-` so they remain inspectable
# after the run), and renders all analysis charts into a single PDF.
#
# Args:
#   x_file: path to the HPQC defect export CSV.
# Side effects: creates/overwrites "CCUW_Defects_Analysis_1Jul15.pdf" in
# the working directory and assigns xtab_* / ls_cumDefects globally.
processDefects <- function(x_file = character()) {
  defects <- data.frame()
  if(!file.exists(x_file)){
    stop("invalid file")
  }
  ## read the defects file
  defects <- read.csv(x_file, stringsAsFactors=FALSE)
  ## set the column names appropriately, then convert the categorical
  ## analysis columns to factors
  defects <- defineColNames(defects)
  defects <- convertToFactors(defects)
  ## Status codes considered "not yet resolved"; everything else in the
  ## data is treated as resolved for the RCA charts below
  status_codes <- c("New", "Open", "Analysis Complete","Blocked", "Deferred", "Deferred - Requirement Change", "Dev In Process", "Duplicate", "Need more info", "Reopen","Retest","Requirement Change")
  all_status_codes <- levels(defects$DefectStatus)
  resolved_status_codes <- setdiff(all_status_codes, status_codes)
  severity <- levels(defects$Severity)
  ## Cross tabulations (assigned to the GLOBAL environment with `<<-`).
  ## confidence/col_confidence trim the long tail of small categories;
  ## 1.01 effectively keeps everything.
  xtab_S_St <<- getXParamCrossTab(defects, status_codes, c("DefectStatus","Severity"), confidence=1.01, col_confidence=1.01)
  xtab_FA_TA <<- getXParamCrossTab(defects, status_codes, c("FunctionalArea","TechnicalArea"), confidence=0.9, col_confidence=0.99)
  xtab_FA_P <<- getXParamCrossTab(defects, status_codes, c("FunctionalArea","Product"), confidence=0.8, col_confidence=0.9)
  xtab_TA_P <<- getXParamCrossTab(defects, status_codes, c("TechnicalArea","Product"), confidence=0.91, col_confidence=0.91)
  xtab_RCA_Env <<- getXParamCrossTab(defects, resolved_status_codes,c("RCACode","DetectedEnv"), confidence=0.96, col_confidence=0.995)
  xtab_RCA_DS <<- getXParamCrossTab(defects, resolved_status_codes,c("DefectSource","RCACode"), confidence=0.99, col_confidence=0.95)
  xtab_RCA_P <<- getXParamCrossTab(defects, resolved_status_codes, c("RCACode","Product"), confidence=0.90, col_confidence=0.95)
  ls_cumDefects <<- cumulativeDefects(defects)
  defOpen <- ls_cumDefects[["cumOpen"]]
  defClose <- ls_cumDefects[["cumClose"]]
  ## Open the output PDF (replacing any earlier copy).  Each chart below
  ## is drawn twice: once to establish the axes, then grid() for the
  ## background lines, then the same chart again with add=TRUE so the
  ## bars sit ON TOP of the grid lines.
  fileName <- "CCUW_Defects_Analysis_1Jul15.pdf"
  if(file.exists(fileName)){
    file.remove(fileName)
  }
  pdf(fileName)
  par("cex"=0.8,"cex.axis"=0.8)
  ## Plot the first graph with Status and Severity (grouped bars)
  n_colors <- nrow(xtab_S_St)
  n_columns <- ncol(xtab_S_St)
  cols <- c("yellow","red")
  barplot(t(xtab_S_St), ylim=c(0,max(xtab_S_St[,1])), main="Defect Status and Severity", xlab="Defect Status", ylab="Defect Count", axes=TRUE,cex.axis=par("cex"), cex.names=par("cex.axis"), beside=TRUE,col=cols,legend=TRUE)
  grid( round(max(xtab_S_St[,1])*1.1/20,0),lty=1,lwd=0.5)
  barplot(t(xtab_S_St), ylim=c(0,max(xtab_S_St[,1])), main="Defect Status and Severity", xlab="Defect Status", ylab="Defect Count", axes=TRUE,cex.axis=par("cex"), cex.names=par("cex.axis"), beside=TRUE,col=cols,legend=TRUE,add=TRUE)
  box()
  ## Plot the second graph with Functional Area and Technical Area (stacked)
  n_colors <- nrow(xtab_FA_TA)
  n_columns <- ncol(xtab_FA_TA)
  #cols <- colors()[sample(450:550,n_colors)]
  cols <- rainbow(n_colors)
  barplot(xtab_FA_TA, ylim=c(0,sum(xtab_FA_TA[,1])), main="Open Defects by Functionality and Technology", xlab="Technical Area", ylab="Defect Count", axes=TRUE,cex.axis=par("cex"), cex.names=par("cex.axis"), beside=FALSE,col=cols,legend=TRUE)
  grid( round(max(xtab_FA_TA[,1])/20,0),lty=1,lwd=0.5)
  barplot(xtab_FA_TA, ylim=c(0,sum(xtab_FA_TA[,1])), main="Open Defects by Functionality and Technology", xlab="Technical Area", ylab="Defect Count", axes=TRUE,cex.axis=par("cex"), cex.names=par("cex.axis"), beside=FALSE,col=cols,legend=TRUE, add=TRUE)
  box()
  ## Plot the third graph with Functional Area and Product (stacked)
  n_colors <- nrow(xtab_FA_P)
  n_columns <- ncol(xtab_FA_P)
  cols <- rainbow(n_colors)
  barplot(xtab_FA_P, ylim=c(0,sum(xtab_FA_P[,1])), main="Open Defects by Functionality and Product", xlab="Product", ylab="Defect Count", axes=TRUE,cex.axis=par("cex"), cex.names=par("cex.axis"), beside=FALSE,col=cols,legend=TRUE)
  grid( round(max(xtab_FA_P[,1])/25,0),lty=1,lwd=0.5)
  barplot(xtab_FA_P, ylim=c(0,sum(xtab_FA_P[,1])), main="Open Defects by Functionality and Product", xlab="Product", ylab="Defect Count", axes=TRUE,cex.axis=par("cex"), cex.names=par("cex.axis"), beside=FALSE,col=cols,legend=TRUE, add=TRUE)
  box()
  ## Plot the fourth graph with Technical Area and Product (stacked)
  n_colors <- nrow(xtab_TA_P)
  n_columns <- ncol(xtab_TA_P)
  cols <- terrain.colors(n_colors)
  barplot(xtab_TA_P, ylim=c(0,sum(xtab_TA_P[,1])), main="Open Defects by Technical Area and Product", xlab="Product", ylab="Defect Count", axes=TRUE,cex.axis=par("cex"), cex.names=par("cex.axis"), beside=FALSE,col=cols,legend=TRUE)
  grid( round(max(xtab_TA_P[,1])/25,0),lty=1,lwd=0.5)
  barplot(xtab_TA_P, ylim=c(0,sum(xtab_TA_P[,1])), main="Open Defects by Technical Area and Product", xlab="Product", ylab="Defect Count", axes=TRUE,cex.axis=par("cex"), cex.names=par("cex.axis"), beside=FALSE,col=cols,legend=TRUE, add=TRUE)
  box()
  ## Plot the fifth graph with RCA Code and Environment Detected (stacked)
  n_colors <- nrow(xtab_RCA_Env)
  n_columns <- ncol(xtab_RCA_Env)
  cols <- heat.colors(n_colors)
  barplot(xtab_RCA_Env, ylim=c(0,sum(xtab_RCA_Env[,1])), main="Defects by RCA Code and Environment detected", xlab="Environment Detected", ylab="Defect Count", axes=TRUE,cex.axis=par("cex"), cex.names=par("cex.axis"), beside=FALSE,col=cols,legend=TRUE)
  grid( round(max(xtab_RCA_Env[,1])/25,0),lty=1,lwd=0.5)
  barplot(xtab_RCA_Env, ylim=c(0,sum(xtab_RCA_Env[,1])), main="Defects by RCA Code and Environment detected", xlab="Environment Detected", ylab="Defect Count", axes=TRUE,cex.axis=par("cex"), cex.names=par("cex.axis"), beside=FALSE,col=cols,legend=TRUE, add=TRUE)
  box()
  ## Plot the Sixth graph with RCA Code and Defect Source (stacked)
  n_colors <- nrow(xtab_RCA_DS)
  n_columns <- ncol(xtab_RCA_DS)
  cols <- cm.colors(n_colors)
  barplot(xtab_RCA_DS, ylim=c(0,sum(xtab_RCA_DS[,1])), main="Defects by RCA Code and Defect Source", xlab="RCA Code", ylab="Defect Count", axes=TRUE,cex.axis=par("cex"), cex.names=par("cex.axis"), beside=FALSE,col=cols,legend=TRUE)
  grid( round(max(xtab_RCA_DS[,1])/25,0),lty=1,lwd=0.5)
  barplot(xtab_RCA_DS, ylim=c(0,sum(xtab_RCA_DS[,1])), main="Defects by RCA Code and Defect Source", xlab="RCA Code",ylab="Defect Count", axes=TRUE,cex.axis=par("cex"), cex.names=par("cex.axis"), beside=FALSE,col=cols,legend=TRUE, add=TRUE)
  box()
  ## Plot the graph with cumulative defects: opened (red, solid) vs
  ## closed (blue, dashed) per month
  plot(defOpen$startDate, defOpen$cumSum, type="l", col="red", lty=1,lwd=2, main="Cumulative Defects", xlab="Month", ylab="Defect Count",axes=FALSE)
  axis(side=1, at=defOpen$startDate, labels=format(defOpen$startDate,"%b-%y"))
  axis(side=2, at=NULL, labels=TRUE, las=2, ylim=c(0,1.1*max(defOpen$cumSum)))
  lines(defClose$closeDate, defClose$cumSum, type="l", col="blue", lty=2,lwd=2, axes=FALSE)
  grid(max(defOpen$cumSum)/100,lty=1,lwd=0.5)
  box()
  ## Plot the graph with RCA Code and Product (stacked, rotated labels)
  n_colors <- nrow(xtab_RCA_P)
  n_columns <- ncol(xtab_RCA_P)
  cols <- terrain.colors(n_colors)
  barplot(xtab_RCA_P, ylim=c(0,sum(xtab_RCA_P[,1])), main="Understanding RCA Codes by Product", xlab="Product", ylab="Defect Count", axes=TRUE,cex.axis=par("cex"), cex.names=par("cex.axis"), beside=FALSE,col=cols,legend=TRUE,las=3, srt=45)
  grid( round(max(xtab_RCA_P[,1])/25,0),lty=1,lwd=0.5)
  barplot(xtab_RCA_P, ylim=c(0,sum(xtab_RCA_P[,1])), main="Understanding RCA Codes by Product", xlab="Product", ylab="Defect Count", axes=TRUE,cex.axis=par("cex"), cex.names=par("cex.axis"), beside=FALSE,col=cols,legend=TRUE, add=TRUE, las=3, srt=45)
  box()
  dev.off()
}
## This function sets the correct column names that are easy to understand and use
## The function assumes that file is as per a standard template derived from HPQC
## Rename the raw HPQC export columns (positions 1-49) to the short,
## readable identifiers used throughout the rest of this file.
##
## Args:
##   defects: data.frame read from the standard HPQC defect export
##            template; it must have at least 49 columns.
## Returns: the same data.frame with columns renamed.  Columns 33 and 34
## deliberately keep their original names, matching the original code.
##
## Fixes: the original guard `class(defects) == "data.frame"` yields a
## length > 1 logical for multi-class objects (e.g. tibbles), which is
## an error in `if()` on R >= 4.2 — replaced with inherits().  The 48
## one-at-a-time assignments are collapsed into two vectorized renames.
defineColNames <- function(defects) {
  if (!inherits(defects, "data.frame")) {
    stop("Invalid input - this function expects defects data as a dataframe")
  }
  ## Columns 1-32 of the export template, in order
  colnames(defects)[1:32] <- c(
    "DefectID", "DetectedDate", "TargetFixDate", "DefectStatus",
    "Severity", "Priority", "AssignedTo", "ScrumTeam",
    "BusinessDescription", "TechnicalArea", "Title", "RelatedDefectID",
    "Product", "ResolutionRelNo", "DetectedEnv", "SpecialTag",
    "DetectedBy", "IssueType", "Coverages", "FunctionalArea",
    "FeatureArea", "ESystem", "TransactionType", "ClosingDate",
    "PriorityReviewed", "DefectCategory", "DevCommitFixDate", "State",
    "ResolutionBuildNo", "Project", "UATDefectID", "DefectSource")
  ## Columns 35-49 (33 and 34 are intentionally left untouched)
  colnames(defects)[35:49] <- c(
    "ActualFixDate", "CodeReviewed", "DetectedInCycle",
    "DetectedInRelease", "FormID", "FormType", "IncidentTicketCount",
    "CreateDate", "AnalysisCompleteDate", "DevCompleteDate",
    "MassDMLRequired", "RCACode", "RCADescription",
    "RCAUpdateComments", "UATCycle")
  return(defects)
}
## This function takes the defects dataframe and converst some of the key columns into factors
## This function has to be called after the column names are modified appropriately - else it will not work
## Convert the categorical analysis columns of the defects data frame
## to factors so that cross tabulations treat them as categories.
## Must be called AFTER defineColNames(), since it addresses columns by
## their renamed labels.
##
## Args:
##   defects: data.frame whose columns have been renamed by
##            defineColNames().
## Returns: the same data.frame with the listed columns as factors.
##
## Fixes: `class(defects) == "data.frame"` is a length > 1 comparison
## for multi-class objects (error in `if()` on R >= 4.2) — replaced
## with inherits(); the twelve copy-pasted assignments are replaced by
## a loop over a named column vector.
convertToFactors <- function(defects){
  if (!inherits(defects, "data.frame")) {
    stop("Invalid input - this function expects defects as a dataframe")
  }
  factor_cols <- c("DefectStatus", "Severity", "Priority",
                   "TechnicalArea", "Product", "DetectedEnv",
                   "FunctionalArea", "IssueType", "TransactionType",
                   "DefectCategory", "DefectSource", "RCACode")
  for (col in factor_cols) {
    defects[[col]] <- as.factor(defects[[col]])
  }
  return(defects)
}
## This function takes a) defects dataframe, b) status codes c) list of 2 parameters and returns a cross tab based on the two parameters
## If more than 2 parameters are provided, the function takes only the first 2 parameters
## Build a sorted/trimmed cross tabulation of two defect attributes,
## restricted to defects whose status is in `status_codes`.
##
## Args:
##   defects:        defects data.frame (renamed + factored).
##   status_codes:   character vector of DefectStatus values to keep.
##   parameters:     names of the two columns to cross-tabulate; only
##                   the first two entries are used.
##   confidence:     row-wise cumulative-share cutoff passed to
##                   processXTab() (1.01 keeps all rows).
##   col_confidence: column-wise cumulative-share cutoff.
## Returns: the matrix produced by processXTab().
##
## Fixes: the original located columns with grep(), which does partial
## matching and can return SEVERAL indices (e.g. "Priority" also
## matches "PriorityReviewed"), silently breaking the xtabs() call.
## match() performs an exact, single-column lookup instead.
getXParamCrossTab <- function(defects=data.frame(), status_codes = character(), parameters=character(), confidence=1.01,col_confidence=1.01){
  if(length(parameters)<2){
    stop("Insufficient Parameters")
  }
  ## Keep only the defects in the requested statuses
  sub_defects <- subset(defects, defects$DefectStatus %in% status_codes)
  ## Exact column lookup (see note above about grep partial matching)
  c1 <- match(parameters[1], names(sub_defects))
  c2 <- match(parameters[2], names(sub_defects))
  if (is.na(c1) || is.na(c2)) {
    stop("parameters must name columns of the defects data frame")
  }
  df_xtab <- xtabs(~ sub_defects[,c1] + sub_defects[,c2], data=sub_defects, exclude=c("","<<None>>"))
  df_xtab <- processXTab(df_xtab, confidence, col_confidence)
  return(df_xtab)
}
## This function takes a cross tab and performs the following:
## a) Sorts by rows and columns, b) adds row sum and column sum, c) adds percentage values and d) adds cumulative percentage
## This function will work only if a cross tab is passed in the argument
## Sort a cross tabulation by row and column totals (both descending)
## and trim the long tail of small categories.
##
## Steps (order preserved from the original):
##  1. sort rows by row totals, then columns by column totals;
##  2. append row_sum / row_pct / row_cumSum helper columns;
##  3. drop rows whose cumulative share exceeds `confidence` (this also
##     removes the temporary "col_sum" totals row, whose cumulative
##     share is always 2);
##  4. keep only the leading columns whose cumulative share of the
##     grand total stays within `col_confidence`.
##
## Args:
##   df_xtab:        xtabs() table or numeric matrix with dimnames.
##   confidence:     row cumulative-share cutoff (1.01 keeps all rows).
##   col_confidence: column cumulative-share cutoff.
## Returns: trimmed numeric matrix of counts (helper columns removed).
##
## Fixes: every matrix subset now uses drop = FALSE.  The original
## dropped to a plain vector whenever a filter left a single row or
## column, which made the following apply()/"[ , name]" calls fail.
processXTab <- function(df_xtab, confidence=1.01, col_confidence=1.01){
  xrow <- nrow(df_xtab)
  xcol <- ncol(df_xtab)
  ## Add the row sum and sort rows in descending order, then remove the
  ## helper column again
  row_sum <- apply(df_xtab, 1, sum)
  df_xtab <- cbind(df_xtab, row_sum)
  df_xtab <- df_xtab[order(df_xtab[, xcol + 1], decreasing = TRUE), , drop = FALSE]
  df_xtab <- df_xtab[, -(xcol + 1), drop = FALSE]
  ## Add a column-totals row and sort columns in descending order (the
  ## "col_sum" row stays for now; the row filter below removes it)
  col_sum <- apply(df_xtab, 2, sum)
  df_xtab <- rbind(df_xtab, col_sum)
  df_xtab <- df_xtab[, order(df_xtab[xrow + 1, ], decreasing = TRUE), drop = FALSE]
  ## Re-attach row sums, then row shares of the grand total
  row_sum <- apply(df_xtab, 1, sum)
  df_xtab <- cbind(df_xtab, row_sum)
  row_pct <- df_xtab[, "row_sum"] / df_xtab["col_sum", "row_sum"]
  df_xtab <- cbind(df_xtab, row_pct)
  ## If the largest row alone already exceeds the cutoff, keep it
  if (row_pct[1] >= confidence) {
    confidence <- row_pct[1]
  }
  ## Cumulative row shares; drop rows past the confidence cutoff
  row_cumSum <- cumsum(df_xtab[, "row_pct"])
  df_xtab <- cbind(df_xtab, row_cumSum)
  df_xtab <- df_xtab[df_xtab[, "row_cumSum"] <= confidence, , drop = FALSE]
  ## Column shares of the (remaining) grand total; the grand total sits
  ## in the "row_sum" column, third from the end
  col_sum <- apply(df_xtab, 2, sum)
  col_pct <- col_sum / col_sum[length(col_sum) - 2]
  col_pct <- col_pct[1:(length(col_pct) - 3)]
  ## If the largest column alone exceeds the cutoff, keep all columns
  if (col_pct[1] >= col_confidence) {
    col_confidence <- 1.0
  }
  cum_col_pct <- cumsum(col_pct)
  index <- length(cum_col_pct[cum_col_pct <= col_confidence])
  df_xtab <- as.matrix(df_xtab[, 1:index, drop = FALSE])
  return(df_xtab)
}
## This function takes the defects and returns the cumulative defects open and close numbers data
## Compute per-month cumulative counts of opened and closed defects.
##
## Args:
##   defects: defects data.frame with character columns DetectedDate and
##            ClosingDate in "%m/%d/%Y" format, plus DefectID.
## Returns: list(cumOpen = <startDate, openCnt, cumSum>,
##               cumClose = <closeDate, closeCnt, cumSum>), one row per
## calendar month, each sorted ascending with a running cumulative sum.
## Closures before the first month in which any defect was opened are
## dropped so both curves start on the same axis range.
cumulativeDefects <- function(defects){
  # Order the defects by detected date.
  # NOTE(review): this sorts the raw m/d/Y STRINGS lexicographically,
  # not chronologically; harmless here because tapply/cut below group by
  # month regardless of input order, and both result frames are
  # re-sorted by their parsed Date columns.
  defects <- defects[order(defects[,"DetectedDate"],decreasing=FALSE),]
  defects$startDate <- as.Date(defects$DetectedDate,"%m/%d/%Y")
  defects$closeDate <- as.Date(defects$ClosingDate, "%m/%d/%Y")
  # count defects opened per calendar month
  temp <- tapply(defects$DefectID, cut(defects$startDate, "month"), length)
  openDefects <- data.frame(startDate=as.Date(names(temp),"%Y-%m-%d"),openCnt=as.numeric(temp))
  openDefects <- openDefects[order(openDefects[,"startDate"],decreasing=FALSE),]
  openDefects$cumSum <- cumsum(openDefects$openCnt)
  # count defects closed per calendar month, ignoring rows with no
  # closing date and closures before the first open month
  t_closeDefects <- data.frame(ID=defects$DefectID, closeDate=defects$closeDate)
  t_closeDefects <- subset(t_closeDefects, !is.na(t_closeDefects$closeDate))
  t_closeDefects <- subset(t_closeDefects, t_closeDefects$closeDate >= min(openDefects$startDate))
  t_temp <- tapply(t_closeDefects$ID, cut(t_closeDefects$closeDate, "month"), length)
  closeDefects <- data.frame(closeDate=as.Date(names(t_temp),"%Y-%m-%d"), closeCnt=as.numeric(t_temp))
  closeDefects <- closeDefects[order(closeDefects[,"closeDate"],decreasing=FALSE),]
  closeDefects$cumSum <- cumsum(closeDefects$closeCnt)
  rm(temp)
  rm(t_temp)
  ls_cumDefects <- list(cumOpen=openDefects, cumClose=closeDefects)
  return(ls_cumDefects)
}
4a39b51215105b150d8c870ea2dd22c2f4184bb0 | fe705643e2a84e96b8bfb6bc906e2900c2a35427 | /R/pca_transformation_matrix.R | 33ef167cd2e0624c85cb9d840e3425c421dd5a46 | [] | no_license | cran/panelvar | 819f61afaf1c8f5d8672d4504596bc76a9b7f4d6 | fc1f301a808220594b9b7ce7edecb0beaea05f72 | refs/heads/master | 2023-01-22T10:50:46.918230 | 2023-01-05T16:40:05 | 2023-01-05T16:40:05 | 127,956,201 | 7 | 3 | null | null | null | null | UTF-8 | R | false | false | 5,481 | r | pca_transformation_matrix.R | # PCA Transformation Matrix
#
#@param corrected_instruments_F
#@param nof_categories Number of categories
#@param nof_predet_vars Number of predetermined variables
#@param nof_exog_vars Number of exogeneous variables
#@param system_instruments System GMM estimator
#@param system_constant Constant only available with the System GMM estimator in each equation
#@param position_exo TODO
#@param pca_eigenvalue Cut-off eigenvalue for pca analysis
#
#@description PCA transformation of instruments matrix
#@references Roodman
pca_transformation <- function(corrected_instruments_F,
nof_categories,
nof_predet_vars,
nof_exog_vars,
system_instruments,
system_constant,
position_exo,
pca_eigenvalue){
`%ni%`<- Negate(`%in%`)
original_instruments <- corrected_instruments_F
# Remove rows the exogenous instruments:
# Be careful if there are system instruments (the row of 1 in the last.)
if (nof_exog_vars > 0){
# Make sure that "FALSE" is the default value:
if (system_instruments == FALSE){
corrected_instruments_F <-
mapply(function(i) head(original_instruments[[i]], -nof_exog_vars), 1:nof_categories, SIMPLIFY = FALSE)
}
if (system_instruments == TRUE){
# remove the row of 1.
if (system_constant == TRUE){
corrected_instruments_F <-
mapply(function(i) head(original_instruments[[i]], -1),1:nof_categories, SIMPLIFY = FALSE)
}
# now remove the rows of the FD-Exogenous variables:
corrected_instruments_F <-
mapply(function(i) original_instruments[[i]][-(position_exo:(position_exo+nof_exog_vars-1)),],1:nof_categories, SIMPLIFY = FALSE)
}
}
corrected_instruments_F_T <- list()
for (i0 in 1:length(corrected_instruments_F)){
corrected_instruments_F_T[[i0]] <- t(corrected_instruments_F[[i0]])
corrected_instruments_F_T[[i0]] <- corrected_instruments_F_T[[i0]][rowSums(corrected_instruments_F_T[[i0]] != 0) > 0,]
}
corrected_instruments_F_T_pca <- corrected_instruments_F_T[[1]]
if (length(corrected_instruments_F) > 1){
for (i0 in 2:length(corrected_instruments_F)){
corrected_instruments_F_T_pca <- rbind(corrected_instruments_F_T_pca, corrected_instruments_F_T[[i0]])
}
}
# save zero cols:
zero_rows <- unique(which(colSums(corrected_instruments_F_T_pca) == 0))
t_corrected_instruments_F_pca <- corrected_instruments_F_T_pca[,colSums(corrected_instruments_F_T_pca != 0) > 0]
# Remove the row with the constant for the pca analysis if sytsem = TRUE:s
if (system_instruments == TRUE){
zero_rows <- c(zero_rows, ncol(corrected_instruments_F_T_pca))
t_corrected_instruments_F_pca <- t_corrected_instruments_F_pca[,-ncol(t_corrected_instruments_F_pca)]
}
# Use the new t_corrected_instruments_F_pca as input:
ir.pca <- prcomp(t_corrected_instruments_F_pca,
center = TRUE,
scale. = TRUE,
tol = 1e-15)
ir.pca.components <- ir.pca$sdev*ir.pca$sdev
#ir.pca.components
nof_pca_rotation_cols <- length(ir.pca.components[ir.pca.components > pca_eigenvalue ])
# Take care of rows that carry no information in the pca:
# Add them for the matrix multiplication:
if (dim(t_corrected_instruments_F_pca)[2] < dim(corrected_instruments_F_T_pca)[2]){
zero_cols <- dim(corrected_instruments_F_T_pca)[2] - dim(t_corrected_instruments_F_pca)[2]
added.test.rotation <- matrix(0, nrow = nrow(ir.pca$rotation)+zero_cols, ncol = ncol(ir.pca$rotation))
# Add one column because the matrices do not fit otherwise.
not_zero_rows <- c(c(1:dim(corrected_instruments_F_T_pca)[2]) %ni% zero_rows)
# Add row to added.test.rotation at certain positions to solve dimension problem:
added.test.rotation[not_zero_rows,] <- ir.pca$rotation
#added.test.rotation[1:nrow(ir.pca$rotation), 1:ncol(ir.pca$rotation)] <- ir.pca$rotation
}
if (dim(t_corrected_instruments_F_pca)[2] == dim(corrected_instruments_F_T_pca)[2]){
added.test.rotation <- ir.pca$rotation
}
z_test <- list()
for (i0 in 1:length(corrected_instruments_F)){
z_test[[i0]] <- t(as.matrix(corrected_instruments_F[[i0]])) %*% added.test.rotation[,1:nof_pca_rotation_cols]
}
Q <- mapply(function(i) t(z_test[[i]]), 1:nof_categories, SIMPLIFY = FALSE)
# Add instrument for exogenous variables if included:
if (nof_exog_vars > 0){
instruments_exogenous <- mapply(function(i) original_instruments[[i]][(position_exo:(position_exo+nof_exog_vars-1)),],
1:nof_categories, SIMPLIFY = FALSE)
# Add these variables in the correct position.
Q <- mapply(function(i) rbind(t(z_test[[i]]), instruments_exogenous[[i]]), 1:nof_categories, SIMPLIFY = FALSE)
}
# Add the removed colum of the 1s:
if (system_instruments == TRUE){
add_system_instruments_constant <- mapply(function(i) tail(original_instruments[[i]], 1),
1:nof_categories, SIMPLIFY = FALSE)
#Q <- mapply(function(i) rbind(t(z_test[[i]]), add_system_instruments_constant[[i]]), 1:nof_categories, SIMPLIFY = FALSE)
Q <- mapply(function(i) rbind(Q[[i]], add_system_instruments_constant[[i]]), 1:nof_categories, SIMPLIFY = FALSE)
}
return(Q)
} |
c0ac44c706419fb0cb5c1b1771e88155f6abf8a8 | db3a029c4711f7bb2b469d61b3a117687f936161 | /Code/geneticPCA.R | c8f9d7af6754a6ea494692e9551c19c28e7296c9 | [] | no_license | KatiePelletier/WingShapeBSA | 455d0f6750c3ca62aae6b6044d84412ab4909eeb | 42913eb33502269b74c4728f41de18c3203b418c | refs/heads/master | 2023-02-24T09:56:51.615109 | 2023-02-14T14:32:03 | 2023-02-14T14:32:03 | 236,039,708 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,327 | r | geneticPCA.R | #trying to run SMARTPCA for the paper. Using the smartsnp package.
library(smartsnp)
library(tidyverse)
crap <- fread("../Data/wildpops_controlPool.snp")
# pathToGenoFile = system.file("extdata", "dataSNP", package = "smartsnp")
#
# my_groups <- as.factor(c(rep("A", 50), rep("B", 50))); cols = c("red", "blue")
#
# groups <- c(1,2,3,4)
#
# mvaR <- smart_mva(snp_data = crap, sample_group = groups)
# mvaR$pca$pca.eigenvalues # extract PCA eigenvalues
# mvaR$pca$pca.snp_loadings # extract principal coefficients (SNP loadings)
# mvaR$pca$pca.sample_coordinates # extract PCA principal components (sample position in PCA space)
#
# plot(mvaR$pca$pca.sample_coordinates[,c("PC1","PC2")], cex = 2,
# pch = 19, col = cols[my_groups], main = "genotype smartpca")
# legend("topleft", legend = levels(my_groups), cex = 1,
# pch = 19, col = cols, text.col = cols)
#I think I want to scale these so every SNP 'votes' equally? I think that is what I am doing there? Ask Ian?
geneticpca <- prcomp(t(crap), center = TRUE, scale = TRUE)
shit <- data.frame(geneticpca[["x"]])
shit$pop <- c("CMO", "FVW13", "FVW14", "PHO")
png("morecrap_forID.png")
ggplot(shit, aes(x = PC1, y = PC2, col = pop)) +
geom_point()+
labs(col = "Population")
dev.off()
ggplot(shit, aes(x = PC3, y = PC4, col = pop)) +
geom_point()
|
1a8e77bc07a23cb20c7b58eca451f13e4dbb50c2 | c44fd560a56404c54713849ecbd1cdf6a93ab10e | /specdata/pollutantmean.R | d2664490aa2d1e974e636de890ea2477b36e2418 | [] | no_license | tensoriel/GettingCleaningData | e4c3def9d568a2d53f099a5b43f78d2813cacdbb | 7aef5afb23effd92adf671b758e160244655a14e | refs/heads/master | 2016-09-10T14:08:34.430766 | 2014-09-08T23:10:50 | 2014-09-08T23:10:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 545 | r | pollutantmean.R | pollutantmean <- function(directory, pollutant, id=1:332) {
files_list <- list.files(directory, full.names=TRUE) #creates a list of files
dat <- data.frame() #creates an empty data frame
for (i in id) { #loops through the files, rbinding them together
dat <- rbind(dat, read.csv(files_list[i]))
}
dat_subset <- dat[, pollutant] #subsets the rows that match the 'day' argument
mean(dat_subset, na.rm=TRUE) #identifies the median of the subset
}
|
9c43d60cdebab25b981b4749b389441b486ef3e3 | 6d5a1542336fa6e14bf44121529c7b1a900d60f0 | /opt/gurobi501/linux64/examples/R/qcp.R | d931167500e489777fdb893f020586eed596ee20 | [] | no_license | revisalo/cr2 | 35566874efe3459147635c29106d7b0a7fcd0a02 | e7b3d5993d4ee0901b138e52388b146103001d51 | refs/heads/master | 2020-12-30T09:26:10.284537 | 2012-11-14T14:45:20 | 2012-11-14T14:45:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 749 | r | qcp.R | # Copyright 2012, Gurobi Optimization, Inc.
#
# This example formulates and solves the following simple QCP model:
# minimize
# x^2 + x*y + y^2 + y*z + z^2
# subject to
# x + 2 y + 3z >= 4
# x + y >= 1
# t = 0.7071
# [ x ^ 2 + y ^ 2 - t ^ 2 ] < = 0 (a second-order cone constraint)
library("gurobi")
model <- list()
model$A <- matrix(c(1,2,3,0,1,1,0,0,0,0,0,1), nrow=3, byrow=T)
model$Q <- matrix(c(2,1,0,0,1,2,1,0,0,1,2,0,0,0,0,0), nrow=4, byrow=T)
model$cones <- list(list(4,1,2))
model$obj <- c(1,2,3,0)
model$rhs <- c(4,1,0.717067811)
model$sense <- c('>=', '>=', '=')
result <- gurobi(model)
print(result$objval)
print(result$x)
|
ffde1a245443a97da64e3358f0ce508687ddacaf | 2d34708b03cdf802018f17d0ba150df6772b6897 | /googleadexchangebuyerv13.auto/man/Account.Rd | ac2777e29f37d636b096ddf97abf09dff5e682aa | [
"MIT"
] | permissive | GVersteeg/autoGoogleAPI | 8b3dda19fae2f012e11b3a18a330a4d0da474921 | f4850822230ef2f5552c9a5f42e397d9ae027a18 | refs/heads/master | 2020-09-28T20:20:58.023495 | 2017-03-05T19:50:39 | 2017-03-05T19:50:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,848 | rd | Account.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adexchangebuyer_objects.R
\name{Account}
\alias{Account}
\title{Ad Exchange Buyer API Objects
Accesses your bidding-account information, submits creatives for validation, finds available direct deals, and retrieves performance reports.}
\usage{
Account(Account.bidderLocation = NULL, bidderLocation = NULL,
cookieMatchingNid = NULL, cookieMatchingUrl = NULL, id = NULL,
maximumActiveCreatives = NULL, maximumTotalQps = NULL,
numberActiveCreatives = NULL)
}
\arguments{
\item{Account.bidderLocation}{The \link{Account.bidderLocation} object or list of objects}
\item{bidderLocation}{Your bidder locations that have distinct URLs}
\item{cookieMatchingNid}{The nid parameter value used in cookie match requests}
\item{cookieMatchingUrl}{The base URL used in cookie match requests}
\item{id}{Account id}
\item{maximumActiveCreatives}{The maximum number of active creatives that an account can have, where a creative is active if it was inserted or bid with in the last 30 days}
\item{maximumTotalQps}{The sum of all bidderLocation}
\item{numberActiveCreatives}{The number of creatives that this account inserted or bid with in the last 30 days}
}
\value{
Account object
}
\description{
Auto-generated code by googleAuthR::gar_create_api_objects
at 2017-03-05 19:20:06
filename: /Users/mark/dev/R/autoGoogleAPI/googleadexchangebuyerv13.auto/R/adexchangebuyer_objects.R
api_json: api_json
}
\details{
Objects for use by the functions created by googleAuthR::gar_create_api_skeleton
Account Object
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Configuration data for an Ad Exchange buyer account.
}
\seealso{
Other Account functions: \code{\link{Account.bidderLocation}},
\code{\link{accounts.patch}},
\code{\link{accounts.update}}
}
|
de7a229786457aab7b521e97afae95496d1db4c4 | 88f22a2d2341215dacb12df2c36bbf263962e2d3 | /scripts/get_pred_stat.r | 5cd35056b872bc7069414fcdbff7b653db02fd09 | [] | no_license | pdworzynski/dropEst | a5e3c79ea28382c65f54626cb202bab1ea9ba5a5 | add6588a1cd13cd8eb809abc70da66354c005c1b | refs/heads/master | 2021-01-25T00:33:08.093011 | 2018-02-28T15:21:38 | 2018-02-28T15:21:38 | 123,300,878 | 0 | 0 | null | 2018-02-28T15:03:38 | 2018-02-28T15:03:37 | null | UTF-8 | R | false | false | 420 | r | get_pred_stat.r | #!/usr/bin/env Rscript
umigs_count <- c(1, 2, 3)
reads_count <- c(4, 5, 6)
for (i in 0:99) {
filename <- paste0('SRR1784310_cv_', i, '_0.rds')
rds <- readRDS(filename)
umigs_count <- c(umigs_count, length(rds$reads_by_umig))
reads_count <- c(reads_count, sum(rds$reads_by_umig))
print(i)
}
df <- cbind.data.frame("UmigsCount" = umigs_count, "ReadsCount" = reads_count)
saveRDS(df, file = "pred_stats.rds")
|
e2408585302ca0e3c242d3d11114d7c2450a19cf | 03724428d202ac5139940bbcaa0f9808ef6641b8 | /Practicas-R/Distancias/distancias.r | f62ce906d205aab9b60c8993e630a44bdc934803 | [] | no_license | R0beat/R | ee8c0e32eac08015e8437e1460367f4a8d1a7255 | b8d7df3548a3e52d793bc1e38aa5fb379bc22c38 | refs/heads/master | 2023-06-11T20:51:32.286181 | 2021-06-22T14:37:21 | 2021-06-22T14:37:21 | 331,485,402 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 420 | r | distancias.r | distancias1 <- read.csv('./distancias_mex1.csv',header=FALSE)
distancias2 <- read.csv('./distancias_mex2.csv',header=FALSE)
distancias3 <- read.csv('./distancias_mex2.csv',header=FALSE)
data <- rbind(distancias1,distancias2,distancias3)
################################################
set.seed(2020)
colnames(data)=data[,1]
data <- data[2:1992,]
muestra = sample(1:1991,100)
data1 = data[muestra,muestra+1]
View(data1) |
970d568d11879c1a202587a34b7a51617ed93a8d | 60e8554041e3c3d75a6afcaf9dd3b5b053cd70d1 | /R/gui-functions.R | 407931cd1c2b34d8812254cf5d79ec96ce1d8b59 | [] | no_license | dkidney/gibbonsecr | 34805be54baa87642ef10a850478cc069eff69e3 | 93b6be93bd3392ef4668511f40f2a7c139c13b7a | refs/heads/master | 2021-05-16T02:23:40.499150 | 2019-01-27T20:24:07 | 2019-01-27T20:24:07 | 41,083,427 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 21,258 | r | gui-functions.R | ## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
# centre a tcl window
center_window = function(tt, w = 100, h = 100){
# w = as.numeric(tclvalue(tkwinfo("width", tt)))
# h = as.numeric(tclvalue(tkwinfo("height", tt)))
scrw = as.numeric(tclvalue(tkwinfo("screenwidth", tt)))
scrh = as.numeric(tclvalue(tkwinfo("screenheight", tt)))
tkwm.geometry(tt, paste0(w, "x", h, "+", round((scrw - w) / 2), "+",
round((scrh - h) / 2)))
}
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
# Popup menu to allow user to choose classes of spatial covariates when
# importing shapefiles
# Example
# x = data.frame(
# colours = c("red", "green", "blue"),
# fruits = c("Apple", "Orange", "Banana", "Pear", "Cherry", "eggs"),
# names = c("Alice","Darren"),
# numbers = 1:6
# )
# str(x)
# classes = check_covariate_classes(x)
# print(classes)
check_covariate_classes = function(x, padx = 1){
# main window ------------------------------------------------------------ #
tt = tktoplevel()
tkwm.title(tt, "Check covariate classes")
center_window(tt, w = 150)
tcl("wm", "attributes", tt, topmost = TRUE)
tcl("wm", "attributes", tt, topmost = FALSE)
tkwm.geometry(tt, "")
tkfocus(tt)
main = ttkframe(tt)
# upper frame for entry and combo boxes ---------------------------------- #
upper = ttkframe(main, padding = c(5,5))
tkgrid(ttklabel(upper, text = "Use"), row = 0, column = 1, sticky = "w")
tkgrid(ttklabel(upper, text = "Name"), row = 0, column = 2, sticky = "w")
tkgrid(ttklabel(upper, text = "Type"), row = 0, column = 3, sticky = "w")
tkgrid(ttklabel(upper, text = "Center"), row = 0, column = 4, sticky = "w")
tkgrid(ttklabel(upper, text = "Scale"), row = 0, column = 5, sticky = "w")
tkgrid(ttklabel(upper, text = "Log"), row = 0, column = 6, sticky = "w")
# 'use' checkbox
use.tvar = list()
use = list()
use_command_base = function(j){
state = if(tclvalue(use.tvar[[j]]) == "1") "normal" else "disabled"
tkconfigure(entry[[j]], state = state)
tkconfigure(combo[[j]], state = state)
tkconfigure(center[[j]], state = state)
tkconfigure(scale[[j]], state = state)
tkconfigure(log[[j]], state = state)
}
use_command = function(j){
eval(parse(text = paste0("function() use_command_base(", j, ")")))
}
# combobox
combo.char = ifelse(sapply(x, is.numeric), "number", "category")
combo.tvar = list()
combo = list()
combo_command_base = function(j){
state = if(tclvalue(combo.tvar[[j]]) == "number") "normal" else "disabled"
tkconfigure(center[[j]], state = state)
tkconfigure(scale[[j]], state = state)
tkconfigure(log[[j]], state = state)
}
combo_command = function(j){
eval(parse(text = paste0("function() combo_command_base(", j, ")")))
}
# entrybox
entry.char = colnames(x)
entry.tvar = list()
entry = list()
# center checkbox
center.tvar = list()
center = list()
# scale checkbox
scale.tvar = list()
scale = list()
# log checkbox
log.tvar = list()
log = list()
# make and pack
for(j in 1:ncol(x)){ # j=1
use.tvar[[j]] = tclVar(1)
use[[j]] = ttkcheckbutton(upper, variable = use.tvar[[j]],
state = "normal", command = use_command(j))
tkgrid(use[[j]], row = j, column = 1)
entry.tvar[[j]] = tclVar(entry.char[j])
entry[[j]] = ttkentry(upper, textvariable = entry.tvar[[j]], width = 30,
state = "normal")
tkgrid(entry[[j]], row = j, column = 2)
combo.tvar[[j]] = tclVar(combo.char[j])
combo[[j]] = ttkcombobox(upper, textvariable = combo.tvar[[j]], width = 10,
state = "normal", values = c("number", "category"))
tkbind(combo[[j]], "<<ComboboxSelected>>", combo_command(j))
tkgrid(combo[[j]], row = j, column = 3)
state = if(tclvalue(combo.tvar[[j]]) == "number") "normal" else "disabled"
center.tvar[[j]] = tclVar(0)
center[[j]] = ttkcheckbutton(upper, variable = center.tvar[[j]],
state = state)
tkgrid(center[[j]], row = j, column = 4)
scale.tvar[[j]] = tclVar(0)
scale[[j]] = ttkcheckbutton(upper, variable = scale.tvar[[j]],
state = state)
tkgrid(scale[[j]], row = j, column = 5)
log.tvar[[j]] = tclVar(0)
log[[j]] = ttkcheckbutton(upper, variable = log.tvar[[j]],
state = state)
tkgrid(log[[j]], row = j, column = 6)
}
# lower frame for buttons ------------------------------------------------ #
lower = ttkframe(main, padding = c(5,5))
done = tclVar(0)
ok = ttkbutton(
lower, text = "OK", state = "normal", width = 10, command = function(){
names = sapply(entry.tvar, tclvalue)
duplicates = duplicated(names)
if(any(duplicates)){
tkmessageBox(title = "Error", icon = "error", type = "ok",
message = "Duplicate names not allowed")
stop(.call = FALSE)
}
tclvalue(done) = 1
}
)
cancel = ttkbutton(lower, text = "Cancel", state = "normal", width = 10,
command = function() tclvalue(done) = 2)
# packing etc. ----------------------------------------------------------- #
tkgrid(ok, cancel, padx = padx)
tkpack(upper)
tkpack(lower)
tkpack(main)
tkwm.resizable(tt, 0, 0)
tkwm.protocol(tt, "WM_DELETE_WINDOW", function() tclvalue(done) = 2)
tkwait.variable(done)
# return clasess --------------------------------------------------------- #
result = if(tclvalue(done) == 1){
data.frame(
use = sapply(use.tvar, tclvalue) == "1",
name = sapply(entry.tvar, tclvalue),
class = sapply(combo.tvar, tclvalue),
center = sapply(center.tvar, tclvalue) == "1",
scale = sapply(scale.tvar, tclvalue) == "1",
log = sapply(log.tvar, tclvalue) == "1",
stringsAsFactors = FALSE
)
}else{
data.frame(
use = rep(TRUE, ncol(x)),
name = entry.char,
class = combo.char,
center = rep(TRUE, ncol(x)),
scale = rep(TRUE, ncol(x)),
log = rep(TRUE, ncol(x)),
stringsAsFactors = FALSE
)
}
tkdestroy(tt)
return(result)
}
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
# popup window to allow user to choose covariate values when making model
# predictions and plotting detfunc, bearings and distances
choose_newdata = function(fit, submodels = NULL, all = TRUE, padx = 1){
if(is.null(submodels))
submodels = names(fit$parindx) ; submodels
if(length(do.call(c, lapply(fit$model[submodels], all.vars))) == 0)
return(NULL)
# toplevel window and main frame ----------------------------------------- #
tt = tktoplevel()
tkwm.title(tt, "Choose prediction data")
center_window(tt)
tcl("wm", "attributes", tt, topmost = TRUE)
tcl("wm", "attributes", tt, topmost = FALSE)
tkwm.geometry(tt, "")
tkfocus(tt)
main = ttkframe(tt)
# upper frame for choosing covariate values ------------------------------ #
upper = ttkframe(main, padding = c(5,5))
tkgrid(ttklabel(upper, text = "Name"), row = 0, column = 1, sticky = "w")
tkgrid(ttklabel(upper, text = "Value(s)"), row = 0, column = 2, sticky = "w")
# summarise covariates --------------------------------------------------- #
covnames = sapply(submodels, function(i){ # submodel = "sigma"
bigmf = do.call(rbind, lapply(fit$model.frames, function(x) x[[i]]))
sapply(bigmf, function(x){
if(inherits(x, "factor")) levels(x) else (range(x))
}, simplify = FALSE)
}, simplify = FALSE)
covnames = do.call(c, unname(covnames))
covnames = covnames[!duplicated(names(covnames))]
covnames = covnames[order(names(covnames))]
# add a combo / entry for each covariate --------------------------------- #
box = boxvar = list()
for(i in names(covnames)){
if(inherits(covnames[[i]], "character")){
boxvar[[i]] = tclVar(covnames[[i]][1])
values = if(all) c(covnames[[i]], "all") else covnames[[i]]
box[[i]] = ttkcombobox(parent = upper,
textvariable = boxvar[[i]],
values = values,
width = 30)
}else{
values[[i]] = tclVar(mean(covnames[[i]]))
box[[i]] = ttkentry(parent = upper,
textvariable = values[[i]],
width = 30)
}
row = which(names(covnames) == i)
tkgrid(ttklabel(upper, text = i), row = row, column = 1, sticky = "w")
tkgrid(box[[i]], row = row, column = 2, sticky = "w")
}
# lower frame for buttons ------------------------------------------------ #
lower = ttkframe(main, padding = c(5,5))
done = tclVar(0)
ok = ttkbutton(
lower, text = "OK", state = "normal", width = 10,
command = function(){
# throw error if numeric variable outside observed range
for(i in names(covnames)){
if(!inherits(covnames[[i]], "character")){
x = as.numeric(tclvalue(boxvar[[i]]))
if(x < covnames[[i]][1] || x > covnames[[i]][2]){
tkmessageBox(title = "Error", icon = "error", type = "ok",
message = paste("value for", i,
"is outside observed range"))
stop(.call = FALSE)
}
}
}
tclvalue(done) = 1
}
)
cancel = ttkbutton(lower, text = "Cancel", state = "normal", width = 10,
command = function() tclvalue(done) = 2)
# packing etc. ----------------------------------------------------------- #
tkgrid(ok, cancel, padx = padx)
tkpack(upper)
tkpack(lower)
tkpack(main)
tkwm.resizable(tt, 0, 0)
tkwm.protocol(tt, "WM_DELETE_WINDOW", function() tclvalue(done) = 2)
tkwait.variable(done)
# return newdata --------------------------------------------------------- #
newdata = if(tclvalue(done) == "1"){
do.call(expand.grid, sapply(names(covnames), function(i){
if(inherits(covnames[[i]], "character")){
if(tclvalue(boxvar[[i]]) == "all"){
factor(covnames[[i]], levels = covnames[[i]])
}else{
factor(tclvalue(boxvar[[i]]), levels = covnames[[i]])
}
}else{
as.numeric(tclvalue(boxvar[[i]]))
}
}, simplify = FALSE))
}else{
NULL
}
tkdestroy(tt)
return(newdata)
}
# ## -------------------------------------------------------------------------- ##
# ## -------------------------------------------------------------------------- ##
#
# # popup menu for choosing the array when plotting density surface or detection surface
# # need to replace this with an option to choose array-level covariates
#
# choose_array = function(x, padx = 1){
#
# if(inherits(x, "gsecr"))
# x = x$capthist
# if(!inherits(x, "capthist"))
# stop("capthist object required")
#
# tt = tktoplevel()
# tkwm.title(tt)
# # w = 100
# # h = 100
# # scrw = as.numeric(tclvalue(tkwinfo("screenwidth", tt)))
# # scrh = as.numeric(tclvalue(tkwinfo("screenheight", tt)))
# # tkwm.geometry(tt, paste0(w, "x", h, "+", round((scrw - w) / 2), "+",
# # round((scrh - h) / 2)))
# center_window(tt)
# tcl("wm", "attributes", tt, topmost = TRUE)
# tcl("wm", "attributes", tt, topmost = FALSE)
# tkwm.geometry(tt, "")
# tkfocus(tt)
# main = ttkframe(tt)
#
# ##################################################
# ## upper frame for entry and combo boxes
#
# upper = ttkframe(main, padding = c(5,5))
# tkgrid(ttklabel(upper, text = "Choose array: "), row = 1, column = 1,
# sticky = "w")
# sessions = session(x)
# combo.tvar = tclVar(sessions[1])
# combo = ttkcombobox(upper, textvariable = combo.tvar, width = 10,
# state = "normal", values = sessions)
# tkgrid(combo, row = 1, column = 2)
#
# ##################################################
# ## lower frame for buttons
#
# lower = ttkframe(main, padding = c(5,5))
# done = tclVar(0)
# ok = ttkbutton(lower, text = "OK", state = "normal", width = 10,
# command = function() tclvalue(done) = 1)
# cancel = ttkbutton(lower, text = "Cancel", state = "normal", width = 10,
# command = function() tclvalue(done) = 2)
#
# ##################################################
# # packing etc.
#
# tkgrid(ok, cancel, padx = padx)
# tkpack(upper)
# tkpack(lower)
# tkpack(main)
# tkwm.resizable(tt, 0, 0)
# tkwm.protocol(tt, "WM_DELETE_WINDOW", function() tclvalue(done) = 2)
# tkwait.variable(done)
#
# ##################################################
# # return clasess
#
# result = if(tclvalue(done) == 1) tclvalue(combo.tvar) else NA
#
# tkdestroy(tt)
# return(result)
#
# }
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
# appearance setting for tcltk widgets
# .Tcl("ttk::style theme names")
# sort(as.character(tkfont.families()))
gui_appearance_settings = function(){
.Tcl(paste("source", system.file("extdata/tcl/gibbonsecr_theme.tcl",
package = "gibbonsecr")))
general = list(
WIDTH = 1150,
HEIGHT = 650,
frame.padding = c(5,5),
console.fg = "grey80",
console.bg = "grey15",
relief = "flat"
)
ggplot_theme = theme_bw(base_family = 'Helvetica') +
theme(
axis.title.x = element_text(margin = margin(t = 10)),
axis.title.y = element_text(margin = margin(r = 10)),
plot.margin = unit(c(0.25,1,0.25,0.25), units = "lines"),
legend.key = element_blank()
)
if(.Platform$OS.type == "windows"){
# windows settings --------------------------------------------------- #
specific = list(
min.height = 430,
min.width = 355,
lhs.width = 350,
rhs.width = general$WIDTH - 350,
button.width = 9,
combo.width = 15,
entry.width = 16,
csv.entry.width = 28,
fixed.entry.width = 6,
formula.entry.width = 25,
grid.padx = 1,
grid.pady = 1,
normal.font = tkfont.create(size = 10, family = "Trebuchet MS"),
heading.font = tkfont.create(size = 10, family = "Trebuchet MS",
slant = "roman", weight = "bold"),
console.normal.font = tkfont.create(size = 9, family = "Lucida Console"),
console.heading.font = tkfont.create(size = 10, family = "Lucida Console"),
console.bold.font = tkfont.create(size = 9, family = "Lucida Console",
slant = "roman")
)
.Tcl("ttk::style configure TButton -padding {0 1}")
.Tcl("ttk::style configure TCombobox -padding {5 2}")
.Tcl("ttk::style configure TEntry -padding {5 2}")
.Tcl("ttk::style configure TNotebook.Tab -padding {1 1 1 1}")
.Tcl("ttk::style map TNotebook.Tab -padding [list selected {2 3 2 3}]")
ggplot_theme = ggplot_theme +
theme(
text = element_text(size = 12)
)
}else{
# mac settings ------------------------------------------------------- #
specific = list(
min.height = 500,
min.width = 415,
lhs.width = 410,
rhs.width = general$WIDTH - 410,
button.width = 8,
combo.width = 12,
entry.width = 15,
csv.entry.width = 25,
fixed.entry.width = 6,
formula.entry.width = 22,
grid.padx = 2,
grid.pady = 2,
heading.font = tkfont.create(size = 10, family = "Lucida Grande",
slant = "roman", weight = "bold"),
console.normal.font = tkfont.create(size = 10, family = "Courier New",
weight = "bold"),
console.heading.font = tkfont.create(size = 11, family = "Courier New",
weight = "bold"),
console.bold.font = tkfont.create(size = 10, family = "Courier New", weight = "bold")
)
.Tcl("ttk::style configure TButton -padding {0 3}")
.Tcl("ttk::style configure TEntry -padding {5 4}")
.Tcl("ttk::style configure TCombobox -padding {5 4}")
.Tcl("ttk::style configure TNotebook.Tab -padding {2 2 2 2}")
.Tcl("ttk::style map TNotebook.Tab -padding [list selected {3 4 3 4}]")
ggplot_theme = ggplot_theme +
theme(
text = element_text(size = 14)
)
}
settings = c(general, specific)
settings$theme = ggplot_theme
return(settings)
}
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
# welcome message to appear on GUI console on startup
# code adadpted from utils::sessionInfo to get os version (running)
welcome_message = function(){
if(.Platform$OS.type == "windows") {
running = win.version()
}else if(nzchar(Sys.which("uname"))){
uname = system("uname -a", intern = TRUE)
os = sub(" .*", "", uname)
running = switch(os, Linux = if (file.exists("/etc/os-release")) {
tmp = readLines("/etc/os-release")
t2 = if (any(grepl("^PRETTY_NAME=", tmp))){
sub("^PRETTY_NAME=", "", grep("^PRETTY_NAME=", tmp, value = TRUE)[1L])
}else if(any(grepl("^NAME", tmp))){
sub("^NAME=", "", grep("^NAME=", tmp, value = TRUE)[1L])
}else{
"Linux (unknown distribution)"
}
sub("\"(.*)\"", "\\1", t2)
} else if (file.exists("/etc/system-release")) {
readLines("/etc/system-release")
}, Darwin = {
ver = readLines("/System/Library/CoreServices/SystemVersion.plist")
ind = grep("ProductUserVisibleVersion", ver)
ver = ver[ind + 1L]
ver = sub(".*<string>", "", ver)
ver = sub("</string>$", "", ver)
ver1 = strsplit(ver, ".", fixed = TRUE)[[1L]][2L]
sprintf("OS X %s (%s)", ver,
switch(ver1, `4` = "Tiger",
`5` = "Leopard", `6` = "Snow Leopard", `7` = "Lion",
`8` = "Mountain Lion", `9` = "Mavericks", `10` = "Yosemite",
`11` = "El Capitan", "unknown"))
}, SunOS = {
ver = system("uname -r", intern = TRUE)
paste("Solaris", strsplit(ver, ".", fixed = TRUE)[[1L]][2L])
}, uname)
}
paste0("\n",
"Welcome to gibbonsecr version ", utils::packageVersion("gibbonsecr"), "\n\n",
R.Version()$version, "\n", running, "\n\n",
stringr::str_wrap("This software was developed in partnership with the IUCN SSC Primate Specialist Group Section on Small Apes and the Centre for Research into Ecological and Environmental Modelling (CREEM) at the University of St Andrews, UK.", 60), "\n\n",
stringr::str_wrap("To view the online manual go to Help > User manual, or visit 'http://dkidney.github.io/gibbonsecr'.", 60),
"\n\n",
stringr::str_wrap("This is a pre-release version of the software. If you notice any bugs or have any general queries, please email Darren Kidney at 'darrenkidney@googlemail.com'.", 60),
"\n\n"
)
}
|
1883484f10604650f8da5f2c759b8234005f910c | a27bb0900bd6279e1eb919cd97ef7c90f2a03615 | /Rcode/IPMs (modified from ShethAngertPNAS)/02_Estimate_lambda_IPM.R | c6d2486f3b49b4d35da500940549f0038b1d1c9f | [] | no_license | mattjbayly/transplant_analysis | 0bbb7090121f750d5b72c0c9911e1679ab40b02a | 56fc391f061e0a46d08556d9c87ffa2ca7ede3f7 | refs/heads/master | 2021-03-27T19:57:01.807027 | 2019-04-16T14:22:31 | 2019-04-16T14:22:31 | 115,833,660 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,830 | r | 02_Estimate_lambda_IPM.R | #### PROJECT: Mimulus cardinalis northern transplant 2015-2016
#### PURPOSE: Create data frame of vital rate parameters and build integral projection models
############# Obtain estimates of lambda for each transplant site
#### AUTHOR: modified from Seema Sheth (Sheth & Angert 2018 PNAS)
#### DATE LAST MODIFIED: 20180224
# remove objects and clear workspace
rm(list = ls(all=TRUE))
# require packages
require(lme4)
require(glmmADMB)
require(plyr)
require(dplyr)
require(tidyverse)
#*******************************************************************************
#### 1. Import and format data ###
#*******************************************************************************
# these data are used to fill in the gap from fruits to recruits
# from Sheth and Angert 2018 from survey of natural populations
# individual-level data
# run data prep script to clean up and import data
source("Rcode/data_prep.R")
head(demo.data)
head(site_fruit_count_data)
# filter to northern sites only
demo.dat.north <- demo.data %>%
filter(Site=="Rock Creek"|Site=="Canton Creek"|Site=="Coast Fork of Williamette") %>%
droplevels()
head(demo.dat.north)
site_fruit_count_north <- site_fruit_count_data %>%
filter(Site=="Rock Creek"|Site=="Canton Creek"|Site=="Coast Fork of Williamette") %>%
droplevels()
head(site_fruit_count_north)
# seeds per fruit
seeds <- read_csv("Data/Amy_wild_demo_data/fec2.seed.per.fruit.2010.2011.2012.csv")
head(seeds)
# filter to northern sites only
seeds.north <- seeds %>%
filter(site=="Coast Fork of Williamette"|site=="Rock Creek"|site=="Canton Creek") %>%
droplevels()
# these are Matt's transplant data
# need them here to get site name vector
# also, they are used by source file 'integral_projection_model.R' below
matt_dat <- read_csv("Data/IPMData_transplant.csv")
# log-transform size
matt_dat <- matt_dat %>% mutate(logSize = log(z), logSizeNext = log(z1), Fec0 = Repr, Fec1 = Fec)
# make sure plot, site, and year are recognized as factors
matt_dat$PlotID = as.factor(matt_dat$PlotID)
matt_dat$SiteID = as.factor(matt_dat$SiteID)
matt_dat$Year = as.factor(matt_dat$Year)
#*******************************************************************************
#### 2. Read in global survival, growth and fecundity models using data from all sites ###
#*******************************************************************************
# Create a vector of unique Site names for subsetting (n=8)
site=unique(matt_dat$SiteID)
# Set up data frame of model parameters
params=c()
#*******************************************************************************
### 3A. Survival ###
#*******************************************************************************
# Read in top survival model output (Formula: Surv ~ logSize + Site + (1 | Plot))
surv.reg=load("Robjects/surv.reg.rda")
# Get model coefficients
fixef(s6)
# Store model coefficients
params$site=site
params$surv.globint=fixef(s6)[1]
params$surv.siteint=c(0,fixef(s6)[3:9])
params$surv.slope=fixef(s6)[2]
params$surv.int = params$surv.globint + params$surv.siteint
#*******************************************************************************
### 3B. Growth ###
#*******************************************************************************
# Read in top growth model output (Formula: logSizeNext ~ logSize + Site + (1 | Plot)
growth.reg=load("Robjects/growth.reg.rda")
# Get model coefficients
fixef(g6)
# Store model coefficients
params$growth.globint=fixef(g6)[1]
params$growth.siteint=c(0,fixef(g6)[3:9])
params$growth.slope=fixef(g6)[2]
params$growth.sd=rep(sigma(g6),times=length(site))
params$growth.int = params$growth.globint + params$growth.siteint
#*******************************************************************************
### 3C. Flowering ###
#*******************************************************************************
# Read in top flowering model output (Formula: Fec0 ~ logSize + Site + (1 | Plot))
flowering.reg=load("Robjects/flowering.reg.rda")
# Store model coefficients
params$flowering.globint=fixef(fl6)[1]
params$flowering.siteint=c(0,fixef(fl6)[3:9])
params$flowering.slope=fixef(fl6)[2]
params$flowering.int = params$flowering.globint + params$flowering.siteint
#*******************************************************************************
### 3D. Fruit number (untransformed) using negative binomial regression ###
#*******************************************************************************
# Read in top model output for fruit.reg (Formula: Fec0 ~ logSize + Site + (1 | Plot) + (1|Year))
fruit.reg=load("Robjects/fruit.reg.rda")
# Store model coefficients
params$fruits.globint=fixef(fr15)[1]
params$fruits.siteint=c(0,fixef(fr15)[3:9])
params$fruits.slope=fixef(fr15)[2]
params$fruits.int = params$fruits.globint + params$fruits.siteint
#*******************************************************************************
### 3E. Create data frame of site-specific parameter estimates ###
#*******************************************************************************
params=data.frame(params)
#*******************************************************************************
### 3F. Number of seeds per fruit (constant across sites) ###
#*******************************************************************************
# Mean seeds per fruit across the northern populations, recycled to every site
seeds.north.mean <- seeds.north %>%
summarize(seeds.per.fruit = mean(newgrandmean))
params$seeds.per.fruit = seeds.north.mean$seeds.per.fruit
#*******************************************************************************
### 3G. Establishment probability (constant across sites) ###
#*******************************************************************************
# Obtain number of new recruits per 3 northern sites
# (new recruits = plants with a size this year but no size last year)
recruit.number.per.site=tapply(demo.dat.north$logSizeNext[is.na(demo.dat.north$logSize)],demo.dat.north$Site[is.na(demo.dat.north$logSize)],FUN="length") %>% data.frame()
colnames(recruit.number.per.site)="recruit.number"
# Obtain total fruit count per 3 northern sites
fruits.per.site=tapply(demo.dat.north$Fec1[!is.na(demo.dat.north$Fec1)],demo.dat.north$Site[!is.na(demo.dat.north$Fec1)],sum)
# Obtain total seed count per site (= # fruits per site * # seeds per fruit per site)
total.seeds.per.site=fruits.per.site*seeds.north$newgrandmean
# Estimate establishment probability as # of new recruits/# of seeds
params$establishment.prob=mean(recruit.number.per.site$recruit.number/total.seeds.per.site)
#*******************************************************************************
### 3H. Size distribution of recruits (constant across sites) ###
#*******************************************************************************
recruit.size.mean=mean(demo.dat.north$logSizeNext[is.na(demo.dat.north$logSize)])
recruit.size.sd=sd(demo.dat.north$logSizeNext[is.na(demo.dat.north$logSize)])
params$recruit.logSize.mean=recruit.size.mean
params$recruit.logSize.sd=recruit.size.sd
#### Store parameters in .csv file for later use
write.csv(params,"Robjects/vital_rate_coefficients.csv",row.names=FALSE)
#*******************************************************************************
### 4. Create site-specific IPMs parameterized by site-specific parameters derived from global vital rates models
#*******************************************************************************
#*******************************************************************************
### 4A. Subset data for site f
#*******************************************************************************
# create empty vectors for lambda and site to be filled
# NOTE(review): growing vectors in the loop is fine for 8 sites, but
# preallocating (e.g. lambda=numeric(length(site))) would be tidier.
lambda=c()
Site=character()
for (f in 1:length(site)) {
data1=subset(matt_dat,SiteID==site[f])
params1=subset(params,site==site[f])
# Drop helper columns so only the final vital-rate parameters reach the IPM
params1=subset(params1,select=-c(site,surv.globint, surv.siteint, growth.globint, growth.siteint, flowering.globint, flowering.siteint, fruits.globint, fruits.siteint))
#*******************************************************************************
### 4B. Create survival, growth, and fecundity functions and build IPM by running integral_projection_model.R script
#*******************************************************************************
# The sourced script uses data1/params1 and leaves the IPM kernel in K
source("Rcode/integral_projection_model.R")
#*******************************************************************************
### 4C. Obtain lambda estimate for site f
#*******************************************************************************
# Population growth rate = dominant (first) eigenvalue of the kernel
lambda[f] <- Re(eigen(K)$values[1])
Site[f]=as.character(site[f])
} # end loop to run IPMs and estimate lambdas for each site
# make data frame of site and lambda
site.lambda=data.frame(Site,lambda)
#*******************************************************************************
### 5. Merge site information with vital and lambda estimates and save to .csv files
#*******************************************************************************
# Read in site info
site.info=read_csv("Data/raw_data/WunderGround/sites.csv")
# merge site info with lambda estimates (site info keyed by ID2)
site.lambda=left_join(site.lambda, site.info, by=c("Site" = "ID2"))
site.vitals=left_join(params, site.info, by=c("site" = "ID2"))
# Quick diagnostic plots: lambda and vital-rate intercepts against latitude
plot(lambda ~ lat, data=site.lambda) # looks qualitatively the same as in Matt's thesis
plot(surv.int ~ lat, data=site.vitals) #
plot(growth.int ~ lat, data=site.vitals) #
plot(flowering.int ~ lat, data=site.vitals) #
plot(fruits.int ~ lat, data=site.vitals) #
# save to .csv file
write.csv(site.lambda,"Robjects/site.lambda.csv",row.names=FALSE)
write.csv(site.vitals,"Robjects/site.vitals.csv",row.names=FALSE)
|
994d22f8e88ddf9c0daf6640aa7831e4d2a1439d | d33d59cc443f48e4477793a38bdfd04f833995a5 | /test/internals/libproc/tst.plddGrabPIE32.r | 7c77a2dfe69df88dbfc8ad5461a6a2ac67047fe8 | [
"UPL-1.0"
] | permissive | oracle/dtrace-utils | a1b1c920b4063356ee1d77f43a1a0dd2e5e963c3 | b59d10aa886c2e5486f935c501c3c160a17fefa7 | refs/heads/dev | 2023-09-01T18:57:26.692721 | 2023-08-30T21:19:35 | 2023-08-31T20:40:03 | 124,185,447 | 80 | 20 | NOASSERTION | 2020-12-19T13:14:59 | 2018-03-07T05:45:16 | C | UTF-8 | R | false | false | 344 | r | tst.plddGrabPIE32.r | : dyn {ptr}, bias {ptr}, LMID 0: symbol search path: (, /lib/libc.so.6, /lib/ld-linux.so.2)
/lib/libc.so.6: dyn {ptr}, bias {ptr}, LMID 0: inherited symbol search path: (, /lib/libc.so.6, /lib/ld-linux.so.2)
/lib/ld-linux.so.2: dyn {ptr}, bias {ptr}, LMID 0: inherited symbol search path: (, /lib/libc.so.6, /lib/ld-linux.so.2)
3 libs seen.
|
1c24617c8d8851b24ae8e606b18a6eb6913315e2 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/historydata/examples/tudors.Rd.R | 4bf1c689126db9c72b351ed12bdc1b4f93a61b68 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 142 | r | tudors.Rd.R | library(historydata)
### Name: tudors
### Title: Tudor dynasty
### Aliases: tudors
### Keywords: datasets
### ** Examples
# Load the bundled Tudor-dynasty dataset into the workspace
data(tudors)
|
89f3fb8601191173d11e7fe1dfcd7d60aa6492ae | 0f78a0bea50983a26d16ad50446da0d882467b1e | /R/data.R | a1e275f48cf88398aa973642195ba03c68533025 | [] | no_license | samstearns/mssp | 08f5fa57d4249a3064eea1586c984794682d44a8 | e5bef6d8a2f101a4c19e5a4875f4ab7f9b53888f | refs/heads/master | 2023-01-24T07:33:07.169268 | 2023-01-14T05:57:59 | 2023-01-14T05:57:59 | 73,012,816 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 10,719 | r | data.R | # List of years and URLs, in descending order
# MSSP performance years with published PUFs, most recent first.
# (load_multi_year_db() treats the first element as the master-schema year.)
years <- c(2021, 2020, 2019, 2018, 2017, 2016, 2015, 2014, 2013);
# Per-capita expenditure columns (comma-formatted dollar strings in the raw
# PUF). All names are lower-cased to match load_puf_file()'s normalized names.
per_capita_exps <- tolower(c("CapAnn_INP_All", "CapAnn_INP_S_trm", "CapAnn_INP_Rehab", "CapAnn_INP_Psych",
"CapAnn_HSP", "CapAnn_SNF", "CapAnn_OPD", "CapAnn_PB", "CapAnn_AmbPay", "CapAnn_HHA", "CapAnn_DME",
"Per_Capita_Exp_ALL_ESRD_BY1",
"Per_Capita_Exp_ALL_DIS_BY1",
"Per_Capita_Exp_ALL_AGDU_BY1",
"Per_Capita_Exp_ALL_AGND_BY1",
"Per_Capita_Exp_ALL_ESRD_BY2",
"Per_Capita_Exp_ALL_DIS_BY2",
"Per_Capita_Exp_ALL_AGDU_BY2",
"Per_Capita_Exp_ALL_AGND_BY2",
"Per_Capita_Exp_ALL_ESRD_BY3",
"Per_Capita_Exp_ALL_DIS_BY3",
"Per_Capita_Exp_ALL_AGDU_BY3",
"Per_Capita_Exp_ALL_AGND_BY3",
"Per_Capita_Exp_ALL_ESRD_PY",
"Per_Capita_Exp_ALL_DIS_PY",
"Per_Capita_Exp_ALL_AGDU_PY",
"Per_Capita_Exp_ALL_AGND_PY",
"Per_Capita_Exp_TOTAL_PY"
));
# Beneficiary-count columns (assignable beneficiaries by eligibility
# category, age band, sex, and race) -- comma-formatted in the raw PUF.
num_benes <- tolower(c(
"N_AB",
"N_AB_Year_ESRD_BY3",
"N_AB_Year_DIS_BY3",
"N_AB_Year_AGED_Dual_BY3",
"N_AB_Year_AGED_NonDual_BY3",
"N_AB_Year_PY",
"N_AB_Year_ESRD_PY",
"N_AB_Year_DIS_PY",
"N_AB_Year_AGED_Dual_PY",
"N_AB_Year_AGED_NonDual_PY",
"N_Ben_Age_0_64",
"N_Ben_Age_65_74",
"N_Ben_Age_75_84",
"N_Ben_Age_85plus",
"N_Ben_Female",
"N_Ben_Male",
"N_Ben_Race_White",
"N_Ben_Race_Black",
"N_Ben_Race_Asian",
"N_Ben_Race_Hisp",
"N_Ben_Race_Native",
"N_Ben_Race_Other"
));
# Utilization-rate columns (admissions, visits, SNF measures).
util_rates <- tolower(c(
"ADM",
"ADM_S_Trm",
"ADM_L_Trm",
"ADM_Rehab",
"ADM_Psych",
"P_EDV_Vis",
"P_EDV_Vis_HOSP",
"P_CT_VIS",
"P_MRI_VIS",
"P_EM_Total",
"P_EM_PCP_Vis",
"P_EM_SP_Vis",
"P_Nurse_Vis",
"P_FQHC_RHC_Vis",
"P_SNF_ADM",
"SNF_LOS",
"SNF_PayperStay"
));
# Benchmark / savings dollar columns.
bmrk_values <- tolower(c("BnchmkMinExp", "GenSaveLoss", "DisAdj", "EarnSaveLoss", "UpdatedBnchmk", "HistBnchmk", "ABtotBnchmk", "ABtotExp", "Adv_Pay_Amt"));
# Rate columns published with a "%" suffix (stripped and, for some years,
# rescaled from whole percentages to proportions in load_puf_file()).
percentage_savings <- tolower(c("Sav_Rate", "MinSavPerc", "MaxShareRate", "FinalShareRate", "QualScore"));
# "ACO_Num", "ACO_NAME", "N_AB", "QualScore", "Per_Capita_Exp_TOTAL_PY", "HistBnchmk", "UpdatedBnchmk", "Performance_Year"))
#' Downloads PUF files from CMS website
#'
#' Fetches the Medicare Shared Savings Program (MSSP) Public Use File for one
#' performance year from the data.cms.gov Data API and coerces the formatted
#' character columns (comma-separated counts/dollars, "%"-suffixed rates) to
#' numeric. Rates that some years publish as whole percentages are rescaled
#' to proportions so all years are comparable.
#'
#' @param year MSSP performance year (2013-2021).
#' @return Data frame with mssp data, or NULL for an invalid year.
#' @examples
#' load_puf_file(2016)
#' @export
load_puf_file <- function(year="1000") {
  if (!requireNamespace("jsonlite", quietly = TRUE)) {
    # Fixed message: it previously named the placeholder "pkg" instead of jsonlite.
    stop("Package \"jsonlite\" needed for this function to work. Please install it.", call. = FALSE)
  }
  # data.cms.gov dataset identifier for each performance year's PUF
  # (replaces the previous nine-branch if/else ladder).
  dataset_ids <- c(
    "2013" = "bc90f498-76f4-4e75-8225-8aae30336059",
    "2014" = "0ef9b1e2-e23b-4a01-921c-1ac7290c814b",
    "2015" = "156c00e2-ab42-4923-b54f-09c031f5f28d",
    "2016" = "a290fdd3-976a-4fc9-9139-a98193b3af82",
    "2017" = "3b306450-1836-417b-b779-7d70fd2fc734",
    "2018" = "80c86127-8839-4f35-b87b-aa37664afd19",
    "2019" = "9c3a4c69-7d00-4307-9b6f-a080dc90417e",
    "2020" = "8f073013-9db0-4b12-9a34-5802bdabbdfe",
    "2021" = "73b2ce14-351d-40ac-90ba-ec9e1f5ba80c"
  )
  key <- as.character(year)
  if (!key %in% names(dataset_ids)) {
    print("Invalid performance year. Please select a value between 2013 and 2021")
    return()
  }
  address <- paste0("https://data.cms.gov/data-api/v1/dataset/", dataset_ids[[key]], "/data")
  df <- jsonlite::fromJSON(address)
  # Round-trip through a temporary CSV so read.csv() re-guesses the column
  # types (fromJSON returns everything as character). Note: write.csv() keeps
  # row names, which read.csv() reads back as an extra leading column; this
  # is preserved so the column layout matches the historical behaviour.
  filename = paste0(tempdir(), "/mssp", ".csv")
  write.csv(df, file = filename)
  dfa <- read.csv(filename)
  # Standardize the column names to lower case. 2019 is all lower case, other years camel case
  names(dfa) <- tolower(names(dfa))
  # Remove the temporary CSV file
  unlink(filename)
  # Counts and dollar amounts arrive as strings like "1,234": strip the
  # thousands separators and convert to numeric. (This replaces four
  # byte-identical copies of the same loop, one per column group.)
  comma_formatted_cols <- c(per_capita_exps, num_benes, util_rates, bmrk_values)
  for (value in comma_formatted_cols) {
    if (value %in% colnames(dfa)) {
      dfa[, value] <- as.numeric(gsub(",", "", dfa[, value]))
    }
  }
  # Rate columns carry a "%" suffix; strip it, then convert.
  for (value in percentage_savings) {
    if (value %in% colnames(dfa)) {
      dfa[, value] <- gsub("%", "", dfa[, value])
      # The 2014 qual scores include the label "P4R" for ACOs that selected
      # the pay-for-reporting option; blank it to avoid NA-coercion warnings.
      if (year == 2014) {
        dfa[, value] <- gsub("P4R", "", dfa[, value])
      }
      dfa[, value] <- as.numeric(dfa[, value])
      # Some years publish these rates as whole percentages (e.g. 75 rather
      # than 0.75); rescale the affected columns to proportions.
      if (year == 2021 && value %in% c("sav_rate", "minsavperc", "qualscore", "maxsharerate", "finalsharerate")) {
        dfa[, value] <- dfa[, value] / 100.0
      }
      if (year %in% c(2019, 2020) && value == "qualscore") {
        dfa[, value] <- dfa[, value] / 100.0
      }
      if (year == 2015 && value %in% c("sav_rate", "minsavperc", "qualscore", "finalsharerate")) {
        dfa[, value] <- dfa[, value] / 100.0
      }
      if (year == 2014 && value %in% c("sav_rate", "minsavperc")) {
        dfa[, value] <- dfa[, value] / 100.0
      }
    }
  }
  return(dfa)
}
#' Downloads PUF files from CMS website from multiple years and integrates into a single dataset.
#'
#' The most recent year's PUF defines the master column layout; columns that a
#' given earlier year does not publish are filled with NA for that year's
#' rows. A composite CMS-HCC risk score (person-year-weighted across the four
#' eligibility categories) is appended at the end; its component columns are
#' absent from the 2013 PUF, so 2013 rows get NA there.
#'
#' @return Data frame with mssp data from all years.
#' @examples
#' load_multi_year_db()
#' @export
load_multi_year_db <- function() {
  most_recent_year <- years[1]
  print(paste("Creating multi-year DB for ", length(years), " years. Most recent year =", most_recent_year))
  # for each year in URL_Lookup
  for (year in years) {
    print(paste("Downloading PUF file for", year))
    if ( most_recent_year == year ) {
      # Download the most recent year first; its column layout becomes the
      # template (master schema) for the multi-year database.
      most_recent_year_data <- load_puf_file(year)
      # Add a column to record the year
      most_recent_year_data$performance_year <- year
      ncols <- length(most_recent_year_data)
      # Preserve original column names for the most recent year
      original_col_names <- colnames(most_recent_year_data)
      colnames(most_recent_year_data) <- tolower(colnames(most_recent_year_data))
      multi_year_data <- most_recent_year_data
    } else {
      # prior years: align each file's columns to the template
      b <- load_puf_file(year)
      b$performance_year <- year
      # NOTE: load_puf_file() already rescales sav_rate/minsavperc to
      # proportions for 2014 and 2015; the extra division by 100 that used
      # to live here double-scaled those columns and has been removed.
      nrows <- nrow(b)
      # Standardize the column names to merge data frames
      colnames(b) <- tolower(colnames(b))
      # Template-shaped frame: nrows rows from b, ncols template columns
      df <- data.frame(matrix(NA, nrow = nrows, ncol = ncols))
      colnames(df) <- colnames(most_recent_year_data)
      # Copy each template column that exists in this year's file
      print(paste("Merging columns for", year))
      for (i in seq_len(ncols)) {
        col <- colnames(most_recent_year_data)[i]
        # Look up the position of the column by name
        colIndex <- which(names(b)==col)
        if (identical(colIndex, integer(0))) {
          # Column missing from this year's PUF: leave the NA fill in place
          print(paste(col, " is not found in PY ", year))
        } else {
          # Column found in b: copy it into the template frame
          df[,i] <- b[,colIndex]
        }
        colnames(df)[i] <- colnames(most_recent_year_data)[i]
      }
      # Paste the two dataframes together
      multi_year_data <- rbind(multi_year_data, df)
    } # end prior year
  }
  colnames(multi_year_data) <- original_col_names
  # Add the composite risk score (weighted by person-years in each of the
  # four eligibility categories). BUG FIX: the original guard was
  # `if (multi_year_data$performance_year != 2013)`, which passes a whole
  # column to if() -- an error on R >= 4.2. The 2013 component columns are
  # simply absent, so those rows evaluate to NA naturally.
  if ("cms_hcc_riskscore_dis_py" %in% colnames(multi_year_data)) {
    multi_year_data$cms_hcc_riskscore_py <- (multi_year_data$cms_hcc_riskscore_dis_py * multi_year_data$n_ab_year_dis_py +
                                               multi_year_data$cms_hcc_riskscore_esrd_py * multi_year_data$n_ab_year_esrd_py +
                                               multi_year_data$cms_hcc_riskscore_agdu_py * multi_year_data$n_ab_year_aged_dual_py +
                                               multi_year_data$cms_hcc_riskscore_agnd_py * multi_year_data$n_ab_year_aged_nondual_py) / multi_year_data$n_ab
  }
  # return DB
  return(multi_year_data)
}
#' Downloads PUF files from CMS website and applies enhancements
#'
#' Convenience wrapper: fetches the PUF for `year` via load_puf_file() and
#' passes it straight through enhance_puf_file().
#'
#' @param year MSSP performance year.
#' @return Data frame with mssp data.
#' @examples
#' load_enhanced_puf_file(2016)
#' @export
load_enhanced_puf_file <- function(year="1000") {
  raw_puf <- load_puf_file(year)
  enhance_puf_file(raw_puf, year)
}
#' Applies enhancements to a dataframe containing PUF file data
#'
#' Tags each row with the performance year, normalizes column names to lower
#' case (the 2018+ files changed the first column's name and capitalization
#' convention), and derives an overall CMS-HCC risk score as the
#' person-year-weighted average of the four eligibility-category scores.
#'
#' @param df Dataframe containing downloaded PUF file.
#' @param year MSSP performance year.
#' @return Data frame with mssp data.
#' @examples
#' enhance_puf_file(df, 2016)
#' @export
enhance_puf_file <- function(df, year) {
  df$performance_year <- year
  # From 2018 onward the first column carries a different name; force the
  # pre-2018 label so every year lines up on "aco_num".
  if (year > 2017) {
    colnames(df)[1] <- "aco_num"
  }
  # Normalize capitalization across years (2018 is already lower case).
  names(df) <- tolower(names(df))
  # The component risk-score columns do not exist in the 2013 PUF.
  if (year != 2013) {
    weighted_score_total <-
      df$cms_hcc_riskscore_dis_py * df$n_ab_year_dis_py +
      df$cms_hcc_riskscore_esrd_py * df$n_ab_year_esrd_py +
      df$cms_hcc_riskscore_agdu_py * df$n_ab_year_aged_dual_py +
      df$cms_hcc_riskscore_agnd_py * df$n_ab_year_aged_nondual_py
    df$cms_hcc_riskscore_py <- weighted_score_total / df$n_ab
  }
  df
}
|
6c4cafdd4f31fc48daf101a0e6425f3902dc7474 | 36332b7f1e72c0cfcb1d2b4f35bb9f7f6a785194 | /전처리 과정.R | 9debc0b19d398eb36a944543c2e71eef8ea95eac | [] | no_license | Ji2z/TextMining-Smishing_Filtering | d3d8e5c7f303fdb12c41d15fd15e3bc49b9ded01 | 90f4e37b2aa1603e8a12e9d9614bca6c284e6ce2 | refs/heads/main | 2023-06-02T03:57:18.202775 | 2021-06-25T07:44:05 | 2021-06-25T07:44:05 | 380,155,286 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,040 | r | 전처리 과정.R | ## 전처리
library(KoNLP)    # Korean morphological analysis (extractNoun)
library(stringr)
library(tm)       # corpus / document-term matrix tools
useSejongDic()    # use the Sejong dictionary for noun extraction
## Load the raw SMS messages, one message per line
ding <- readLines("Dingdong.txt")
head(ding)
## Remove duplicate messages, then replace special characters
## (anything that is not a letter or blank) with spaces.
ding2 <- unique(ding)
# BUG FIX: the original called str_replace_all(ding, ...), which silently
# discarded the de-duplication result from the line above; operate on ding2.
ding2 <- str_replace_all(ding2,"[^[:alpha:][:blank:]]"," ")
head(ding2)
## Tokenize each message into nouns, then strip any non-Korean (e.g. English) characters
ding2 <- extractNoun(ding2)
ding2 <-lapply(ding2,function(x){gsub("[^가-힣, ]"," ",x)})
head(ding2)
## Keep only tokens of 2-10 characters (drop single characters and over-long strings)
ding2 <- lapply(ding2, function(x){
  Filter(function(y){nchar(y)<=10&nchar(y)>1},x)
})
head(ding2)
## Flatten the nested list into a 1-D character vector without sorting
## (so it can be handed to tm), then strip the commas introduced by format()
ding2 <- format(ding2,justify = "none")
ding2 <- gsub("\\,","",ding2)
head(ding2)
## Build a tm corpus from the cleaned messages
ding3 <- Corpus(VectorSource(ding2))
## Build Document-Term matrices: tf-idf weighting for kmeans/knn/svm,
## binary weighting for naive Bayes
ding_tfidf <- DocumentTermMatrix(ding3, control = list(weighting = function(x)
  weightTfIdf(x, normalize = F)))
ding_bin <- DocumentTermMatrix(ding3,control = list(weighting=function(x)
  weightBin(x)))
## Drop sparse terms (terms absent from more than 99% of documents are removed) -- 86 terms remain
library(class)
cleandtm = removeSparseTerms(ding_tfidf,0.99)
cleandtm2 = removeSparseTerms(ding_bin,0.99)
cleandtm2$dimnames
## Convert the document-term matrices to data frames
cleanframe <- as.data.frame(as.matrix(cleandtm))
cleanframe2 <- as.data.frame(as.matrix(cleandtm2))
## The source txt holds 153 smishing messages followed by 80 normal messages,
## in that order, so class labels are assigned positionally
cleandata <- cbind("Class" = c(rep("spam",153),rep("ham",80)), cleanframe)
cleandata2 <- cbind("Class" = c(rep("spam",153),rep("ham",80)), cleanframe2)
# Random split of all documents: 60% training, 40% test (sample() returns document row numbers)
train <- sample(nrow(cleandata),ceiling(nrow(cleandata)*0.60))
test = (1:nrow(cleandata))[-train]
|
aaa17115580114be2ca380bf9a8be026da4eccc5 | 6713b68c912af377c741b26fe31db0fe6f6194d4 | /2nd Term/Statistics_Econometrics/Slides/Lecture 6 - Model and data issues/Model and data issues_part II.R | 2fbeecbff53e8197ae20bd68bb1eb54751b9098d | [] | no_license | Lanottez/IC_BA_2020 | 820e8d9c1dbb473ed28520450ec702f00c6684ed | 8abd40c6a5720e75337c20fa6ea89ce4588016af | refs/heads/master | 2023-08-25T05:52:08.259239 | 2021-11-03T07:27:11 | 2021-11-03T07:27:11 | 298,837,917 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,138 | r | Model and data issues_part II.R | library(ggplot2)
library(dplyr)
#### Non-random sample
# Illustrates sample-selection effects: split the birth-weight data on a
# covariate vs. on the outcome and compare the fitted cigarette slopes.
load("bwght.RData")
# Flag families above median income ("rich"): selection on an x-variable
data <- data %>% mutate(rich = ifelse(faminc > median(faminc), 1, 0))
data$rich <- as.factor(data$rich)
ggplot(data, aes(x = cigs, y = bwght, color = rich)) + geom_point() +
  geom_smooth(method='lm') + scale_color_brewer(palette="Set1")
# Flag heavy babies (above median birth weight): selection on the outcome
data <- data %>% mutate(heavy = ifelse(bwght > median(bwght), 1, 0))
data$heavy <- as.factor(data$heavy)
ggplot(data, aes(x = cigs, y = bwght, color = heavy)) + geom_point() +
  geom_smooth(method='lm') + scale_color_brewer(palette="Set1")
#### Outlier
load("rdchem.RData")
# R&D intensity regressed on sales and profit margin
rdchem.m1 <- lm(rdintens ~ sales + profmarg, data)
# Standardized residuals vs. fitted values diagnostic
ggplot(rdchem.m1, aes(.fitted, .stdresid)) + geom_point() +
  stat_smooth(method = "loess") + xlab("Fitted Value") + ylab("Standardized Residuals")
# Leverage vs. Cook's distance: influential-observation diagnostic
ggplot(rdchem.m1, aes(.hat, .cooksd)) + geom_point() +
  stat_smooth(method = "loess") + xlab("Leverage") + ylab("Cook's Distance")
# standardized residuals
rstandard(rdchem.m1)
# leverage values (hat values)
hatvalues(rdchem.m1)
# Cook's distances
cooks.distance(rdchem.m1)
# Drop observation 10 (the influential outlier) before refitting
data.new <- data[-c(10), ]
rdchem.m2 <- lm(rdintens ~ sales + profmarg, data.new) |
3fdbcb966559b44380abcd9686564bbb58f722e7 | d31906739e666f15ac64dc5ddc8e45522490eb5e | /ui.R | bed3bfd1593db5b6fe820ee84db3f45026a42672 | [] | no_license | michbur/butterflier | 77598a5ff718c7ac0f3c736c21f8df19662c9d89 | 8393b1ee63588527afe5c6f94a66a46b214db7ff | refs/heads/master | 2021-01-01T15:23:32.350466 | 2014-11-06T10:04:24 | 2014-11-06T10:04:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 622 | r | ui.R | library(shiny)
# UI definition: sidebar with plot-type and point-size controls; main panel
# renders the server's "nice_image" plot output.
shinyUI(pageWithSidebar(
  headerPanel("Butterflier"),
  sidebarPanel(
    # Which parametric curve to draw
    radioButtons("what_plot", "What do you want to plot?",
                 c("Heart" = "heart",
                   "Butterfly" = "butterfly")),
    # Number of points sampled along the curve
    numericInput("number_points", "Number of points:", 1000,
                 min = 100, max = 1200, step = 10),
    # Smallest and largest point sizes (ranges chosen so min < max always)
    numericInput("min_points", "Minimum size of the point:", 1,
                 min = 1, max = 10, step = 1),
    numericInput("max_points", "Maximum size of the point:", 15,
                 min = 11, max = 20, step = 1)
  ),
  mainPanel(
    plotOutput("nice_image")
  )
))
|
db08bd9d1a7bd8f051d047b6af60d50cc2b5382f | e655973fd50b63bdc47df5923aed1d31053344ad | /code/02_penguins.R | 4e93b7df547b7de0db4d80a59e691c10f6647f4e | [] | no_license | emmadunne/R-Basics | 128086c4c3d28d2909515c3b82699270d2280190 | ca79b9a7bdacb8b5bc8e22201ea872bddc277a3c | refs/heads/main | 2023-08-29T21:11:21.315365 | 2021-11-08T14:14:40 | 2021-11-08T14:14:40 | 311,338,463 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,111 | r | 02_penguins.R | # ******************************************************
#
# R Basics: Getting started in R & RStudio
#
# - E. Dunne 2021
#
# ______________________________________________________
#
# 2. Plotting the Palmer Penguins!
#
# ******************************************************
## For this plotting tutorial, we'll be using data from an R package, palmerpenguins:
## https://allisonhorst.github.io/palmerpenguins/
# NOTE(review): installation only needs to run once; consider commenting this
# line out after the first run.
install.packages("palmerpenguins")
library(palmerpenguins)
## To cite this package (or any R package) in a publication, we would use:
citation("palmerpenguins")
## Load additional packages:
library(tidyverse) # data manipulation etc.
library(ggpubr) # for arranging plot grids
# Prepping the data -------------------------------------------------------
## We'll use the built-in dataset 'penguins', which is a simplified version of the raw data; see ?penguins for more info.
View(penguins)
## Looking at this dataset, what can you tell about the data?
## How many penguins species were studied? And how many penguins in total?
## What variables were measured?
## Where was the data collected from?
# Plotting in R -----------------------------------------------------------
## Plotting in base R is really simple - Here's a simple regression plot showing penguin body mass vs. flipper length:
# NOTE(review): despite the comment above, both axes here are lengths
# (flipper vs. bill), not body mass.
plot(penguins$flipper_length_mm ~ penguins$bill_length_mm)
## But this isn't quite publication-worthy just yet
## Many people prefer using the package ggplot2 (part of the tidyverse), as it has greater customisation
## (Even the BBC use it: https://bbc.github.io/rcookbook/)
## The essential elements of a ggplot graphic are:
# 1. Data element i.e. your dataset
# 2. Aesthetics element (mapping = aes) i.e. x and y axes, etc.
# 3. Geometries element (geom_)
# Scatterplot -------------------------------------------------------------
## First, let's set up the base of the plot - note the data, aesthetics, and geom_ elements:
ggplot(data = penguins, mapping = aes(x = flipper_length_mm, y = bill_length_mm)) +
  geom_point()
## Note that we receive a warning about missing values - why might this be?
## (Some penguins have NA measurements, so those rows cannot be drawn.)
## Cool, now let's add a bit of colour and shapes:
ggplot(data = penguins, aes(x = flipper_length_mm, y = bill_length_mm)) +
  geom_point(aes(color = species, shape = species), size = 3, alpha = 0.8)
## Let's pick our own colours (Be sure that your colours are friendly for those with colour blindness or other visual impairments!)
## Names of R colours can be found here: https://www.nceas.ucsb.edu/sites/default/files/2020-04/colorPaletteCheatsheet.pdf
## Or you can use hex codes (see: https://htmlcolorcodes.com/)
ggplot(data = penguins, aes(x = flipper_length_mm, y = bill_length_mm)) +
  geom_point(aes(color = species, shape = species), size = 3, alpha = 0.8) +
  scale_color_manual(values = c("#DC267F", "#FFB000", "#648FFF"), guide = guide_legend(title = "Species"))
## Now let's change the theme to make it look more publication-ready - ggplot has plenty of built-in themes:
ggplot(data = penguins, aes(x = flipper_length_mm, y = bill_length_mm)) +
  geom_point(aes(color = species, shape = species), size = 3, alpha = 0.8) +
  scale_color_manual(values = c("#DC267F", "#FFB000", "#648FFF"), guide = guide_legend(title = "Type")) +
  theme_minimal()
## But you can also create your own custom themes - ideal for keeping all your plots looking similar:
mytheme <- theme_minimal() + theme(panel.background = element_blank(),
                                   panel.border = element_rect(colour = "black", fill = NA),
                                   axis.text.x = element_text(size=10),
                                   axis.text.y = element_text(size=10),
                                   axis.title = element_text(size=12),
                                   legend.position = "top")
ggplot(data = penguins, aes(x = flipper_length_mm, y = bill_length_mm)) +
  geom_point(aes(color = species, shape = species), size = 3, alpha = 0.8) +
  scale_color_manual(values = c("#DC267F", "#FFB000", "#648FFF"), guide = guide_legend(title = "Type")) +
  mytheme
## We can even layer some stats over plots, such as a regression line:
ggplot(data = penguins, aes(x = flipper_length_mm, y = bill_length_mm)) +
  geom_point(aes(color = species, shape = species), size = 3, alpha = 0.8) +
  geom_smooth(aes(colour = species, fill = species), method = "lm", alpha = 0.3) +
  scale_color_manual(values = c("#DC267F", "#FFB000", "#648FFF"), guide = guide_legend(title = "species")) +
  scale_fill_manual(values = c("#DC267F", "#FFB000", "#648FFF"), guide = guide_legend(title = "species")) +
  mytheme
## Now let's get this plot in shape for a publication
## We'll rename our axes, give it a title, and refine our theme
penguin_scatter <- ggplot(data = penguins, aes(x = flipper_length_mm, y = bill_length_mm)) +
  geom_point(aes(color = species, shape = species), size = 3, alpha = 0.8) +
  theme_minimal() +
  scale_color_manual(values = c("#DC267F", "#FFB000", "#648FFF")) +
  labs(x = "Flipper length (mm)", y = "Bill length (mm)",
       color = "Species", shape = "Species") +
  theme(legend.position = c(0.85, 0.15), # place the legend inside the plot area
        legend.background = element_rect(fill = "white", color = NA))
penguin_scatter # this will make the plot object appear in the Plots window
## Save the plot to your plots folder:
ggsave("./plots/scatter_plot.pdf", plot = penguin_scatter, width = 18, height = 15, units = "cm")
# Boxplots ----------------------------------------------------------------
## Another kind of plot that is useful for displaying continuous data is a boxplot
## Which species of penguin is the chonkiest?
## A basic boxplot can be constructed using ggplot like this:
ggplot(penguins, aes(x = species, y = body_mass_g)) +
geom_boxplot() # adds a boxplot element
## Now let's get fancy!
## Remove or add the commented lines to add different elements to the plot
penguin_box <- ggplot(data = penguins, aes(x = species, y = body_mass_g, colour = species)) +
#coord_flip() + # flip the plot to be horizontal
## Grey box outlines with one manual fill colour per box (three species);
## the raw observations are overlaid with jittered points, and the fixed
## jitter seed (seed = 0) keeps the point placement reproducible.
geom_boxplot(color = "gray60", fill = c("#DC267F", "#FFB000", "#648FFF"), alpha = 0.3, lwd = 0.3) +
geom_jitter(aes(color = species), alpha = 0.5, size = 3, position = position_jitter(width = 0.2, seed = 0)) +
scale_color_manual(values = c("#DC267F", "#FFB000", "#648FFF")) +
## Legend dropped: the x axis already labels the species.
theme_minimal() + theme(legend.position = "none") +
labs(x = "", y = "Body mass (g)")
penguin_box
# Plots panel -------------------------------------------------------------
## Combine both plots using the ggpubr package
penguin_plots <- ggarrange(penguin_scatter, penguin_box,
common.legend = TRUE, legend = "bottom",
## Panel labels so the subplots can be referenced in a figure caption.
labels = c("(a)", "(b)"),
ncol = 2, nrow = 1)
## Add a single title above the combined panel.
penguin_plots <- annotate_figure(penguin_plots, top = text_grob("Adelie, Chinstrap and Gentoo Penguins at Palmer Station LTER",
face = "bold", size = 14))
penguin_plots # take a look
## useDingbats = FALSE is forwarded to the pdf device so points are not drawn
## with the Dingbats font, which avoids rendering problems in some viewers.
## NOTE(review): assumes ./plots/ exists -- ggsave() does not create it.
ggsave(plot = penguin_plots,
width = 21, height = 29, dpi = 500, units = "cm",
filename = "./plots/penguin_plots.pdf",
useDingbats=FALSE)
# Exploring data ----------------------------------------------------------
## R is a really powerful tool for exploring your data.
## Here are some examples using the Palmer penguins data:
## A bar chart of observation counts per species (works for any dataset):
penguins %>%
  count(species) %>%                    # one row per species, count in column `n`
  ggplot() + geom_col(aes(x = species, y = n, fill = species)) +
  geom_label(aes(x = species, y = n, label = n)) +   # print the count on each bar
  scale_fill_manual(values = c("darkorange","purple","cyan4")) +
  theme_minimal() +
  labs(title = 'Penguins Species & Count')
## Exploring correlations using the ggplot extension package GGally
## Fix: use library() instead of require() -- library() errors immediately if
## the package is missing, while require() only warns and returns FALSE.
library(GGally)
penguins %>%
  select(species, body_mass_g, ends_with("_mm")) %>%
  GGally::ggpairs(aes(color = species)) +            # pairwise scatter/density/correlation matrix
  scale_colour_manual(values = c("darkorange","purple","cyan4")) +
  scale_fill_manual(values = c("darkorange","purple","cyan4"))
|
48520859622a2fd1705571718182a08a48e1e847 | a6ac32e43c91a3e4594685a585455ebe89c9a04e | /man/findRoot.Rd | 108913c28262d4282deb0e0df16dd2932f09356f | [] | no_license | heibl/megaptera | 6aeb20bc83126c98603d9271a3f1ae87311eedc1 | 5e8b548b01c40b767bd3bb3eb73d89c33b0bc379 | refs/heads/master | 2021-07-08T16:44:30.106073 | 2021-01-11T13:58:04 | 2021-01-11T13:58:04 | 55,764,237 | 5 | 0 | null | 2019-02-13T13:50:43 | 2016-04-08T08:44:44 | R | UTF-8 | R | false | true | 760 | rd | findRoot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/findRoot.R
\name{findRoot}
\alias{findRoot}
\title{Lineage Down to the Root}
\usage{
findRoot(x, what)
}
\arguments{
\item{x}{An object of class \code{\link{megapteraProj}}.}
\item{what}{A character string, either \code{"ingroup"}, \code{"outgroup"},
or \code{"both"}.}
}
\value{
A data frame with three columns: \item{id}{the unique identifier of
the taxon} \item{taxon}{the scientific name of the taxon} \item{rank}{the
rank of the taxon} The row order is from lower to higher ranked taxa, i.e.
backwards into evolutionary time.
}
\description{
Finds the lineage from one taxon, or the most recent common
ancestor of several taxa, down to the root of the Tree of Life.
}
|
f9eb5edccbe332289763fbb662e962b1469ea78c | d080effd2b36deb9be0e0e7d9fed15267adccea1 | /R/offspring.R | 4e4406feb93e1a6a94b64e0c0af01ab1b92e5dc7 | [] | no_license | xiangpin/tidytree | cf2246e2746a50b493ed0295ad35738917888087 | ea4bf11d0b2f45312a22afad10c1b3f397248a5c | refs/heads/master | 2023-08-18T02:11:11.826299 | 2023-07-15T07:57:00 | 2023-07-15T07:57:00 | 227,064,787 | 0 | 0 | null | 2019-12-10T08:19:15 | 2019-12-10T08:19:14 | null | UTF-8 | R | false | false | 2,324 | r | offspring.R | ##' @method child tbl_tree
##' @export
##' @rdname child
##' @examples
##' library(ape)
##' tree <- rtree(4)
##' x <- as_tibble(tree)
##' child(x, 4)
child.tbl_tree <- function(.data, .node, ...) {
    valid.tbl_tree(.data)

    ## A node label may be supplied instead of a numeric id; translate it.
    if (is.character(.node)) {
        .node <- .data$node[.data$label == .node]
    }

    ## Direct children are the rows whose parent is .node; the extra
    ## parent != node condition drops the root's self-referencing row.
    is_direct_child <- .data$parent == .node & .data$parent != .data$node
    .data[is_direct_child, ]
}
##' @method offspring tbl_tree
##' @export
##' @rdname offspring
##' @examples
##' library(ape)
##' tree <- rtree(4)
##' x <- as_tibble(tree)
##' offspring(x, 4)
offspring.tbl_tree <- function(.data, .node, tiponly = FALSE, self_include = FALSE, ...) {
    if (missing(.node) || is.null(.node)) {
        stop(".node is required")
    }

    ## A single query node yields one table of descendants.
    if (length(.node) == 1) {
        return(.offspring.tbl_tree_item(.data = .data, .node = .node,
                                        tiponly = tiponly,
                                        self_include = self_include, ...))
    }

    ## Several query nodes yield a list of tables, named after the nodes.
    out <- lapply(.node, function(one_node) {
        .offspring.tbl_tree_item(.data = .data, .node = one_node,
                                 tiponly = tiponly,
                                 self_include = self_include, ...)
    })
    names(out) <- .node
    out
}
#' Descendants of a single node (workhorse behind offspring.tbl_tree).
#'
#' @noRd
#' @keywords internal
.offspring.tbl_tree_item <- function(.data, .node, tiponly = FALSE, self_include = FALSE, ...) {
    direct <- child.tbl_tree(.data, .node)

    ## Never report (or expand through) the root's self-referencing row.
    ## https://github.com/GuangchuangYu/ggtree/issues/239
    root_id <- rootnode.tbl_tree(.data)$node
    direct <- direct[direct$node != root_id, ]

    ## No children: return an empty table, or the query node's own row when
    ## self_include is requested.
    if (nrow(direct) == 0) {
        if (self_include) {
            return(.data[.data$node == .node, ])
        }
        return(direct)
    }

    ## Adjacency list: slot p holds the direct children of node p.
    parents <- .data$parent
    nodes <- .data$node
    child_map <- vector("list", max(parents))
    for (k in seq_along(parents)) {
        child_map[[parents[k]]] <- c(child_map[[parents[k]]], nodes[k])
    }

    ## Breadth-first accumulation of every descendant, level by level.
    found <- direct$node
    frontier <- direct$node
    while (length(frontier) > 0) {
        frontier <- unlist(child_map[frontier])
        found <- c(found, frontier)
    }

    if (self_include) {
        found <- c(.node, found)
    }

    descendants <- .data[nodes %in% found, ]
    if (tiponly) {
        ## Tips are the nodes with ids below the root id (ape-style
        ## numbering, as assumed by the original implementation).
        return(descendants[descendants$node < root_id, ])
    }
    descendants
}
|
1385e2e3ea99e740bcd9584d3ce64a4ce6e4d851 | cb67ef36ed6beb359ef05e1987393a50b0717c5e | /flashcards_anki.r | f2c58d0d23c0e24655b9ea90ee92e1cbdb3edabd | [] | no_license | AMChierici/study_notes | 8b368c5eacd553281038f3e7a85bd4db82ac5fd7 | ac12f82aa9dd40e28634b43faaae0899036600e0 | refs/heads/master | 2020-04-15T11:13:52.068093 | 2013-09-19T12:32:00 | 2013-09-19T12:32:00 | 41,672,226 | 0 | 3 | null | 2015-08-31T11:40:27 | 2015-08-31T11:40:26 | null | UTF-8 | R | false | false | 565 | r | flashcards_anki.r | ct2_card_number_f <- 2*(1:{466/2}) - 1
# Anki import deck for the 466-page "ct2" scan: each output line pairs an
# odd-numbered page image (card front, ct2_card_number_f computed above) with
# the following even-numbered page (card back), the two fields separated by ";".
ct2_deck <- paste("<img src=\"ct2_Page_", sprintf("%03d", ct2_card_number_f),
                  ".png\">;<img src=\"ct2_Page_", sprintf("%03d", ct2_card_number_f + 1),
                  ".png\">", collapse = "\n", sep = "")
# Bug fix: the original called write(deck, ...) but no object named `deck`
# exists anywhere in this script; the object built above is `ct2_deck`.
write(ct2_deck, file = "c:/matt/temp/ct2_deck.txt")
# Same construction for the 562-page "st8" scan: fronts are pages 1, 3, ..., 561.
st8_card_number_f <- 2 * seq_len(562 / 2) - 1
st8_deck <- paste("<img src=\"st8_Page_", sprintf("%03d", st8_card_number_f),
                  ".png\">;<img src=\"st8_Page_", sprintf("%03d", st8_card_number_f + 1),
                  ".png\">", collapse = "\n", sep = "")
write(st8_deck, file = "c:/matt/temp/st8_deck.txt")
|
6efdf44f9a63a54b3e9afa5d566cedaf881d397e | e20ae46b59c09099a3b49e1ea018dfc855c11541 | /R/data_documentation.R | 5ab29f99987f06357a93f84bab1d5f7e02cb1b0d | [] | no_license | Daenecompass/eliter | a081eb7f89ab34e9f2e34c3937e791c60a776873 | 884885a2e747e6cbb9560d4b11851ddc4e7c640b | refs/heads/master | 2023-03-17T18:19:04.883599 | 2020-11-16T21:25:50 | 2020-11-16T21:25:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 22,698 | r | data_documentation.R | #' Danish Power Elite Careers
#'
#' A set of sequences
#' @name careers
#' @docType data
NULL
#' Danish Elite Network 2012/2013
#'
#' A affiliation network
#'
#' @name den
#' @docType data
NULL
#' Power Elite 2013
#'
#' A propopographic dataset on the Danish power elite in 2013
#'
#' @name pe13
#' @docType data
NULL
#' Danish Elite Network 2012/2013
#'
#' A affiliation network
#'
#' @name den17
#' @docType data
NULL
#'Directors 2008 dataset
#'
#'Prosopographical data on the top 100 CEO's from the 82 largest Danish
#'corporations in 2008.
#'@details The directors dataset is prosopographical data collected from a wide
#' array of sources on biographic and corporate information. Sources include
#' the Danish variant of Who's Who (Blaa Bog), a private business information
#' database (Greens Erhvervsinformation), journalistic portrait articles,
#' article search engines, bibliographic databases and financial reports. CEOs
#' from 82 corporations were selected according to their position as CEO in
#' December 2007. 18 executives are included on other criteria, taking into
#' account the magnitude of the corporations and issues regarding ownership and
#' control, resulting in a final population of 100 CEOs. The 82 corporations
#' have formal ownership and management located in Denmark and were selected
#' through either financial capital, measured as having a turnover of over five
#' billion DKK (650 million Eur.), or organizational capital, defined as having
#' at least 5000 employees; 34 corporations were included on both criteria, 45
#' on financial capital and three on organizational capital alone. To avoid
#' including investors, rather than executives, a minimum of 500 employees was
#' also required, excluding 12 firms. Companies acting only as subsidiaries
#' were also excluded. Data is for public use and no author permission is
#' needed, but we would love to hear from you if you find the data useful.
#' You can find an analysis of this dataset in the \link{soc.ca} package.
#'
#'@name directors08
#'@docType data
#'@author Christoph Ellersgaard
#'@author Anton Grau Larsen
#'@references Ellersgaard, Christoph, Anton Grau Larsen, og Martin D. Munk.
#' 2012. "A Very Economic Elite: The Case of the Danish Top CEOs". Sociology.
#'@references Ellersgaard, Christoph Houman, og Anton Grau Larsen. 2010.
#' "Firmaets Maend". Master Thesis, Copenhagen: University of Copenhagen.
#'@references Ellersgaard, Christoph Houman, og Anton Grau Larsen. 2011.
#' "Kulturel kapital blandt topdirektoerer i Danmark - En domineret
#' kapitalform?" Dansk Sociologi 22(3):9-29.
#'@references Larsen, Anton Grau, og Christoph Houman Ellersgaard. 2012. "Status
#' og integration paa magtens felt for danske topdirektoerer". Praktiske
#' Grunde. Nordisk tidsskrift for kultur- og samfundsvidenskab 2012(2-3).
#'@keywords data
NULL
#' Top 1000 corporations in Denmark in 2013
#'
#' This dataset was used for the article: "Who listens to the top? Integration of the largest corporations across sectoral networks" published in Acta Sociologica and written by Anton Grau Larsen and Christoph Ellersgaard in 2017.
#' The example is the code that replicates the analysis in that article.
#' @name corp13
#' @docType data
#' @examples
#' # Who listens to the top -----
#' library(eliter)
#' library(Matrix)
#' library(RColorBrewer)
#' library(ggthemes)
#' library(car)
#'
#' data(corp13)
#'
#' # Active variables ------
#' active <- data.frame(Turnover = rank(corp13$OMSÆTNING.10, na.last = "keep"),
#' Employees = rank(corp13$ANSATTE.10, na.last = "keep"),
#' Equity = rank(corp13$EGENKAPITAL.10, na.last = "keep"),
#' Dominating = corp13$dominerende_num,
#' Finance = corp13$finance,
#' Coop = corp13$coop,
#' Global500 = corp13$Global,
#' Books = corp13$books,
#' "Radio and Tv" = corp13$radio_tv
#' )
#'
#' rownames(active) <- corp13$ORG_NAVN
#' colSums(sapply(active, is.na))
#' active.complete <- na.omit(active)
#'
#' num.mem <- corp13[, grep("Memberships",colnames(corp13))]
#'
#'
#' # Table 2: Network statistics for each sectorial network ------
#' # § A table, divided by sector, with number of affiliations, individuals, largest component, diameter of the largest component
#'
#'
#' describe.sector <- function(x){
#' out <- list()
#' out$"Affiliations" <- length(table(droplevels(x$AFFILIATION)))
#' out$"Postions" <- nrow(x)
#' out$"Individuals" <- length(table(droplevels(x$NAME)))
#' net.sector <- eliter::elite.network(droplevels(x), result = "affil")
#' net.sector.com <- largest.component(net.sector, cut.off = 0.001)
#' out$"Largest component" <- vcount(net.sector.com)
#' out$"Component diameter" <- diameter(net.sector.com, weights = NA, directed = FALSE)
#' out$"Highest degree" <- max(degree(net.sector, loops = FALSE, mode = "out"))
#' out
#' }
#'
#' table.2 <- t(sapply(sector.rels, describe.sector))
#'
#' #write.csv(table.2, file = "output_corporate/Table_2_Network_stats_per_sector.csv")
#'
#' # Table 3: Mean number of memberships per sector -----
#' table.3.list <- list()
#' table.3.list$Finance <- aggregate(num.mem, by = list(corp13$finance), mean, na.rm = TRUE)[2, -1]
#' table.3.list$Dominant <- aggregate(num.mem, by = list(corp13$dominerende_num), mean, na.rm = TRUE)[2, -1]
#' table.3.list$Coop <- aggregate(num.mem, by = list(corp13$coop), mean, na.rm = TRUE)[2, -1]
#' table.3.list$"Global 500" <- aggregate(num.mem, by = list(corp13$Global), mean, na.rm = TRUE)[2, -1]
#' table.3.list$"All" <- apply(num.mem, 2, mean, na.rm = TRUE)
#'
#' table.3b.list <- list()
#' table.3b.list$Finance <- aggregate(num.mem, by = list(corp13$finance), sd, na.rm = TRUE)[2, -1]
#' table.3b.list$Dominant <- aggregate(num.mem, by = list(corp13$dominerende_num), sd, na.rm = TRUE)[2, -1]
#' table.3b.list$Coop <- aggregate(num.mem, by = list(corp13$coop), sd, na.rm = TRUE)[2, -1]
#' table.3b.list$"Global 500" <- aggregate(num.mem, by = list(corp13$Global), sd, na.rm = TRUE)[2, -1]
#' table.3b.list$"All" <- apply(num.mem, 2, sd, na.rm = TRUE)
#'
#' table.3b <- t(do.call(what = rbind, args = table.3b.list))
#' table.3b <- round(table.3b, 1)
#' rownames(table.3b) <- sapply(str_split(rownames(table.3b), " : "), tail, 1)
#'
#' tab.3.n <- c(sum(corp13$finance, na.rm = TRUE),
#' sum(corp13$dominerende_num),
#' sum(corp13$coop),
#' sum(corp13$Global),
#' nrow(num.mem)
#' )
#'
#' table.3 <- t(do.call(what = rbind, args = table.3.list))
#' table.3 <- round(table.3, 1)
#' rownames(table.3) <- sapply(str_split(rownames(table.3), " : "), tail, 1)
#'
#' table.3[] <- paste(table.3[], " (", table.3b[], ")", sep = "")
#' table.3 <- rbind(table.3, N = tab.3.n)
#'
#' #write.csv(table.3, file = "output_corporate/Table_3_Mean_number_of_memberships_per_sector.csv")
#'
#' # Numbers for big linkers paragraph ----
#' net.corp.all <- elite.network(sector.rels$Corporations)
#' net.corp.com <- largest.component(net.corp.all, cut.off = 0)
#' table(V(net.corp.com)$memberships)
#' connectors <- V(net.corp.com)$name[V(net.corp.com)$memberships > 1]
#'
#' chairmen <- unique(sector.rels$Corporation$NAME[sector.rels$Corporation$ROLE == "Chairman"])
#' ceo <- unique(sector.rels$Corporation$NAME[sector.rels$Corporation$ROLE == "Chief executive"])
#' nonordinary <- unique(sector.rels$Corporation$NAME[sector.rels$Corporation$ROLE %in% c("Chief executive", "Chairman", "Vice chairman", "Executive")])
#' length(connectors)
#' sum(connectors %in% chairmen)
#' sum(connectors %in% ceo)
#' sum(connectors %in% nonordinary)
#' ordinary <- connectors[(connectors %in% nonordinary) == FALSE]
#' length(ordinary)
#'
#' chairmen.all <- V(net.corp.all)$name %in% chairmen
#' ceo.all <- V(net.corp.all)$name %in% ceo
#' ordinary.all <- (V(net.corp.all)$name %in% nonordinary) == FALSE
#' table(ordinary.all)
#' table(chairmen.all)
#'
#' rels.corp <- sector.rels$Corporation
#' rels.corp <- rels.corp[-which((rels.corp$NAME %in% nonordinary) == FALSE),]
#' net.corp.org.red <- elite.network(rels.corp, result = "affil")
#' net.corp.org.red <- largest.component(net.corp.org.red, cut.off = 0)
#' net.com.org <- elite.network(sector.rels$Corporations, result = "affil")
#' net.com.org <- largest.component(net.com.org, cut.off = 0)
#' vcount(net.corp.org.red) - vcount(net.com.org)
#'
#' # Correlation lines -----
#' changes <- list()
#' changes$vline <- geom_vline(xintercept = nrow(corp13) - 250, linetype = "dashed", color = "grey30")
#' changes$ylab <- ylab("Memberships")
#'
#' d.p <- data.frame(num.mem, omsætning = rank(corp13$OMSÆTNING, na.last = "keep"), navn = corp13$ORG_NAVN, check.names = FALSE)
#' mdp <- melt(d.p, id.vars = c("navn", "omsætning"))
#' p.cor.line.turnover <- ggplot(data = mdp, aes(x = omsætning, y = value)) + geom_point(shape = 21, alpha = 0.3, size = 1.2, fill = "whitesmoke") + facet_wrap(~variable, scales = "free_y") + geom_smooth(color = "red2", method = "loess") + theme_bw()
#' p.cor.line.turnover <- p.cor.line.turnover + changes + xlab("Rank by turnover")
#' p.cor.line.turnover <- p.cor.line.turnover + theme_tufte() + theme(strip.text = element_text(size = 12))
#'
#' # pdf(file = "output_corporate/Figure_2_Correlation_smooths_by_sector.pdf", height = 10, width = 12)
#' # p.cor.line.turnover
#' # dev.off()
#'
#' # Regression -----
#' reg <- as.matrix(active)
#' num.mem <- as.matrix(num.mem)
#' models.memberships <- lm(num.mem ~ reg)
#'
#' d <- data.frame(num.mem, reg)
#'
#' m1 <- lm(Memberships...Corporations ~ Turnover + Employees + Equity + Dominating + Finance + Coop + Global500 + Books + Radio.and.Tv, data = d)
#' vif.val <- car::vif(m1)
#'
#' reg.stats <- coef(summary(models.memberships))
#' for (i in seq_along(reg.stats)) colnames(reg.stats[[i]]) <- paste(names(reg.stats)[i],colnames(reg.stats[[i]]))
#'
#' sum.mod <- summary(models.memberships)
#' r.squared <- vector(length = length(sum.mod))
#' for (i in seq_along(sum.mod)) r.squared[i] <- sum.mod[[i]]$adj.r.squared
#' r.squared <- round(r.squared, 2)
#'
#' reg.stats <- do.call(cbind, reg.stats)
#' reg.estimate <- reg.stats[, grep("Estimate", colnames(reg.stats))]
#' reg.pvalue <- reg.stats[, grep("Pr(>|t|)", colnames(reg.stats))]
#' reg.error <- reg.stats[, grep("Error", colnames(reg.stats))]
#' reg.stars <- apply(reg.pvalue, 2, symnum, na = FALSE, cutpoints = c(0, 0.001, 0.01, 0.05, 0.1, 1), symbols = c("***", "**", "*", ".", " "))
#'
#' est.min <- format(round(reg.estimate - reg.error, 4), scientific = FALSE)
#' est.max <- format(round(reg.estimate + reg.error, 4), scientific = FALSE)
#' est.error <- paste("[", est.min, ":", est.max, "]", sep = "")
#' est.error <- format(round(reg.error, 5))
#'
#' reg.res <- reg.estimate
#' reg.res[] <- paste(format(round(reg.estimate, 5), scientific = FALSE), " (", reg.stars, ")", est.error, sep = "")
#'
#' reg.res <- rbind(reg.res, R.squared = r.squared)
#' reg.range <- apply(reg, 2, range, na.rm = TRUE)
#' reg.range <- paste("[", reg.range[1,], ":", reg.range[2,],"]", sep = "")
#' colnames(reg.res) <- names(sector.rels)
#'
#' reg.res <- cbind(Range = c("", reg.range, ""), Vif = c("", round(vif.val, 2), ""), reg.res)
#' reg.res <- rbind(reg.res, mean = c("", "", round(apply(num.mem, 2, mean), 2)))
#' reg.res <- rbind(reg.res, max = c("", "", round(apply(num.mem, 2, max), 2)))
#' reg.res
#'
#' #write.csv(reg.res, file = "output_corporate/table.4_Regressions.csv")
#'
#' ###############################################################################
#' # Figure 1 - The corporate network
#' market.leader <- corp13$dominerende
#' levels(market.leader) <- c("Incumbent", "Dominated")
#'
#' net.corp <- elite.network(sector.rels$Corporations, result = "affil")
#' lay <- layout_with_fr(net.corp, weights = E(net.corp)$weight^2, grid = "nogrid") * -1
#' p <- graph.plot(net.corp, lay, vertex.fill = market.leader, vertex.size = num.mem[,9], edge.alpha = 0.1, edge.color = "black", edge.size = 0.5)
#' p <- p + scale_fill_manual(values = c("red2", "white"), name = "Market position") + scale_size_continuous(range = c(1, 6), name = "Non-corporate \nmemberships")
#' p
#'
#' # pdf(file = "output_corporate/Figure.1_corporate_network.pdf", height = 11, width = 12)
#' # p
#' # dev.off()
#'
#' # Appendix -----
#' deg <- degree(net.corp)
#' com <- V(net.corp)$name %in% V(net.com.org)$name
#' turn <- corp13$OMSÆTNING.10
#' mem <- num.mem[,9]
#' com <- factor(com, labels = c("No", "Yes"))
#'
#' rd <- data.frame(Name = V(net.corp)$name, "Turnover" = turn, "Non-corporate memberships" = mem, "Degree" = deg, "In component" = com, check.names = FALSE)
#' rd.com <- rd[com == "Yes",]
#' rd.com <- rd.com[order(rd.com$Turnover, decreasing = TRUE),]
#'
#' rd.notcom <- rd[com == "No",]
#' rd.notcom <- rd.notcom[order(rd.notcom$Turnover, decreasing = TRUE),]
#'
#' # write.csv(head(rd.com, 25), file = "output_corporate/Appendix table_com.csv", row.names = FALSE)
#' # write.csv(head(rd.notcom, 25), file = "output_corporate/Appendix table_notcom.csv", row.names = FALSE)
NULL
# load("~/My Dropbox/R/Corporate/output_corporate/corporate_data_all.Rda")
# colnames(total.data)
# corp13 <- data.frame(
# Name = total.data$ORG_NAVN,
# CVR = total.data$CVR_NR,
# Adress = total.data$ADRESSE,
#
# Sector.Børsen = total.data$BØRSEN.BRANCHE,
# Turnover = total.data$OMSÆTNING.10,
# Turnover.change.09.10 = total.data$ÆNDRING.OMSÆTNING.9.10,
# Result.before.taxes = total.data$RESULTAT.FØR.SKAT.10,
# Result = total.data$NETTORESULTAT.10,
# Balance = total.data$BALANCE.10,
# Equity = total.data$EGENKAPITAL.10,
# Employees = total.data$ANSATTE.10,
# Employees.change.09.10 = total.data$ÆNDRING.ANSATTE.9.10,
#
# Component = total.data$component,
# Degree = total.data$deg,
# Betweenness = total.data$between,
# Closeness = total.data$close,
# Reach = total.data$n2,
#
# Memberships.corporations = total.data$Memberships...Corporations,
# Memberships.business.organisations = total.data$Memberships...Business.organisations,
# Memberships.interest.groups = total.data$Memberships...Interest.groups,
# Memberships.state = total.data$Memberships...State,
# Memberships.science.and.education = total.data$Memberships...Science.and.education,
# Memberships.culture = total.data$Memberships...Culture,
# Memberships.royal = total.data$Memberships...Royal,
# Memberships.leader.networks = total.data$Memberships...Leadership,
# Memberships.all.noncorporate = total.data$Memberships...All.noncorporate.sectors,
#
# All.media = total.data$all_media,
# National.newspapers = total.data$national_papers,
# Regional.newspapers = total.data$regional_papers,
# Local.newspapers = total.data$local_papers,
# Magazines = total.data$magazines,
# Radio.TV = total.data$radio_tv,
# Webmedia = total.data$websources,
# Newsbureaus = total.data$newsbureau,
# Books = total.data$books,
#
# Region = total.data$region,
# Coop = total.data$coop,
# Finance = total.data$finance,
# Stockexchange = total.data$stockexchange,
# Sector.dominance = total.data$dominerende,
# Global500 = total.data$Global
# )
#
# save(corp13, file = "~/soc.elite/data/corp13.rda")
##################################################################################
# Generate data
# source("~/My Dropbox/R/Elite/after_match_2.R")
#
# rel.all <- read.csv("~/My Dropbox/Elite/Data/Data/Relation_ALL.csv", sep="|", encoding="UTF-8", stringsAsFactor = FALSE)
#
# # BIQ LINK
# biq.id <- rel.all$BIQ_PERSON_ID
# biq.link <- paste("http://www.biq.dk/people/", biq.id, sep="")
# biq.link[biq.id == 0] <- ""
#
# # Essential columns
# # Gender
# load("~/My Dropbox/R/Elite/Navne/names_gender")
# gender.rel <- find.gender(as.character(rel.all$NAVN), names.gender)
# levels(gender.rel)[2] <- "Undefined"
#
# # CVR Number
# org.virk.other <- read.csv("~/My Dropbox/Elite/Dataindsamling/CSV/Organisation_BIQ_andre_virksomheder_connections.csv", sep="|", encoding="UTF-8")
# org.virk <- read.csv("~/My Dropbox/Elite/Dataindsamling/CSV/Organisation_BIQ.csv", sep="|", encoding="UTF-8")
# org.fond <- read.csv("~/My Dropbox/Elite/Dataindsamling/CSV/Organisation_BIQ_fonde.csv", sep="|", encoding="UTF-8")
#
# cvr.virk.other <- data.frame(ORG_NAVN = org.virk.other$ORG, CVR = org.virk.other$CVR_NR)
# cvr.virk <- data.frame(ORG_NAVN = org.virk$ORG, CVR = org.virk$CVR_NR)
# cvr.fond <- data.frame(ORG_NAVN = as.character(org.fond$NAVN), CVR = org.fond$CVR)
# cvr.all <- rbind(cvr.virk.other, cvr.virk, cvr.fond)
# cvr <- vector(length = nrow(rel.all))
#
# for (i in 1:nrow(cvr.all)) cvr[which(rel.all$ORG_NAVN == cvr.all$ORG_NAVN[i])] <- cvr.all$CVR[i]
#
# kilde <- as.factor(rel.all$kilde)
# levels(kilde) <- c("State", "Events", "Parliament", "Foundations", "Commissions",
# "NGO", "State", "Corporations", "VL-networks") # Her er der et ækelt hack der tager nogle grupper ud der havde "" som kilde og angiver dem til stat.
# org <- rel.all$ORG_NAVN
#
# # TAGS
#
# tag.frame <- rel.all[,grep("TAG", colnames(rel.all))]
# tag.frame <- apply(tag.frame, 2,as.character)
# tag.frame[tag.frame == ""] <- NA
#
# nif <- as.list(as.data.frame(t(tag.frame)))
# hurma <- lapply(nif, na.omit)
# tag.label <- unlist(lapply(hurma, paste, collapse = ", "))
#
#
#
# # Output
# data <- data.frame(NAME = rel.all$NAVN_MATCH,
# AFFILIATION = rel.all$ORG_NAVN,
# ROLE = rel.all$ROLE,
# GENDER = gender.rel,
# DESCRIPTION = rel.all$BESKRIVELSE,
# SOURCE = kilde,
# BIQ_LINK = biq.link,
# CVR = cvr,
# TAGS = tag.label
# )
#
#
# # Translated version
# # Export
# den <- data
# save(den, file = "~/soc.elite/data/den.rda")
#
###########################################################################
# Power elite 2013 [pe13]
# ind <- read.csv("~/My Dropbox/Elite/Data/Data/Individuals_elite.csv", sep = "|", stringsAsFactors = FALSE, encoding = "UTF-8", dec = ".", na.strings = c("", NA))
# load("~/My Dropbox/R/hoved/saved_results")
#
# ind <- ind[order(ind$Name),]
#
# # Check ordering
# all.equal(as.character(ind$Name), V(net.elite)$name)
#
# # Order of levels in Sector_cat
# oo <- c(
# "Business: Top 200",
# "Business: Multiposition",
# "Business: Medium-small",
# "Business: Investment and Pensions",
#
# "Interest groups: Employers and business",
# "Interest groups: Unions",
#
# "Interest groups: Farming",
# "Interest groups: Consumers",
#
# "State and politics: Royal court",
# "State and politics: Politics",
# "State and politics: Public Officials",
#
# "Science and education: University leaders",
# "Science and education: Economists and political scientists",
# "Science and education: Other scientists",
# "Science and education: Education",
#
# "Culture and law: Culture and charities" ,
# "Culture and law: Law"
# )
#
# ind$Sector_order <- factor(ind$Sector_order, levels = oo)
#
# # Rownames
# rownames(ind) <- ind$Name
#
# net.elite <- upgrade_graph(net.elite)
#
# # Save
# pe13 <- ind
#
# # get coordinates
#
# load(file = "~/Dropbox/R/politiken/outputs_politiken/SAVE/layout.Rda")
#
# add_layout_(graph = net.elite, lay[,-1])
#
#
# V(net.elite)$layout.x <- lay[,2]
# V(net.elite)$layout.y <- lay[,3]
#
# net.elite$layout <- cbind(x = lay[, 2], y = lay[, 3])
#
# save(pe13, net.elite, file = "~/eliter/data/pe13.rda")
##########################################################################
# Names.gender ----
# load("raw_data/names_gender")
#
# ###############################################################################
# # # Postnumre
# postnumre <- read.csv("raw_data/postnumre.csv", sep = ";", fileEncoding = "UTF-8")
#
# #
# #
# save(names.gender, postnumre, file = "R/sysdata.rda")
#
###############################################################################
# Firmaets mænd - Directors 2008
# directors08 <- read.csv("~/My Dropbox/Elite/Data/Firmaets Mænd 2008/Data_directors_2008.csv", sep = ",", fileEncoding = "UTF-8", dec = ",")
# save(directors08, file = "~/soc.elite/data/directors08.rda")
# Save den17 ----
# den17 <- read_delim("raw_data/den17.csv", delim = ";")
# save(den17, file = "data/den17.rda")
|
7bf4d4dc5ab16279aca84d4942d4a92195f0064b | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/unrepx/examples/examples.Rd.R | ca4b7916fdbcae351fe304b471543e2a52335fac | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 694 | r | examples.Rd.R | library(unrepx)
### Name: pdEff
### Title: Effect examples
### Aliases: pdEff bikeEff viseEff shnkEff shnkDisp
### Keywords: datasets
### ** Examples
# NOTE(review): this file is machine-extracted from the 'unrepx' help pages
# (the "### **" and "##D" markers come from R's example-extraction tooling),
# so manual edits here will be lost when the examples are re-extracted.
require("unrepx")
parplot(bikeEff, method = "Zahn")
# Save the graphics state so the 1x2 panel layout can be restored below.
opar <- par(mfcol = c(1,2))
hnplot(shnkEff, half = FALSE, main = "Normal plot")
hnplot(shnkEff, half = TRUE, main = "Half-Normal plot")
# Why ordinary normal plots are a bad idea
# Both plots have the same reference line
par(opar)
# Note - Examples in help pages for hnplot, parplot, refplot, and eff.test
# use pdEff for illustration
## Not run:
##D # Do try this at home:
##D hnplot(viseEff, ID = TRUE)
##D refplot(viseEff, ID = TRUE)
## End(Not run)
|
d0bc3256bc00ed735cacb53b11ef800521f8a288 | 06b9d2ece554bda6b4402785bc9c7b7a627a6c2f | /man/plotZScoresForFleets.Rd | f7840ae3a0204bde3fe68c1fa7e2394ea1223347 | [
"MIT"
] | permissive | wStockhausen/rTCSAM2015 | 4f2dd392b32d9a3ea9cce4703e25abde6440e349 | 7cfbe7fd5573486c6d5721264c9d4d6696830a31 | refs/heads/master | 2020-12-26T04:56:03.783011 | 2016-09-30T01:59:06 | 2016-09-30T01:59:06 | 26,103,387 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 680 | rd | plotZScoresForFleets.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotZScoresForFleets.R
\name{plotZScoresForFleets}
\alias{plotZScoresForFleets}
\title{Plot model fits to abundance, biomass and size frequencies as z-scores for fleet data components}
\usage{
plotZScoresForFleets(repObj, type = "fishery", showPlot = TRUE,
verbose = FALSE)
}
\arguments{
\item{repObj}{- model report list object}
\item{type}{- fleet type ('fishery' or 'survey')}
\item{showPlot}{- flag (T/F) to show plots immediately}
\item{verbose}{- flag (T/F) to print diagnostic info}
}
\value{
list by fleet of lists with ggplot objects
}
\details{
Uses \code{plotZScoresForCatchData()}.
}
|
e6031b47d3dbbab79807a45dc2d31d4f55cd0f61 | 6096980be53d6fe90e3c189474257762eb2a1159 | /man/SamplingDist.Rd | e343a89ebf33f02dcc162ba1881b469d2a99a37e | [] | no_license | Doctor-Gunn/DSTools | 666d9e1f1600dbbf189303fd3fd25411f868bc07 | c398006865ddb114903285d571fb0d110991b253 | refs/heads/master | 2021-04-27T12:33:10.752928 | 2018-03-08T00:32:14 | 2018-03-08T00:32:14 | 122,420,853 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 361 | rd | SamplingDist.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SamplingDist.R
\name{SamplingDist}
\alias{SamplingDist}
\title{Generates a poor man's sampling distribution}
\usage{
SamplingDist(samples = 50, sample_size = 100)
}
\arguments{
\item{samples:}{Specify how many samples to run}
\item{sample_size:}{the number of obs in each sample}
}
|
1abda52aa491affc50ea2ed5194b3553e25782f1 | 2bef884993087187e8b7dd0928d595c0f99dbe29 | /man/dart.Rd | b2e67163f3d15a2f61fde7db1b0abb8d878f7eb5 | [] | no_license | dwnadler/rEHS | 5885662d3cce2d7ab7fe0fb6be145de9dcfce1b8 | aed15f6f584e244dd76ab5768e9060300d8a58d4 | refs/heads/main | 2023-08-19T09:05:09.885111 | 2021-09-24T21:00:34 | 2021-09-24T21:00:34 | 405,216,434 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,766 | rd | dart.Rd | \name{dart}
\alias{dart}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Days away/restricted or transfer rate
}
\description{
A calculation that describes the number of recordable injuries and illnesses per 100 full-time employees that resulted in days away from work, restricted work activity and/or job transfer that a company has experienced in any given time frame.
}
\usage{
dart(dart.incidents, total.labor.hours)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dart.incidents}{
number of dart-related incidents as defined above
}
\item{total.labor.hours}{
total number of hours employees worked in a reporting period
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
## dart(2, 28000)
## "DART: 14.29"
## For every 100 employees, 14.29 incidents resulted in lost or restricted days or job transfer due to work related injuries or illnesses.
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory (show via RShowDoc("KEYWORDS")):
% \keyword{ ~kwd1 }
% \keyword{ ~kwd2 }
% Use only one keyword per line.
% For non-standard keywords, use \concept instead of \keyword:
% \concept{ ~cpt1 }
% \concept{ ~cpt2 }
% Use only one concept per line.
|
c8877fb0f84b871fc8d5100ab10949840c076de5 | d3d2fbf3eaf7075f4679997c422fefaa5d524f67 | /Visualization Techniques.R | b1b54e015cfea593e56ccce750c10dbef287659e | [] | no_license | harshitsaini/Business-Analytics-Data-Mining | eaca89f755d15ecbb580be57e2ff95a725086a33 | 9a0613906c2a7f945cbd2a2855d07dc9b2e98778 | refs/heads/master | 2021-04-03T09:12:41.303022 | 2018-04-23T12:22:37 | 2018-04-23T12:22:37 | 125,231,756 | 15 | 5 | null | null | null | null | UTF-8 | R | false | false | 3,837 | r | Visualization Techniques.R | library(xlsx)
# Load the used-car data from an Excel workbook (interactive file picker);
# df1 is the modelling copy, dffb the plotting copy kept with all columns.
df1= read.xlsx(file.choose(),1, header= T)
# Drop columns that are entirely NA (empty spreadsheet columns).
df1= df1[,!apply(is.na(df1), 2,all)]
# Derive vehicle age from manufacturing year (data snapshot year: 2017).
Age= 2017- df1$Mfg_Year
df1= cbind(df1,Age)
# Keep a full copy before dropping columns; dffb is used for the plots below.
dffb= df1
# Drop the first three columns (presumably identifier columns -- confirm
# against the spreadsheet layout).
df1= df1[,-c(1,2,3)]
head(df1)
str(df1)
# Encode the categorical columns as factors.
df1$Transmission= as.factor(df1$Transmission)
df1$C_Price= as.factor(df1$C_Price)
str(df1)
summary(df1)
# Backup, then drop row 23 (flagged as an outlier; see the screening below).
dfb=df1
df1=df1[-23,]
# Screen dffb for extreme observations before removing them.
dffb[dffb$Price>70,]
dffb[dffb$Price>12,]
dffb[dffb$KM>150,]
# Remove the rows flagged above as outliers.
dffb= dffb[-c(13,23,29,65,73),]
range(dffb$KM)
range(dffb$Price)
# Scatter plot of price vs mileage, labelling every point with its car model.
plot(dffb$KM,dffb$Price, xlim= c(25,120), ylim=c(1,9),xlab="KM",ylab="Price", panel.first = grid())
#dffb$Model= as.factor(dffb$Model)
#dffb$Model= as.numeric(dffb$Model)
# Offset the labels slightly from the points and use small text to limit overlap.
text(dffb$KM,dffb$Price , dffb$Model, adj= c(-0.4,-0.4), cex= 0.5)
#dffb$Model= as.factor(dffb$Model)
df3= read.xlsx(file.choose(),1, header= T)
df3= df3[,!apply(is.na(df3), 2,all)]
palette()
palette(c("gray","black"))
plot(df3$Income, df3$Spending, xlim=c(0,225), ylim=c(0,11),
xlab="Income", ylab="Spending", col= as.factor(df3$Promoffer),
pch=19, cex=0.8, panel.first = grid())
plot(jitter(df3$Income,1), df3$Spending, xlim=c(0,225), ylim=c(0,11),
xlab="Income", ylab="Spending", col= as.factor(df3$Promoffer),
pch=20, cex=0.8, panel.first = grid())
par(mar=c(4,4,1,1), oma=c(1,1,1,1))
plot(jitter(df3$Income,1), df3$Spending, log= "xy",
xlab="Income", ylab="Spending", col= as.factor(df3$Promoffer),
pch=20, cex=0.7, panel.first = grid())
palette("default")
#MULTIVARIATE PLOT
#PARALLEL COORDINATES PLOT
library(MASS)
par(mfrow=c(2,1), cex=0.6, mar= c(3,3,0,0), oma=c(1,1,1,1))
df4= df1
levels(df4$Fuel_type)=1:length(levels(df4$Fuel_type))
df4=as.data.frame(lapply(df4,FUN=as.numeric))
parcoord(df4[which(df4$C_Price=='1'),-c(4,8)])
axis(2,at=axTicks(2), labels=c("0%","20%","40%","60%","80%","100%"))
grid()
parcoord(df4[which(df4$C_Price=='2'),-c(4,8)],col="gray")
axis(2,at=axTicks(2), labels=c("0%","20%","40%","60%","80%","100%"))
grid()
#Specialized Visualization
#Network Data
#Network Graph
#Two mode or bipartite graphs
#Example for association rules
item1= sample(LETTERS[1:10], size=50 ,T)
pool= letters[1:10]
item2=NULL
for(i in 1:50) {
item2=c(item2,sample(pool[-which(pool==tolower(item1[i]))],size=1,replace=T))
}
df5= data.frame(item1,item2)
library(igraph)
g= graph_from_data_frame(df5,directed = F)
V(g)$label= V(g)$name
V(g)[1:10]$type=1
V(g)[11:20]$type=2
V(g)$color= "gray"
E(g)$color= "black"
V(g)$shape= "circle"
V(g)$x= c(runif(10,0,5),runif(10,10,15))
V(g)$y= c(seq(10,1,by=-1),seq(10,1,by=-1))
E(g)$weight= count.multiple(g)
g1= simplify(g, remove.multiple = T)
E(g1)$width= 0.5*E(g1)$weight
size= NULL
for(i in V(g1)$name){
size=c(size,length(E(g1)[from(V(g1)[i])]))
}
V(g1)$size= 4*size
par(mar= rep(.1,4))
V(g1)$color= "gray"
E(g1)$color= "black"
plot(g1)
#Heirarchical Data
#Treemaps
df6= read.xlsx(file.choose(),1, header= T)
df6= df6[,!apply(is.na(df6), 2,all)]
library(treemap)
rec.size= ifelse(df6$price>=5000+df6$price/10, df6$price)
df6= cbind(df6,rec.size)
par(mar= rep(.1,4))
treemap(df6,index= c("item.category","subcategory","brand"),
vsize= "rec..size", vColor="rating",
type= "value", fun.aggregate = "mean",
palette = gray(0:4/4), fontsize.labels = c(11,9,6),
title= "", position.legend = "none")
#Geographical data
#Map chart
df7= read.xlsx(file.choose(),1, header= T)
df7= df7[,!apply(is.na(df7), 2,all)]
library(rworldmap)
mapDevice(rows= 2, columns= 1)
datamap= joinCountryData2Map(df7, nameJoinColumn = "Country", joinCode = "Name")
mapCountryData(datamap, nameColumnToPlot = "Inclusive.Internet.Index",
catMethod = "pretty", colourPalette = gray(7:0/7),
addLegend = F)
mapCountryData(datamap, nameColumnToPlot = "Corruptions.Perceptions.Index",
catMethod = "pretty", colourPalette = gray(7:0/7),
addLegend = F)
|
472aebb4e8339c5ac52bf9637d229a812521fcd6 | 6aaa50057d1b671b491761e6f4eb968ddf576825 | /DA_scripts/generate_most_likely_outcome.R | c4a5a54c908bd933205e14dd5affcf913841ba51 | [] | no_license | mingkaijiang/EucFACE_carbon_budget_paper | 6414e4c5c31c892ce2b56fc28cdc99b617cc2b4d | fa9c3e402c537936f6bd87fd9ac5ed46d04633c9 | refs/heads/master | 2021-08-17T03:15:22.341518 | 2020-12-28T23:10:52 | 2020-12-28T23:10:52 | 237,180,253 | 2 | 4 | null | null | null | null | UTF-8 | R | false | false | 2,071 | r | generate_most_likely_outcome.R | generate_most_likely_outcome <- function(inDF, obs) {
  ### prepare output
  # Seed the summary frame from row 3 of the samples, then fill three summary
  # rows per column: mode of the density, mean, and median.
  outDF <- inDF[3,]
  # NOTE: shadows base::ncol for the rest of this function
  ncol <- ncol(outDF)
  for (i in 1:ncol) {
    myden <- density(inDF[,i])
    # Mode = x at the peak of the kernel density estimate
    outDF[1,i] <- myden$x[which.max(myden$y)]
    outDF[2,i] <- mean(inDF[,i])
    outDF[3,i] <- median(inDF[,i])
  }
  ### delete unused columns
  outDF$logli <- NULL
  outDF$Prior <- NULL
  outDF$aic <- NULL
  outDF$bic <- NULL
  # Append a fourth row (filled with 1s) that is overwritten below as "Observed"
  outDF <- rbind(outDF, 1)
  outDF$Cat <- c("Pred_pdf",
                 "Pred_mean",
                 "Pred_median",
                 "Observed")
  ### assign observed values
  # NOTE(review): 'no.var' is taken from the enclosing/global environment —
  # confirm it equals the number of prediction columns in outDF.
  outDF[outDF$Cat=="Observed", 1:no.var] <- NA
  outDF$GPP[outDF$Cat=="Observed"] <- obs$GPP.mean
  outDF$NPP.leaf[outDF$Cat=="Observed"] <- obs$NPP.leaf.mean
  outDF$NPP.wood[outDF$Cat=="Observed"] <- obs$NPP.wood.mean
  outDF$NPP.root[outDF$Cat=="Observed"] <- obs$NPP.root.mean
  # Mycorrhizal NPP observed as the residual of GPP minus Ra and other NPP terms
  outDF$NPP.myco[outDF$Cat=="Observed"] <- obs$GPP.mean - obs$Ra.mean - obs$NPP.leaf.mean - obs$NPP.wood.mean - obs$NPP.root.mean
  outDF$NPP[outDF$Cat=="Observed"] <- outDF$NPP.leaf[outDF$Cat=="Observed"] + outDF$NPP.wood[outDF$Cat=="Observed"] + outDF$NPP.root[outDF$Cat=="Observed"] + outDF$NPP.myco[outDF$Cat=="Observed"]
  # Carbon-use efficiency = 1 - Ra/GPP
  outDF$CUE[outDF$Cat=="Observed"] <- 1 - (obs$Ra.mean/obs$GPP.mean)
  outDF$delta.Cleaf[outDF$Cat=="Observed"] <- obs$delta.C.leaf.mean
  outDF$delta.Croot[outDF$Cat=="Observed"] <- obs$delta.C.root.mean
  outDF$delta.Cmyco[outDF$Cat=="Observed"] <- obs$delta.C.myco.mean
  outDF$delta.Cag[outDF$Cat=="Observed"] <- 0.0
  outDF$delta.Cbg[outDF$Cat=="Observed"] <- 0.0
  outDF$delta.Cmicr[outDF$Cat=="Observed"] <- obs$delta.C.micr.mean
  outDF$delta.Csoil[outDF$Cat=="Observed"] <- obs$delta.C.soil.mean
  outDF$delta.Cwood[outDF$Cat=="Observed"] <- obs$delta.C.wood.mean
  outDF$Rhet[outDF$Cat=="Observed"] <- obs$Rhet.mean
  # No observed value exists for mycorrhizal allocation
  outDF$alloc.myco[outDF$Cat=="Observed"] <- NA
  print(outDF)
}
a1ceccf68601aa16df8854d96ebe21b0dc650757 | 8313157297eca4c7fe2628497268095fc0612d1f | /RHadoop/RCaseStudyUpdated/Scripts/13.r | 57de5faca06ca2d4e2d1f9751d3a77eabd501064 | [] | no_license | jayaise/bigdata-Raj | 0ea82fbf4da4020c2d80a10889d8da5eb18b0ce2 | 9bff03009522f082e9b9a6e048e209cd50d9fa0a | refs/heads/master | 2020-05-16T13:10:53.496322 | 2019-04-23T18:51:08 | 2019-04-23T18:51:08 | 183,067,349 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 739 | r | 13.r | # create table, load data
# Create avg_recharge: per-phone monthly average recharge (annual total / 12) by plan type
rhive.execute("create table avg_recharge as select phone_number, (total_recharge_amount/12) as monthly_avg_recharge, plan_type from RHive_Username.user_sales")
# Create combo_offer_list: customers whose summed monthly average recharge is at least 500
rhive.execute("create table RHive_Username.combo_offer_list as select phone_number, x.monthly_recharge from (select phone_number, SUM(monthly_avg_recharge) as monthly_recharge from RHive_Username.avg_recharge group by phone_number) x where x.monthly_recharge >= 500")
# Preview the first rows, then pull the eligible phone numbers
combo_offer_list = rhive.query("select * from RHive_Username.combo_offer_list limit 10")
print(combo_offer_list)
combo_offer_user_list = rhive.query("select phone_number from RHive_Username.combo_offer_list")
print(combo_offer_user_list) |
07e82253f26334c5b51cfa13a0ce97d62a772d75 | 3a6894fcd504c2843d9162cc57df2b073cb92d63 | /cachematrix.R | 9f1ed4dff24370d868c0d1b72e6cc9fcfc5546a7 | [] | no_license | Batte/ProgrammingAssignment2 | 9defea8c7f3a4ced3a07cff34251af564ef7a253 | 7dad9b1ce5daaffd7122763fdcb48b477cb439c8 | refs/heads/master | 2021-01-19T20:49:02.464442 | 2014-07-27T17:48:09 | 2014-07-27T17:48:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 859 | r | cachematrix.R | ## makeCacheMatrix creates 4 atomic functions
## set the value of the supplied matrix, get the matrix
## return the inverse of the matrix, from cache if possible
## otherwise calculate the inverse and cache the results
## Build a caching wrapper around a matrix. The returned list exposes four
## closures sharing one enclosing environment: set/get for the matrix itself,
## setInverse/getInverse for the cached inverse. Replacing the matrix via
## set() resets the cached inverse to NULL.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(t) {
    x <<- t
    # A new matrix invalidates any previously cached inverse
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setInverse <- function(solve) {
    cached_inverse <<- solve
  }
  getInverse <- function() {
    cached_inverse
  }
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## cacheSolve returns the inverse of the original matrix x
## from cache if possible
## Return the inverse of the matrix held in a makeCacheMatrix() wrapper 'x'.
## A previously cached inverse is returned directly (with a message);
## otherwise the inverse is computed with solve(), stored in the cache,
## and returned. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  } else {
    message("inverse was already calculated and cached")
  }
  cached
}
|
6e86202a86b69cc09480a1598ecd5c71785471c9 | aeb9c1b695b40727c7cb70019ba67a3cbbd03cf2 | /docssource/rhelp/schoolexam.Rd | 2d6bc8b96ef1e0fc6765b0a4e9764f6648e446d3 | [] | no_license | gamlj/gamlj.github.io | ed589b9ac86903902ab55ebbbacffe03afe9b5e4 | d774d46b64040cee63a834d939538a816b6db0b9 | refs/heads/master | 2023-06-14T14:27:12.453343 | 2023-06-12T17:33:25 | 2023-06-12T17:33:25 | 183,435,098 | 2 | 3 | null | 2023-06-12T16:12:02 | 2019-04-25T12:59:14 | CSS | UTF-8 | R | false | true | 405 | rd | schoolexam.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{schoolexam}
\alias{schoolexam}
\title{Clustered data for logistic regression}
\usage{
data(schoolexam)
}
\description{
Simulated data for testing logistic mixed models.
Simulation of pupils' data clustered across schools, with a dichotomous outcome.
}
\examples{
data(schoolexam)
}
\keyword{datasets}
|
1176050bbbfd80301c311d88cdd928e3fbefb04e | 660cdaae5d32b0abbfdbb400fa22db5ce5cefc3a | /R/prepareforest.R | 669e12612242ffdf7fad47a275ad89ebed7d42c2 | [] | no_license | Riodinino/TROLLmsc | 19d3a9ad9c762af37edab19de13c9b121b2007c2 | cf06915f673adae5a02bd2164bb2f109f472a63d | refs/heads/master | 2020-03-15T13:28:25.305461 | 2018-05-16T23:48:58 | 2018-05-16T23:48:58 | 132,167,533 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,867 | r | prepareforest.R | #' @import tidyverse
library(tidyverse)
#'
#' Prepare a census-based forest for a TROLL simulation
#'
#' @param data data.frame; the censuses for the plot you want to work on.
#' @param year Integer. The year corresponding to your simulation start point.
#' @param censuscol Character. Name of the column indicating the census years.
#' @param label Character. Name of the column indicating species labels, if you have defined them.
#' @param speciescol Character. The name of the column indicating species identity. Should be of the form Genus_species, matching case and separator.
#' @param speciestraits Data.frame. The table containing your species-specific traits that will be used to parametrise TROLL.
#' @param replacement Character. Procedure used to replace individuals belonging to missing species ("local", "regional" or "diameter").
#' @param i_arbre Character. Name of the column containing the unique tree identifier.
#' @param X Character. The name of the column containing X coordinates, expressed at the plot scale (not UTM or other projected coordinates).
#' @param Y Character. The name of the column containing Y coordinates, expressed at the plot scale (not UTM or other projected coordinates).
#' @param dbh Character. Name of the column containing the diameters at breast height. Circumferences are handled in the "extractplot" function.
#' @param milimeters Logical. Indicates whether the diameters are in millimeters (defaults to TRUE in the current signature).
#' @param plotcol Character. Name of the column containing the plot identifier.
#'
#' @return A data.frame with one row per tree: X and Y coordinates, dbh, species label and tree identifier, ready for TROLL.
#' @export
#'
#' @examples
prepareforest <- function (data, year = 1992, censuscol="campagne",label = "sp_lab", speciescol = "species",
                           speciestraits, replacement = "local",i_arbre = "i_arbre", X = "X", Y = "Y", dbh = "dbh", milimeters = T, plotcol = "n_parcelle") #???duplicated_method = "Strange"
{
  # NOTE(review): 'dbh' here is a bare column name picked up by dplyr::filter(),
  # not the 'dbh' character argument — confirm the census column is literally named "dbh".
  data <- filter(data, dbh >100)
  # Safety checks -----------------------------------------------------------
  if(!(speciescol %in% names(data)))
    stop("The species column you indicated doesn't match with dataset labels")
  if (!(censuscol %in% names(data)))
    stop("The census colum you indicated doesn't match with dataset labels")
  # Keep only the rows from the requested census year
  data <- subset(data, data[which(names(data) == censuscol)] == year)
  # print(summary(data[,which(names(data) == dbh)]))
  if (dim(data)[1] == 0) {stop("No data for this census year !")}
  # Create a default sp_lab column (row index) when the traits table has none
  if(!"sp_lab" %in% names(speciestraits)){speciestraits$sp_lab = 1:nrow(speciestraits)}
  # Coerce factor species columns to character on both tables before matching
  if(class(speciestraits[,which(names(speciestraits) == speciescol)]) == "factor")
  {
    speciestraits[,which(names(speciestraits) == speciescol)] <- as.character(speciestraits[,which(names(speciestraits) == speciescol)])
  }
  if(class(data[,which(names(data) == speciescol)]) == "factor")
  {
    data[,which(names(data) == speciescol)] <- as.character(data[,which(names(data) == speciescol)])
  }
  # Isolating parametrized and unparametrized species -----------------------
  ## Parametrized: individuals whose species appear in the traits table
  # print(paste("nrow data" , nrow(data)))
  param <- data[which(data[,which(names(data) == speciescol)]%in%speciestraits[,which(names(speciestraits) == speciescol)]),]
  param$sp_lab = rep(NA,nrow(param))
  # print(paste("nrow param" , nrow(param))) #debug check
  ## Missing: individuals of species absent from the traits table
  miss <- data[which(!(data[,which(names(data) == speciescol)]%in%speciestraits[,which(names(speciestraits) == speciescol)])),]
  miss$sp_lab = rep(NA,nrow(miss))
  miss <- miss %>% arrange(dbh)
  # print(paste("nrow miss" , nrow(miss))) #debug check
  ## Diagnostics
  print(paste0("Il y a ",nrow(unique(param[which(names(param) == speciescol)])),
               " especes correspondant a celles de TROLL, et ", nrow(unique(miss[which(names(miss) == speciescol)])),
               " especes manquantes, ce qui représente ", nrow(miss)*100/nrow(data), " % des individus de la parcelle."))
  warn <- paste0("Il y a ",nrow(unique(param[which(names(param) == speciescol)])),
                 " especes correspondant a celles de TROLL, et ", nrow(unique(miss[which(names(miss) == speciescol)])),
                 " especes manquantes, ce qui représente ", nrow(miss)*100/nrow(data), " % des individus de la parcelle", unique(data[,which(names(data)== plotcol)]) )
  # NOTE(review): hard-coded absolute Windows path — this diagnostics write
  # fails on any other machine.
  write.table(warn, file = file.path("C:/Users/nino.page/Desktop/TROLL project/treated_data/sim_prepare_paracou/warnings2",paste0(unique(data[,which(names(data)== plotcol)])[1],year,".txt")))
  # Sp_lab correspondance ---------------------------------------------------
  # Copy the TROLL species label onto every parameterized individual.
  # NOTE(review): 1:length(...) yields c(1, 0) when 'param' is empty; seq_len()
  # would be safer — left as-is here.
  # print(length(which(is.na(param$sp_lab == T))))#debug
  # print(length(param[,which(names(param) == speciescol)]))#debug
  for(i in 1:length(param[,which(names(param) == speciescol)]))
  {
    param$sp_lab[i] <- speciestraits[which(speciestraits[,which(names(speciestraits) == speciescol)] == param[i,which(names(param) == speciescol)]), which(names(speciestraits) == label)]
  }
  # print(paste("is it working",length(which(is.na(param$sp_lab == T)))))#debug
  # Replacement procedure ---------------------------------------------------
  # Missing-species individuals get replaced by sampled parameterized ones.
  ## Sampling weighted by local frequencies
  if (replacement == "local")
  {
    if(nrow(miss)*100/nrow(data) > 50)
      sampl <- dplyr::sample_n(param, nrow(miss), replace = T)
    else
      sampl <- dplyr::sample_n(param, nrow(miss), replace = F)
  }
  else if (replacement == "regional")
  {
    # NOTE(review): 'Freg' is not defined in this function or its arguments;
    # this branch errors if it is ever reached.
    if(!(Freg %in% names(speciestraits)))
      stop("The Regional Frequencies column you indicated doesn't match with the dataset labels.")
    sampl <- sample(unique(speciestraits[which(names(speciestraits) == speciescol)]),size = nrow(miss),prob = as.vector(speciestraits[which(names(speciestraits) == Freg)]), replace = F)
  }
  else if(replacement == "diameter")
  {
    # Sample replacements within matching diameter classes (breaks in mm)
    breaks <- c(100,150,300,420,675,1000,99999)
    # Seed 'sampl' with two placeholder rows, removed after the loop
    sampl = param[c(1,2),]
    dbhcol <- which(names(param)==dbh)
    for(i in 2:length(breaks))
    {
      target <- param[which(param[,dbhcol] >= breaks[i-1] & param[,dbhcol] < breaks[i]),]
      classe <- miss[which(miss[,dbhcol] >= breaks[i-1] & miss[,dbhcol] < breaks[i]),]
      lengthclass <- nrow(classe)
      print(paste("Classe ", breaks[i-1], ":", breaks[i], " - ",lengthclass, "Individus"))
      print(paste("Param ", breaks[i-1], ":", breaks[i], " - ",nrow(target), "Individus"))
      if(lengthclass > 0) #If there are individuals belonging to missing species in thisdiameter class
      {
        if(nrow(target)>0) #If there are individuals of known species in this same clas
        {
          if(nrow(target)>= lengthclass)
          {
            temp <- dplyr::sample_n(target, size = lengthclass, replace = F)
          }
          else
          {
            temp <- dplyr::sample_n(target, size = lengthclass, replace = T)
          }
        }
        else #If not then use a neighbour class to sample
        {
          if(i == 2) # If this is the first diameter class, jump to next
          {
            target <- param[which(param[,dbhcol] >= breaks[i] & param[,dbhcol] < breaks[i+1]),]
          }
          else # Else, jump to previous
          {
            target <- param[which(param[,dbhcol] >= breaks[i-2] & param[,dbhcol] < breaks[i-1]),]
          }
          #Sampling
          if(nrow(target) >= lengthclass)
          {
            temp <- dplyr::sample_n(target,size = lengthclass,replace = F)
          }
          else
          {
            temp <- dplyr::sample_n(target,size = lengthclass,replace = T)
          }
        }
        # if(nrow(temp)>1)
        sampl <- rbind(sampl, temp) #Update sample
        # temp <- temp[-c(2:nrow(temp)),]
        print(paste("nrow temp", nrow(temp)))
      }
    }
    sampl <- sampl[c(-1,-2),] #Take off the 2 first lines that are extra (gotodef)
  }
  print(nrow(miss));print(nrow(sampl))
  # Give each sampled replacement the coordinates, dbh and identifier of the
  # missing individual it stands in for.
  #Replace the sample tree characteristics by those of the missing (could be done reversely, but I orignially did this dirty way and it works since these are the variables we keep at the end : dbh, species, x, y, ID ; should be adapted if derived for every other use.)
  sampl$X <- miss[,which(names(miss) == X)] #May be written x
  sampl$Y <- miss[,which(names(miss) == Y)] #%idem y
  sampl$dbh <- miss[,which(names(miss) == dbh)] #the DBH is kept, obviously; the only risk is to obtain irrealistically big individuals but TROLL corrects when dbh > 1.5 dmax.
  sampl$i_arbre <- miss[,which(names(miss) == i_arbre)]
  forest <- rbind(param,sampl)
  # else if (replacement =="classes")
  # {
  #
  # }
  # Cleaning coordinates -------------------------------------
  forest$suppr <- rep(F, nrow(forest)) #Logical vector to tag individuals to be supressed
  # Scaling and rounding coordinates prior to process: shift negative
  # coordinates so the plot origin is at (0, 0), then round to integers.
  if(!(min(forest[,which(names(forest)== X)]) >= 0))
  {
    warning("Please note that you have negative coordinates for X")
    forest[,which(names(forest)== X)] <- forest[,which(names(forest)== X)] - min(forest[,which(names(forest)== X)])
  }
  if(!(min(forest[,which(names(forest)== Y)]) >= 0))
  {
    warning("Please note that you have negative coordinates for Y")
    forest[,which(names(forest)== Y)] <- forest[,which(names(forest)== Y)] - min(forest[,which(names(forest)== Y)])
  }
  # print(paste(class(forest[,which(names(forest)== X)]), class(forest[,which(names(forest)== Y)])))#debug
  if(class(forest[,which(names(forest)== X)]) != "integer" | class(forest[,which(names(forest)== Y)]) != "integer")
    warning("Coordinates are rounded in this function for the sake of safety")
  forest[which(names(forest)== X)] <- round(forest[which(names(forest)== X)],digits = 0)
  forest[which(names(forest)== Y)] <- round(forest[which(names(forest)== Y)],digits = 0)
  # Keep only the columns TROLL needs: X, Y, dbh, species label, tree id
  final_forest <- forest[,c(which(names(forest)== X),which(names(forest)== Y),which(names(forest)== dbh),which(names(forest)== label), which(names(forest)== "i_arbre"))]
  if(milimeters == FALSE)
  {
    warning("dbh must be milimeters. It will be mutiplicated by 10 assuming you use centimeters. If not, consider pre-treating data.")
    data[,which(names(data) == dbh)] <- 10*data[which(names(data) == dbh)]
  }
  return(final_forest)
}
|
8b3abdfe309e4360e0ed1dc7f77b9c126860b7d5 | 43a70fcc3c3558b2fc9296eb178fa4e215d3d756 | /R/wedge_excess_mortality_array.R | 729f3afe990473417e131e89b2ac1de9ad506286 | [] | no_license | laurettemhlanga/PopulationSimulation | ef1a166b3db12866e8daee739b66a0f290fc3014 | 9e7d4c3df4438f15cf5177612ed4ab551cb7a83b | refs/heads/master | 2021-12-23T15:23:23.017571 | 2021-08-09T15:18:07 | 2021-08-09T15:18:07 | 166,325,128 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,539 | r | wedge_excess_mortality_array.R |
#' wedge_excess_mortality_matrix
#'
#' A function that returns a matrix of excess-mortality values for each age and time since infection in the simulation.
#'
#' @param n_age_steps the number of age steps each birth cohort is followed for
#' @param list_of_times a numeric vector of length min:max; indicates the range of times to be included in the simulation. Note that date format is not used.
#' @param excess_mortality the excess mortality function
#' @param time_step the time step between consecutive list_of_birth_times
#' @return a matrix with one row per time since infection (tau) and one column per age; entries where age < tau are NA.
#'
#' @export
#'
#'
# We still need to agree on how to treat values arising from calculations where
# age < tau; at the moment an ifelse() sets those entries to NA to avoid them.
wedge_excess_mortality_matrix <- function(n_age_steps, list_of_times, time_step,
                                          excess_mortality
                                          ){
  # Midpoints of each age step: time_step/2, 3*time_step/2, ...
  age_midpoints <- seq(from = time_step / 2, by = time_step, length.out = n_age_steps)
  # Times since infection (tau) take the same grid as the age midpoints
  infection_times <- age_midpoints
  excess_matrix <- matrix(NA, nrow = length(infection_times), ncol = length(age_midpoints))
  for (row_idx in seq_along(infection_times)) {
    tau <- infection_times[row_idx]
    row_values <- excess_mortality(ages = age_midpoints,
                                   times = list_of_times,
                                   times_since_i = tau)
    # Entries where age < tau are undefined and kept as NA
    excess_matrix[row_idx, ] <- ifelse(tau <= age_midpoints, row_values, NA)
  }
  excess_matrix
}
|
9304944272544feda15ba8bc5181129369dea316 | 80b3a7af905335d4e04bc1644c93589a881e3634 | /tests/testthat/test-tabulate_throughputs_by.R | 8faf214722790826e67cd254db8588daca309e05 | [] | no_license | BAAQMD/qtytools | 53a19738fcce07c9aa71d3a9cb3f605bc8eddf58 | cf396ec102f1b071f65ee0588f9b1bc0c906c14a | refs/heads/master | 2022-07-12T13:21:17.251252 | 2022-04-05T17:54:25 | 2022-04-05T17:54:25 | 131,063,856 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 399 | r | test-tabulate_throughputs_by.R | context("tabulate_throughputs_by")
test_that("tabulate throughputs by year", {
  # Tabulate the fixture with 'year' as the single grouping variable
  tabulated <-
    tabulate_throughputs_by(
      ems_and_tput,
      year)
  # Build the expected table by hand: sum throughputs per year, spread the
  # years into columns, and keep columns in the order tabulation produces
  expected <-
    ems_and_tput %>%
    sum_throughputs_by(
      year) %>%
    tidyr::spread(
      year,
      tput_qty) %>%
    dplyr::select(
      `1991`,
      `1992`,
      tput_unit)
  expect_equal(
    tabulated,
    expected)
})
|
6eba240d93089783bc4810797ab1a8d5cf50eb47 | 8da9625871ae207aa2c943240a666c10f1708358 | /Noro_mash_lines.R | 9383dcf60645487cf92c09fead8c3c87fa073212 | [] | no_license | nvpatin/Norovirus_manuscript | 55f797b542e54b7e5de2a21c4f6b8109940a29ca | 767fbf7c5b1712ea521f5246722e7f11657957b9 | refs/heads/master | 2023-01-30T09:49:31.621911 | 2020-12-17T21:01:02 | 2020-12-17T21:01:02 | 289,370,477 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 899 | r | Noro_mash_lines.R | library(ggplot2)
df <- data.frame(noro_mash_T0)
# Legend mapping: solid line = Mash distance, dashed = NoV titer
line_types <- c("Mash Distance"=1, "NoV Titer"=2)
# Mash distance from T0 (scaled x1000 onto the primary axis) per individual
p <- ggplot(data=df, aes(x=Day, y=1000*Mash_dist, group=Individual)) +
  geom_line(aes(color=factor(Individual), linetype="Mash Distance")) +
  theme_classic() +
  scale_x_continuous(breaks=c(0,3,6,9,12,15,18,21,24,27,30,33), expand = c(0, 0))
# Overlay log-transformed NoV titer for the same individuals
p <- p + geom_line(data=df, aes(x=Day, y=log(NoV_titer),
                                color=factor(Individual), linetype="NoV Titer"))
# Secondary axis back-transforms (exp) so labels read as raw NoV titers
p <- p + scale_y_continuous(sec.axis = sec_axis(~exp(.), name="NoV Titer",
                                                breaks=c(0,1e4,1e6,1e9)),
                            name="Mash Distance from T0", expand = c(0, 0))
p <- p + labs(color = "Individual", linetype="Data Set")
p <- p + scale_linetype_manual(values=line_types)
p
|
5327c0cb67501ea6537008d0729cfc96a83026f8 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/covr/examples/codecov.Rd.R | d4e6f662d3682c5d8da9b773b5224f9c32391dc7 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 201 | r | codecov.Rd.R | library(covr)
### Name: codecov
### Title: Run covr on a package and upload the result to codecov.io
### Aliases: codecov
### ** Examples
## Not run:
##D codecov(path = "test")
## End(Not run)
|
9a6cc9095b4b3b456e8532c559269fc794bdba56 | 29ced85982f8f7739f6b4df28f042c2299456549 | /Navigation/CalEff.R | 1131c3d87f25a06febf6cf98433268d8e066fecc | [] | no_license | ATLAS-HUJI/R | f5a056f5b9e82b277a2c1f41ad3c9f746e585fe1 | d74b5d21c7b8e70e42620633159bad6e887b391b | refs/heads/master | 2021-01-24T02:15:51.298877 | 2018-10-16T13:11:48 | 2018-10-16T13:11:48 | 122,840,774 | 4 | 3 | null | null | null | null | UTF-8 | R | false | false | 502 | r | CalEff.R | #function to determine track efficency over a set of x,y coordinates
#code by Ingo Schiffner
CalEff <- function (x,y)
{
  #remove positions where EITHER coordinate is missing, so x/y pairs stay
  #aligned (removing NAs from x and y independently could pair unrelated
  #points when the NA positions differ between the two vectors)
  keep <- !is.na(x) & !is.na(y)
  x_n <- x[keep]
  y_n <- y[keep]
  #get track length: sum of straight-line distances between successive fixes
  xd <- diff(x_n)
  yd <- diff(y_n)
  TrackLength <- sum(sqrt(xd^2 + yd^2))
  #get beeline distance from the first fix to the last
  xb <- x_n[1] - x_n[length(x_n)]
  yb <- y_n[1] - y_n[length(y_n)]
  Beeline <- sqrt(xb^2 + yb^2)
  #efficiency = beeline / track length, rounded to 2 decimal places
  Efficiency <- round(Beeline/TrackLength, 2)
  #returns track length, beeline distance and efficiency (in that order)
  list(TrackLength, Beeline, Efficiency)
}
3c657fecc25de857acc6bbcb4bf5580251029a4e | 54f97a3df0b3c48f511a6541eaa18b5bbc213319 | /R/utility_functions.R | 3176dec7a72bbdec021d5cb3e9b6eaa8be4f79cc | [] | no_license | ethanwhite/prism | cb73c1cbe8f1b2decad032199bc2a45ed9daf61e | 365bae6077b0ffe4eba7af8dacca9ca0177444e1 | refs/heads/master | 2021-01-24T15:06:42.263954 | 2015-07-06T21:40:57 | 2015-07-06T21:40:57 | 38,751,449 | 0 | 0 | null | 2015-08-03T13:22:07 | 2015-07-08T12:03:16 | R | UTF-8 | R | false | false | 5,462 | r | utility_functions.R | #' @title Get daily data file names for last 2 years
#' @description Utility function to download all filenames
#' for the last two years. This is to determine which are
#' currently showing as provisional, early, or stable.
#' @inheritParams get_prism_dailys
#' @param frequency \code{character} for the frequency. One of
#' "daily" or "monthly".
#' @importFrom RCurl getURL
#' @importFrom lubridate year
get_recent_filenames <- function(type, frequency) {
  frequency <- match.arg(frequency, c("daily", "monthly"))
  # Query the PRISM FTP listing for the current year and the previous one,
  # splitting each raw directory listing on CRLF into individual file names.
  current_year <- year(Sys.Date())
  listings <- lapply(c(current_year, current_year - 1), function(yr) {
    dir_url <- paste("ftp://prism.nacse.org", frequency, type, yr, "", sep = "/")
    raw_listing <- getURL(dir_url, ftp.use.epsv = FALSE, dirlistonly = TRUE)
    strsplit(raw_listing, split = "\r\n")[[1]]
  })
  # File names for this year followed by last year's
  unlist(listings)
}
#' helper function for handling months
#' @description Handle numeric month to string conversions
#' @param month a numeric vector of months (month must be > 0 and <= 12)
#' @return a character vector (same length as \code{month}) with 2 char month strings.
#' @examples
#' mon_to_string(month = c(1, 3, 2))
#' mon_to_string(month = 12)
mon_to_string <- function(month){
  # Validate the whole vector up front (every month must be in [1, 12]);
  # the scalar loop the previous version used also grew the output with
  # repeated assignment — a vectorized ifelse() handles all elements at once.
  if (any(month < 1 | month > 12)) {
    stop("Please enter a valid numeric month")
  }
  # Zero-pad single-digit months ("3" -> "03"); two-digit months pass through
  ifelse(month < 10, paste0("0", month), paste0(month))
}
#' handle existing directory
#' @description create new directory for user if they don't have one to store prism files
path_check <- function(){
  user_path <- NULL
  # No download path configured yet: prompt the user interactively
  if(is.null(getOption('prism.path'))){
    message("You have not set a path to hold your prism files.")
    user_path <- readline("Please enter the full or relative path to download files to (hit enter to use default '~/prismtmp'): ")
    # User may have input path with quotes. Remove these.
    user_path <- gsub(pattern = c("\"|'"), "", user_path)
    # Deal with relative paths; empty input (plain enter) falls back to ~/prismtmp
    user_path <- ifelse(nchar(user_path) == 0,
                        paste(Sys.getenv("HOME"), "prismtmp", sep="/"),
                        file.path(normalizePath(user_path, winslash = "/")))
    options(prism.path = user_path)
  } else {
    # A path is already configured; reuse it
    user_path <- getOption('prism.path')
  }
  ## Check if path exists; try to create it, and if it still does not exist
  ## afterwards, the path or permissions are bad — reset the option.
  if(!file.exists(file.path(user_path))){
    dir.create(user_path)
    if (!file.exists(file.path(user_path))){
      message("Path invalid or permissions error.")
      options(prism.path = NULL)
    }
  }
}
#' Helper function to check if files already exist
#' @description check if files exist
#' @param prismfile a list of full paths for prism files
#' @return a character vector of file names that already exist
prism_check <- function(prismfile){
  # Strip the ".zip" extension (anchored, literal dot) to get each file's
  # folder name. The previous strsplit(split = ".zip") treated the pattern as
  # a regex where "." matches ANY character, and sapply()'s type instability
  # made the empty-input case fragile; sub() + vapply() avoid both.
  file_bases <- sub("\\.zip$", "", prismfile)
  downloaded <- vapply(file_bases, function(base) {
    # Look inside the folder to see if the .bil is there
    # Won't be able to check for all other files. Unlikely to matter.
    ls_folder <- list.files(file.path(getOption("prism.path"), base))
    any(grepl("\\.bil", ls_folder))
  }, logical(1))
  # Return only the files that have not been downloaded/unpacked yet
  prismfile[!downloaded]
}
#' Process pre 1980 files
#' @description Files that come prior to 1980 come in one huge zip. This will cause them to mimic all post 1980 downloads
#' @param pfile the name of the file, should include "all", that is unzipped
#' @param name a vector of names of files that you want to save.
#' @details This should match all other files post 1980
#' @examples \dontrun{
#' process_zip('PRISM_tmean_stable_4kmM2_1980_all_bil','PRISM_tmean_stable_4kmM2_198001_bil')
#' process_zip('PRISM_tmean_stable_4kmM2_1980_all_bil',c('PRISM_tmean_stable_4kmM2_198001_bil','PRISM_tmean_stable_4kmM2_198002_bil'))
#' }
process_zip <- function(pfile,name){
  # NOTE(review): this function is deliberately disabled — the unconditional
  # stop() below halts execution until the errors noted upstream are fixed.
  stop("Fix errors in process_zip")
  # List the contents of the unzipped pre-1980 bundle
  tmpwd <- list.files(paste(options("prism.path")[[1]],pfile,sep="/"))
  ##Remove all.xml file
  file.remove(paste(options("prism.path")[[1]],pfile,grep("all",tmpwd,value = T),sep="/"))
  ## Get new list of files after removing all.xml
  tmpwd <- list.files(paste(options("prism.path")[[1]],pfile,sep="/"))
  # Base names = everything before the first dot of each file name
  fstrip <- strsplit(tmpwd,"\\.")
  fstrip <- unlist(lapply(fstrip,function(x) return(x[1])))
  unames <- unique(fstrip)
  # Keep only the base names the caller asked to save
  unames <- unames[unames%in%name]
  # Move each requested month's files into its own per-month directory,
  # mirroring the layout of post-1980 downloads
  for(j in 1:length(unames)){
    newdir <- paste(options("prism.path")[[1]],unames[j],sep="/")
    dir.create(newdir)
    f2copy <- grep(unames[j],tmpwd,value=T)
    sapply(f2copy,function(x){file.copy(from = paste(options("prism.path")[[1]],pfile,x,sep="/"),to = paste(newdir,x,sep="/")) })
    sapply(f2copy,function(x){file.remove(paste(options("prism.path")[[1]],pfile,x,sep="/")) })
    ### We lose all our metadata, so we need to rewrite it
  }
  ### Remove all files so the directory can be created.
  ## Update file list
  tmpwd <- list.files(paste(options("prism.path")[[1]],pfile,sep="/"))
  ## Now loop delete them all
  sapply(tmpwd,function(x){
    file.remove(paste(options("prism.path")[[1]],pfile,x,sep="/"))
  })
  # NOTE(review): file.remove() has no 'recursive' argument (unlink() does);
  # this call would misbehave if the stop() above were removed — verify.
  file.remove(paste(options("prism.path")[[1]],pfile,sep="/"),recursive = T)
}
|
1f1a7a1dfd40290013af795dd0bb999fb1e87696 | 5458181b0f0baba0b924cc1178c4e9327aeb866d | /nhanes-analysis/activity.R | 8d2e19a8007e7562d2be6c618a3064d5a08f8b1b | [
"MIT"
] | permissive | creiland/demos | cd41ac2ea578a09e9467e27b74bb589524dcda22 | f419fb63288d6a0488c7752ffa5b2a104721c451 | refs/heads/master | 2021-01-12T02:02:35.421755 | 2017-03-01T19:04:37 | 2017-03-01T19:04:37 | 78,460,358 | 0 | 0 | null | 2017-01-09T19:17:14 | 2017-01-09T19:17:14 | null | UTF-8 | R | false | false | 1,345 | r | activity.R | # Calculate survey weighted estimates
# Set up
setwd('C:/Users/creil/Desktop/Winter 2017/Info498/demos/nhanes-analysis')
# library(foreign)
library(survey)
library(dplyr)
library(Hmisc)
# Load demographic data, which includes survey weights
demographics <- sasxport.get('./data/DEMO_H.XPT')
# Load alcohol use data
alcohol <- sasxport.get('./data/ALQ_H.XPT')
# Merge alcohol and demographic data
nhanes <- left_join(alcohol, demographics, by='seqn')
# Take the sum of the weighting column `wtint2yr` - this estimates the total
# population the sample represents
wtint2yr_sum <- sum(nhanes$wtint2yr)
# Create a survey design that indicates the id, strata, and weights
survey_design <- svydesign(id=~seqn, strata=~sdmvstra,
                           weights=~wtint2yr,
                           data=nhanes, nest = TRUE)
# alq101 is the codebook question about 12+ drinks in the last year.
# Unweighted proportions of each response (table() + prop.table())
prop.table(table(nhanes$alq101))
# Survey-weighted counts of each response via svytable()
svytable(~alq101, design=survey_design)
# Survey-weighted proportions: prop.table() applied to the svytable() result
prop.table(svytable(~alq101, design=survey_design))
# Compare the unweighted and survey-weighted proportions above
3f59f536b7b6c2ce2ad45d4cbefa4fe3bd9699ca | 9ee587651e82c3efdf58036364c197829ffa57e1 | /Chapter1_FineScaleAcousticSurvey/PreliminaryAnalysis_RecordingInterference/HOBODataOrganising.R | d1d53f4f0c0fef2238cd90ec78473fb7adc444f1 | [
"Apache-2.0"
] | permissive | QutEcoacoustics/spatial-acoustics | 7f0fd2af6663200ab529a2f8979eec56a0bf2e40 | 5e8eaba29576a59f85220c8013d0b083ddb70592 | refs/heads/master | 2023-04-15T09:50:44.063038 | 2023-03-14T23:36:36 | 2023-03-14T23:36:36 | 222,621,976 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,755 | r | HOBODataOrganising.R | library(tidyverse)
# NOTE(review): setwd() returns the PREVIOUS working directory, so 'dir' holds
# the old path, not the backup folder — verify list.files(dir, ...) below
# targets the intended directory.
dir <- setwd("C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Fieldwork_Bowra/Aug2019_HOBOS/backup")
# Tag one logger's readings with recorder/point/transect IDs and write a copy
dt <- read.csv("output/HOBOTEMPRH9_BOWRA_WB.csv") %>%
  mutate(., "rec" = "13", "point" = "025", "transect" = "WB") %>%
  write.csv(., "output/HOBOTEMPRH9_BOWRA_WB1.csv")
# NOTE(review): 'data' is not defined in this script (the object read above is
# 'dt'), and day == c(...) compares element-wise with recycling;
# day %in% c(...) is probably intended — confirm.
WB <- filter(data, day == c("13", "14", "15")) %>%
  write.csv("C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Fieldwork_Bowra/Aug2019_HOBOS/output/HOBOTEMPRH13_BOWRA_WB.csv")
WA <- filter(data, day == c("15", "16", "17")) %>%
  write.csv("C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Fieldwork_Bowra/Aug2019_HOBOS/output/HOBOTEMPRH13_BOWRA_WA.csv")
# Hourly means of humidity and temperature for one tagged file
test <- read.csv("C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Fieldwork_Bowra/Aug2019_HOBOS/output/HOBOTEMPRH9_BOWRA_WB1.csv") %>%
  separate(., col = time, into = c("hour", "minute", "seconds"), sep = ":", remove = F) %>%
  group_by(., hour, day, rec, point, transect) %>%
  #group_by(., time) %>%
  summarise_at(vars(Humidity, Temperature), mean)
# Batch: average every "*1.csv" file in place (note: overwrites the inputs)
files <- list.files(dir, pattern = "1.csv", full.names = T, recursive = F)
output <- gsub(files, pattern = ".csv", replacement = "")
output <- paste(output, "average.csv", sep = "")
for (file in files) {
  read.csv(file) %>%
    separate(., col = time, into = c("hour", "minute", "seconds"), sep = ":", remove = F) %>%
    group_by(., hour, day, rec, point, transect) %>%
    summarise_at(vars(Humidity, Temperature), mean) %>%
    write.csv(., file)
}
# Combine all averaged files into a single table
files <- as.list(files)
df <- lapply(files, read.csv)
df <- do.call(rbind, df) %>%
  write.csv(., "Avrg.csv")
2dac5ae5972cae3a8afd8c257e91d3217b9f9c03 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/stringi/examples/stri_match.Rd.R | 2d978758e84633ce7c5a793e1c0dddf0b8b6822d | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,267 | r | stri_match.Rd.R | library(stringi)
### Name: stri_match_all
### Title: Extract Regex Pattern Matches, Together with Capture Groups
### Aliases: stri_match_all stri_match_first stri_match_last stri_match
### stri_match_all_regex stri_match_first_regex stri_match_last_regex
### ** Examples
# Extracted example script from the stringi package documentation.
# Each stri_match_*_regex() call returns the full match plus one column per
# capture group "(\\w+)".
stri_match_all_regex("breakfast=eggs, lunch=pizza, dessert=icecream",
"(\\w+)=(\\w+)")
stri_match_all_regex(c("breakfast=eggs", "lunch=pizza", "no food here"),
"(\\w+)=(\\w+)")
stri_match_all_regex(c("breakfast=eggs;lunch=pizza",
"breakfast=bacon;lunch=spaghetti", "no food here"),
"(\\w+)=(\\w+)")
# first/last variants return only one match per input string.
stri_match_first_regex(c("breakfast=eggs;lunch=pizza",
"breakfast=bacon;lunch=spaghetti", "no food here"),
"(\\w+)=(\\w+)")
stri_match_last_regex(c("breakfast=eggs;lunch=pizza",
"breakfast=bacon;lunch=spaghetti", "no food here"),
"(\\w+)=(\\w+)")
# cg_missing controls the value reported for capture groups that did not match.
stri_match_first_regex(c("abcd", ":abcd", ":abcd:"), "^(:)?([^:]*)(:)?$")
stri_match_first_regex(c("abcd", ":abcd", ":abcd:"), "^(:)?([^:]*)(:)?$", cg_missing="")
# Match all the pattern of the form XYX, including overlapping matches:
stri_match_all_regex("ACAGAGACTTTAGATAGAGAAGA", "(?=(([ACGT])[ACGT]\\2))")[[1]][,2]
# Compare the above to:
stri_extract_all_regex("ACAGAGACTTTAGATAGAGAAGA", "([ACGT])[ACGT]\\1")
|
5c3e2add9c279aab9e379c5bce3731e24222d0fd | 642d338221f44aad742b39230cffcbc297f48306 | /R/cbf_pcasl_Chen2011.R | 22c104d28075848f0b126dff833bd0a7ab27b657 | [] | no_license | stnava/itkImageR | 3577ccb5785893c510f6e0fabd6a68d5d6c095a2 | 8b218e81d8e3cc642c8891a4eb4c43dc941cf870 | refs/heads/master | 2016-09-05T14:29:26.851337 | 2013-11-25T22:53:34 | 2013-11-25T22:53:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,687 | r | cbf_pcasl_Chen2011.R | # chen 2011 paper pCASL --------------------------------------------------------------------------------------
# Compute a mean cerebral-blood-flow (CBF) map from a motion-corrected pCASL
# time series, following the quantification model of Chen et al. (2011).
#
# Args:
#   motionCorrectedImage: 4D ASL series (antsImage or array-coercible) with
#     control and label volumes interleaved as control, label, control, ...
#   mask: 3D brain-mask image in the same space; voxels with mask < 1 are
#     zeroed in the output.
# Returns:
#   A 3D antsImage holding the masked mean CBF map.
cbf_pcasl_Chen2011 <- function(motionCorrectedImage, mask) {
  img <- as.array(motionCorrectedImage)
  numdiffs <- floor(dim(img)[4]/2)
  # Odd volumes are control, even volumes are label (interleaved acquisition).
  labelimg <- img[, , , seq(2, by = 2, length.out = numdiffs)]
  controlimg <- img[, , , seq(1, by = 2, length.out = numdiffs)]
  lambda <- 0.9   # blood/tissue water partition coefficient
  deltaM <- (controlimg - labelimg)
  alpha <- 0.85   # labeling efficiency
  # Voxelwise temporal mean of the control volumes serves as the M0 estimate
  # (apply over the three spatial margins replaces the original triple loop).
  M0 <- apply(controlimg, 1:3, mean)
  meanM0 <- M0
  # Replicate M0 along the time axis so it matches deltaM's dimensions.
  M0 <- rep(M0, numdiffs)
  dim(M0) <- dim(controlimg)
  T1b <- 1664   # longitudinal relaxation time of blood
  omega <- 1    # post-labeling delay
  tau <- 1.5    # labeling duration
  # NOTE(review): T1b looks like milliseconds while omega/tau look like
  # seconds; confirm the units against Chen 2011 before trusting absolute
  # CBF values.
  cbf <- (lambda * deltaM)/(2 * alpha * M0 * T1b * (exp(-omega/T1b) - exp(-(tau + omega)/T1b)))
  cbf[is.nan(cbf)] <- 0   # voxels with M0 == 0 yield NaN; treat as zero flow
  # Temporal mean of the per-pair CBF estimates, with the original 5400 scaling.
  meanvalues <- 5400 * apply(cbf, 1:3, mean)
  # Fix: clone the output image from `mask` (a 3D image in the same space)
  # instead of the global `moco_results$moco_avg_img`, which is not an
  # argument of this function and made it depend on the caller's workspace.
  meancbf <- antsImageClone(mask)
  meancbf[!is.nan(meanvalues)] <- meanvalues[!is.nan(meanvalues)]
  meancbf[(mask < 1)] <- 0
  return(meancbf)
}
|
b94b385a464c1e7d0300a37fc764f91bbfb24bac | def6fc453320cea291f9359d67746f613d019760 | /man/ls_migration.Rd | 7d238a6769f9b17b9e7e82d0cfe52a05d04e7cf7 | [] | no_license | piLaboratory/GillesCom | 898380f6a10b9e04233dea867afdacda152ab47d | 63466aa2a445bc68c73ebc3b9959c443617069c2 | refs/heads/master | 2020-12-14T14:33:54.697316 | 2018-09-18T22:46:34 | 2018-09-18T22:46:34 | 44,714,587 | 1 | 0 | null | 2016-10-12T03:05:15 | 2015-10-22T01:30:31 | R | UTF-8 | R | false | true | 568 | rd | ls_migration.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Community.R
\name{ls_migration}
\alias{ls_migration}
\title{Helper functions}
\usage{
ls_migration(J, S, alpha, m)
}
\arguments{
\item{J}{expected size of metacommunity (total number of individuals)}
\item{S}{Expected number of species in the metacommunity}
\item{alpha}{Fisher's alpha of the metacommunity}
\item{m}{per species migration rate}
}
\description{
Generates migration rates from a log-series metacommunity. The user is expected
to provide either S or alpha, but not both.
}
|
aa4a35732e53864a4c7098ecd20485bec3430d10 | f8c9804e50a61d544250ecf5a1a03b357819a23a | /man/solvecov.Rd | eabd29551617d9e2e90414c35ef9dd5a8128e4f4 | [] | no_license | cran/mrds | c086ead932cd9e39c9aa7ee734bc55f0d2e8d425 | dfa8dff4d44565c0123ef6f3f1e9f0b152b6155c | refs/heads/master | 2023-07-27T08:16:08.397331 | 2023-07-06T10:30:15 | 2023-07-06T10:30:15 | 17,697,669 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 909 | rd | solvecov.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/solvecov.R
\name{solvecov}
\alias{solvecov}
\title{Invert of covariance matrices}
\usage{
solvecov(m, cmax = 1e+10)
}
\arguments{
\item{m}{a numeric symmetric matrix.}
\item{cmax}{a positive value, see above.}
}
\value{
A list with the following components: \code{inv} the inverted
matrix, \code{coll} \code{TRUE} if \code{solve} failed because of
singularity.
}
\description{
Tries to invert a matrix by \code{solve}. If this fails because of
singularity, an eigenvector decomposition is computed, and eigenvalues below
\code{1/cmax} are replaced by \code{1/cmax}, i.e., \code{cmax} will be the
corresponding eigenvalue of the inverted matrix.
}
\section{Source}{
\code{solvecov} code was taken from package \code{fpc}: Christian Hennig
}
\seealso{
solve, eigen
}
\author{
Christian Hennig
}
|
d0359c981cef48fc8b3d61df972c1963bc9362dc | c656d32330dfb2d5d5af8357dcb47cd20fe1b32c | /R/Plot of number of assessments per stock.R | 62f4b0e299710d50cfea1c3a9a1f04f4e2aad9eb | [] | no_license | martinpastoors/iAdvice | 898ab6fa38a5a1c0063716f0847938421888ea4a | 5127133f69f3d05ce2f27e6a806a4eaaf3062d5d | refs/heads/master | 2023-07-22T06:49:28.836931 | 2023-07-19T05:57:00 | 2023-07-19T05:57:00 | 102,521,867 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,733 | r | Plot of number of assessments per stock.R | # -----------------------------------------------------------------------------------------------
# Analyse assessments per stock
#
# 25/09/2017 taken from ices SAG data plot
# 16/10/2017 now includes datapublished
# 15/11/2018 updated for new database layout
# 23/03/2019 updated for new database again; only source is working so far
# -----------------------------------------------------------------------------------------------
library(tidyverse) # for piping and easy coding
library(reshape2) # reshaping data; e.g. dcast
library(ggthemes) # for themes
library(pander) # for print tables
library(readxl) # read excel files
library(cowplot) # multiplots
library(RColorBrewer) # colours
library(lubridate)
# library(ggstance) # extension to ggplot; vertical extensions; only works for ggplot >= 3.0.0
# Load utils code
source("../prf/r/my utils.r")
# Set working directory to dropbox folder
dropboxdir <- paste(get_dropbox(), "/iAdvice", sep="")
# Load dataset
load(file=paste(dropboxdir, "/rdata/iAssess.RData",sep=""))
load(file=paste(dropboxdir, "/rdata/iSpecies.RData",sep=""))
load(file=paste(dropboxdir, "/rdata/iSAG.RData",sep=""))
load(file=paste(dropboxdir, "/rdata/iRename.RData",sep=""))
load(file=paste(dropboxdir, "/rdata/iStockkey.RData",sep=""))
load(file=paste(dropboxdir, "/rdata/iSpecies.RData",sep=""))
# ---------------------------------------------------------------------------------------------
# plots from iAssess by assessment year and purpose
# ---------------------------------------------------------------------------------------------
splitter <- 80
x <-
# iAdvice %>%
iAssess %>%
# iSAGstock %>%
# dplyr::select(-stockkey, -icesareas) %>%
# left_join(iRename[,c("stockkeylabel","stockkey")], by="stockkeylabel") %>%
# left_join(iStockkey, by="stockkey") %>%
mutate_at(c("stockkeylabel","stockkeylabelold"), funs(tolower)) %>%
filter(!grepl("nep", stockkeylabel)) %>%
filter(!grepl("^rj", stockkeylabel)) %>%
# filter(speciesfaocode %in% c("cod","her","hom","mac","whb","ple","sol")) %>%
# filter(speciesfaocode %in% c("mac")) %>%
# filter(speciesfaocode %in% c("her")) %>%
# filter (!(source == "iadvice" & !grepl("bench", purpose))) %>%
filter (source != "iadvice") %>%
distinct(stockkey, stockkeylabel, stockkeylabelold, speciesfaocode, assessmentyear, purpose, published, source) %>%
# distinct(stockkey, stockkeylabel, stockkeylabelold, speciesfaocode, assessmentyear, purpose, published) %>%
mutate(stockkeylabelold = ifelse(is.na(stockkeylabelold), stockkeylabel, stockkeylabelold)) %>%
arrange(stockkeylabelold) %>%
mutate(id = group_indices(., stockkeylabelold)) %>%
data.frame() %>%
mutate(stockkeylabelold = factor(stockkeylabelold),
stockkeylabelold = factor(stockkeylabelold, levels = rev(levels(stockkeylabelold))),
source = factor(source),
source = factor(source, levels=rev(levels(source))),
purpose = factor(purpose),
purpose = factor(purpose, levels=rev(levels(purpose))),
published = factor(published),
published = factor(published, levels=rev(levels(published))),
column = ifelse(id <= splitter , 1, NA),
column = ifelse(id > splitter & id <= 2*splitter, 2, column),
column = ifelse(id > 2*splitter , 3, column)) %>%
# mutate(source = factor(source, levels=c("wg","qcs", "excel","sag"))) %>%
#mutate(source = "SAG") %>%
left_join(iSpecies, by="speciesfaocode")
# define colour scales for source
mySourceColors <- brewer.pal(length(levels(x$source)),"Set1")
names(mySourceColors) <- levels(x$source)
# define headers for columns
y <-
x %>%
group_by(column) %>%
filter(row_number()==1| row_number() == n()) %>%
ungroup() %>%
mutate(id = group_indices(., column)) %>%
select(column, id, stockkeylabelold) %>%
group_by(column) %>%
summarise(code = paste(stockkeylabelold, collapse=" : "))
# plot by stock and source
x %>%
left_join(y, by="column") %>%
ggplot(aes(x=assessmentyear, y=stockkeylabelold, group=source)) +
theme_publication() +
theme(panel.spacing = unit(1, "lines"),
panel.grid.major = element_line(colour = "grey70"),
text = element_text(size=8),
legend.title = element_blank()) +
geom_point(aes(colour = source), position=position_dodge(width=0.8), size=2 ) +
scale_colour_manual(name = "source", values = mySourceColors, na.value="lightgray") +
scale_y_discrete(position="right") +
labs(x = "assessmentyear", y = NULL ) +
facet_wrap(~code, scales="free_y", shrink=TRUE, ncol=3)
# Plot purpose
myPurposeColors <- brewer.pal(length(levels(x$purpose)),"Set1")
names(myPurposeColors) <- rev(levels(x$purpose))
# plot by stock and purpose
x %>%
filter(grepl("bench", purpose)) %>%
left_join(y, by="column") %>%
ggplot(aes(x=assessmentyear, y=stockkeylabelold)) +
theme_publication() +
theme(panel.spacing = unit(1, "lines"),
panel.grid.major = element_line(colour = "grey70"),
text = element_text(size=8),
legend.title = element_blank()) +
geom_point(aes(colour = purpose), position=position_dodge(width=0.8) ) +
scale_colour_manual(name = "purpose", values = myPurposeColors, na.value="lightgray") +
scale_y_discrete(position="right") +
labs(x = "assessmentyear", y = NULL ) +
facet_wrap(~code, scales="free_y", shrink=TRUE, ncol=3)
# myPublishedColors <- brewer.pal(length(levels(x$published)),"Set1")
# names(myPublishedColors) <- rev(levels(x$published))
# plot by stock and published
x %>%
left_join(y, by="column") %>%
ggplot(aes(x=assessmentyear, y=stockkeylabelold, group=published)) +
theme_publication() +
theme(panel.spacing = unit(1, "lines"),
panel.grid.major = element_line(colour = "grey70"),
text = element_text(size=8),
legend.title = element_blank()) +
geom_point(aes(colour = published), position=position_dodge(width=0.8) ) +
scale_colour_manual(name = "source", values = myPublishedColors, na.value="lightgray") +
scale_y_discrete(position="right") +
labs(x = "assessmentyear", y = NULL ) +
facet_wrap(~code, scales="free_y", shrink=TRUE, ncol=3)
# ---------------------------------------------------------------------------------------------
# plots from iAdvice by assessment year and other variables
# ---------------------------------------------------------------------------------------------
# Same layout as the iAssess section above, but restricted to iAdvice rows for
# mackerel ("mac") where advice was given on the stock.
splitter <- 37
x <-
iAdvice %>%
# NOTE(review): mutate_at() + funs() are deprecated; see across() in dplyr.
mutate_at(c("stockkeylabel","stockkeylabelold"), funs(tolower)) %>%
filter(speciesfaocode %in% c("mac")) %>%
filter(adviceonstock=="Y") %>%
distinct(stockkey, stockkeylabel, stockkeylabelold, speciesfaocode, assessmentyear, purpose, published) %>%
mutate(stockkeylabelold = ifelse(is.na(stockkeylabelold), stockkeylabel, stockkeylabelold)) %>%
arrange(stockkeylabelold) %>%
mutate(id = group_indices(., stockkeylabelold)) %>%
data.frame() %>%
mutate(stockkeylabelold = factor(stockkeylabelold),
stockkeylabelold = factor(stockkeylabelold, levels = rev(levels(stockkeylabelold))),
purpose = factor(purpose),
purpose = factor(purpose, levels=rev(levels(purpose))),
published = factor(published),
published = factor(published, levels=rev(levels(published))),
column = ifelse(id <= splitter , 1, NA),
column = ifelse(id > splitter & id <= 2*splitter, 2, column),
column = ifelse(id > 2*splitter , 3, column)) %>%
left_join(iSpecies, by="speciesfaocode")
# define colour scales
myPurposeColors <- brewer.pal(length(levels(x$purpose)),"Set1")
names(myPurposeColors) <- rev(levels(x$purpose))
myPublishedColors <- brewer.pal(length(levels(x$published)),"Set1")
names(myPublishedColors) <- rev(levels(x$published))
# define headers for columns
# Facet label = "first stock : last stock" of each plot column.
y <-
x %>%
group_by(column) %>%
filter(row_number()==1| row_number() == n()) %>%
ungroup() %>%
mutate(id = group_indices(., column)) %>%
select(column, id, stockkeylabelold) %>%
group_by(column) %>%
summarise(code = paste(stockkeylabelold, collapse=" : "))
# plot by stock and purpose
x %>%
left_join(y, by="column") %>%
ggplot(aes(x=assessmentyear, y=stockkeylabelold)) +
theme_publication() +
theme(panel.spacing = unit(1, "lines"),
panel.grid.major = element_line(colour = "grey70"),
text = element_text(size=8),
legend.title = element_blank()) +
geom_point(aes(colour = purpose), position=position_dodge(width=0.8), size = 2 ) +
scale_colour_manual(name = "purpose", values = myPurposeColors, na.value="lightgray") +
scale_y_discrete(position="right") +
labs(x = "assessmentyear", y = NULL ) +
facet_wrap(~code, scales="free_y", shrink=TRUE, ncol=3)
# plot by stock and published
x %>%
left_join(y, by="column") %>%
ggplot(aes(x=assessmentyear, y=stockkeylabelold, group=published)) +
theme_publication() +
theme(panel.spacing = unit(1, "lines"),
panel.grid.major = element_line(colour = "grey70"),
text = element_text(size=8),
legend.title = element_blank()) +
geom_point(aes(colour = published), position=position_dodge(width=0.8) ) +
scale_colour_manual(name = "source", values = myPublishedColors, na.value="lightgray") +
scale_y_discrete(position="right") +
labs(x = "assessmentyear", y = NULL ) +
facet_wrap(~code, scales="free_y", shrink=TRUE, ncol=3)
|
c0580ec33f87be9c34f1a2d0285e6fe73dfe7a47 | 9c8e51eb942247ce929a0740b5b348de8cc5f862 | /test/newton.R | 5db835a8512d896c52c1ed62ab68689f24148bc3 | [] | no_license | jiangfeng1124/optimization | 2db2ecfd1c8ab7bf21c4749130ca4fa5c9244e77 | 11532e68f9155c2ab17778ed02a79fd95adc7695 | refs/heads/master | 2016-08-04T08:43:54.827349 | 2013-03-27T10:19:21 | 2013-03-27T10:19:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 696 | r | newton.R | source("src/newton.R")
cat(">>> example: 1-dimensional\n")
f <- function(x)
{
return((x[1] - 5.0)^4)
}
g <- function(x)
{
return(array(c(4.0 * (x[1] - 5.0)^3)))
}
h <- function(x)
{
return(array(c(12.0 * (x[1] - 5.0)^2)))
}
initial.x <- array(c(0.0))
results <- Newton(f, g, h, initial.x, show.trace = TRUE)
print(results)
cat(">>> example: 2-dimensional\n")
eta = 0.9
f2 <- function(x)
{
return((1.0 / 2.0) * (x[1]^2 + eta * x[2]^2))
}
g2 <- function(x)
{
return(array(c(x[1], eta * x[2])))
}
h2 <- function(x)
{
return(matrix(c(1.0, 0.0, 0.0, eta), nrow=2, ncol=2))
}
initial.x <- array(c(127.0, 921.0))
results <- Newton(f2, g2, h2, initial.x, show.trace = TRUE)
print(results)
|
fe811fa15792b3c727c6e7bcda23cd17ef13eafc | 4e4892c47c62184e2cb00660353f3c6cf5f37474 | /man/vcfR2GTCov.Rd | 2237258d547420f2d53fcdf7e69115ecda7d8c28 | [] | no_license | OJWatson/vcfRmanip | a3b1f7e5b8976c7dcbfdceafad4dae88fa04c1f1 | cc138e49fa9749bdc10d65661a42f31e641fa9b5 | refs/heads/master | 2021-01-06T17:52:38.128739 | 2020-03-05T18:00:13 | 2020-03-05T18:00:13 | 241,425,854 | 0 | 0 | null | 2020-02-18T17:36:44 | 2020-02-18T17:36:43 | null | UTF-8 | R | false | true | 298 | rd | vcfR2GTCov.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vcf_viz.R
\name{vcfR2GTCov}
\alias{vcfR2GTCov}
\title{Plot vcf outputs collectively}
\usage{
vcfR2GTCov(vcfRobject = NULL, biallelic = T)
}
\description{
Takes a \code{vcf} or \code{vcfRobject} and plots the GT levels
}
|
c3ab5cda8f72d6010fa7a7ead9e8d148423374f4 | dc76c214102f7ce9d3f359b4728bb0e8ff335984 | /DownloadData.R | 769d3704da40e310ec65ff194a53f56fe79dc9fb | [] | no_license | pjvolders/Covid19TrendAnalysis | 552d9f0668a52ecf3db5382cc8e817ee35856a95 | 58bd8c85f7c0da815e46309bfc9d63fa96706a17 | refs/heads/master | 2021-04-13T01:50:40.257309 | 2020-04-07T09:38:26 | 2020-04-07T09:38:26 | 249,126,395 | 0 | 0 | null | 2020-03-22T06:28:07 | 2020-03-22T06:28:07 | null | UTF-8 | R | false | false | 3,757 | r | DownloadData.R | #------------------------------------
# Download and selection data Belgium
# Author: Joris Meys
# date last modified: 2020-03-08
#------------------------------------
# This file will check the latest data on the repo of John Hopkins
# It will -if necessary - download those and add the new data to
# the original file.
library(tidyverse)
library(conflicted)
library(lubridate)
conflict_prefer("filter", "dplyr")
# Setup
firstdate <- as.Date("2020-01-22")
fprocessed <- file.path("processed","covid_selection.csv")
# Choose countries to keep
#countries <- c("Belgium","France","Germany","Italy","Netherlands", "UK", #"China", "Mainland China",
# "Spain", "South Korea","Korea, South", "Japan", "US", "United Kingdom")
days <- seq(firstdate,Sys.Date() - 1,
by = "1 day")
fnames <- paste0(format(days, "%m-%d-%Y"),".csv")
if(!dir.exists("rawdata")) dir.create("rawdata")
if(!dir.exists("processed")) dir.create("processed")
# if(!file.exists(fprocessed)){
# tmpdf <- data.frame(date = integer(0), Confirmed = integer(0),
# Deaths = integer(0), Recovered = integer(0),
# Country = character(0))
# write.csv(tmpdf,
# fprocessed,
# row.names = FALSE
# )
# rm(tmpdf)
# }
#----------------------------------------
# Download files from John Hopkins (thx!)
master_url <- paste("https://raw.githubusercontent.com",
"CSSEGISandData/COVID-19/master",
"csse_covid_19_data",
"csse_covid_19_daily_reports",
sep = "/")
for(fn in fnames){
thefile <- file.path("rawdata",fn)
if(!file.exists(thefile))
download.file(file.path(master_url,fn),
dest = thefile)
}
processed_data = map_dfr(fnames, function(fn){
f_date = mdy(str_remove(fn, '.csv'))
read_csv(file.path("rawdata",fn)) %>%
rename(Country = starts_with('Country')) %>%
mutate(date = f_date) %>%
select(date, Confirmed, Deaths, Recovered, Country) %>%
#filter(Country %in% countries) %>%
group_by(Country, date) %>%
summarise_all(sum) %>%
ungroup()
})
processed_data = processed_data %>%
mutate(Country = replace(Country, Country=="United Kingdom", "UK")) %>%
mutate(Country = replace(Country, Country=="Mainland China", "China")) %>%
mutate(Country = replace(Country, Country=="Viet Nam", "Vietnam")) %>%
mutate(Country = replace(Country, Country=="Korea, South", "South Korea"))
write_csv(processed_data, fprocessed)
#----------------------------------------
# Select data for Belgium
# find files to add
# presdates <- read.csv(fprocessed,
# colClasses = c("Date",rep("numeric",3),
# "character")
# )
# latest <- max(presdates$date, firstdate - 1, na.rm = TRUE)
# id <- days > latest
#cols <- c("Country/Region","Confirmed","Deaths","Recovered")
# Loop over the necessary files, find data, add to .csv
# for(i in which(id)){
# fn <- fnames[i]
# tmp <- read_csv(file.path("rawdata",fn), ) %>%
# mutate(date = days[i]) %>%
# select(date, Confirmed, Deaths, Recovered,
# Country = `Country_Region`) %>%
# filter(Country %in% countries) %>%
# group_by(Country, date) %>%
# summarise_all(sum) %>%
# ungroup() %>%
# select(date, Confirmed, Deaths, Recovered, Country)
#
# write.table(tmp,
# fprocessed,
# row.names = FALSE,
# col.names = FALSE,
# append = TRUE,
# sep = ",")
# }
#' Sciensano data Belgium
sciensano_data_url = 'https://epistat.sciensano.be/Data/COVID19BE_HOSP.csv'
download.file(sciensano_data_url,
dest = 'COVID19BE_HOSP.csv')
# Cleanup
rm(list = ls())
|
1640b3756efdf1e50d0ab70222c5bcabd32b2bb2 | 40962c524801fb9738e3b450dbb8129bb54924e1 | /DAY - 2/Class/If.R | 57158bacc9159afb0a0982f6a265c657860e9d9d | [] | no_license | klmsathish/R_Programming | 628febe334d5d388c3dc51560d53f223585a0843 | 93450028134d4a9834740922ff55737276f62961 | refs/heads/master | 2023-01-14T12:08:59.068741 | 2020-11-15T13:23:31 | 2020-11-15T13:23:31 | 309,288,498 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 72 | r | If.R | #IF condidtions
x <- c(15,8,9,45,89,23)
for (i in 1:6)
{
print(x[i])
} |
9219cb8ef176251c8be90633e50a3bbbc596f6c4 | 299bb5c4e121f9e4f95fb851fc4381eda137afd4 | /src/data-prep/download-file.R | 4e96ef8207d0365a0bbd8439886a7be5c7e39215 | [] | no_license | dogabayraktar/SickAirbnbPricesAcrossNetherlands | db8b52fc2273b2ef4fc3d2e1b9b4c10f54cd81dd | 305f59689714ab826eaa8004f6dc122400a77f90 | refs/heads/main | 2023-08-14T11:22:27.103219 | 2021-10-17T14:30:15 | 2021-10-17T14:30:15 | 404,368,240 | 2 | 2 | null | 2021-10-05T17:02:50 | 2021-09-08T13:58:00 | R | UTF-8 | R | false | false | 1,360 | r | download-file.R | ######################
### DOWNLOAD DATA ####
######################
# Creating the data folder
dir.create('../../data')
# creating a list with the download link and file names of the raw data
files = list(c(url='http://data.insideairbnb.com/the-netherlands/north-holland/amsterdam/2020-12-12/data/listings.csv.gz',
fn='listings-12.20.csv.gz'),
c(url='http://data.insideairbnb.com/the-netherlands/north-holland/amsterdam/2021-01-09/data/listings.csv.gz',
fn='listings-01.21.csv.gz'),
c(url='http://data.insideairbnb.com/the-netherlands/north-holland/amsterdam/2021-02-08/data/listings.csv.gz',
fn='listings-02.21.csv.gz'),
c(url='http://data.insideairbnb.com/the-netherlands/north-holland/amsterdam/2021-03-04/data/listings.csv.gz',
fn='listings-03.21.csv.gz'),
c(url='http://data.insideairbnb.com/the-netherlands/north-holland/amsterdam/2021-04-09/data/listings.csv.gz',
fn='listings-04.21.csv.gz'),
c(url='http://data.insideairbnb.com/the-netherlands/north-holland/amsterdam/2021-05-19/data/listings.csv.gz',
fn='listings-05.21.csv.gz'))
# looping over the list to download and save the file in the data folder
for (item in files) {
download.file(item['url'], paste0('../../data/', item['fn']))
} |
df168a159f3eecc049f8375e3dd63605deea1104 | 661a2e1bdd2eaf48c3ec7f93531821ee4e574292 | /man/getAffectedRCW.Rd | a430d4d27201dff955a2acd3dbc97513c3893a7d | [] | no_license | cran/washex | dc94cae67e9654d72184e7d37bb9c1c0ce763a27 | 561ac582539d94b46c3e1020386a0712ac4c4a5d | refs/heads/master | 2023-09-04T21:38:52.121744 | 2021-11-17T15:00:02 | 2021-11-17T15:00:02 | 362,524,979 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,488 | rd | getAffectedRCW.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getAffectedRCW.R
\name{getAffectedRCW}
\alias{getAffectedRCW}
\title{Link bills to Revised Code of Washington (RCW)}
\usage{
getAffectedRCW(biennium, billId, paired = TRUE, type = c("df", "list", "xml"))
}
\arguments{
\item{biennium}{Character vector representing the biennium(s) to be
searched. Each argument should take the form "XXXX-YY"}
\item{billId}{Character vector containing the bill(s) to be retrieved.
Each argument should take the form "XX YYYY", where XX
is the prefix (HB, SB, etc.) and YYYY is the bill number.}
\item{paired}{If TRUE, will assume that equal length vectors represent
paired data. Set to FALSE to generate an NxN grid of input
arguments. Applies to equal length vector inputs only.}
\item{type}{One of "df", "list", or "xml". Specifies the format for
the output.}
}
\value{
\code{getAffectedRCW} returns an object of type equal to the
\code{type} argument (defaults to dataframe)
}
\description{
Get a listing of all RCW citations affected by a given bill
}
\section{Note}{
for more information on RCW codes, see
\url{https://apps.leg.wa.gov/rcw/}
}
\examples{
## usage for a single bill case, XML form
getAffectedRCW("2005-06", "HB 1427", type = "xml")
## generates a dataframe of affected codes from all bills in 2007
\dontrun{
bills <- getLegislationByYear("2007")
codesAffected <- getAffectedRCW("2007-08", bills$BillId)}
}
|
1e4ef896e21a0021002b4019da258ace219f6fa1 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/SoilR/tests/Rtest.R | 1a477ce2229207d35b724fb5eab27040ac20c874 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,219 | r | Rtest.R | #!/usr/bin/Rscript
# vim:set ff=unix expandtab ts=2 sw=2:
source("prolog.R")
tfr <- "^runit\\..*\\.R"
#fl <- list.files(pattern=tfr)
#for (fn in fl){
# print(fn)
# source(fn)
#}
alltests <- defineTestSuite(
name="allTests",
dirs=c(".","protected"),
testFileRegexp = tfr,
#testFuncRegexp = "^test.TwopSerial_linear_vs_nonlinear"
#"^test.FourpSerial_1"
#"test.TwopParallel_ZeroInput"
#"^test.TwopFeedback"
#"^test.TimeMapInterface"
#"^test.LowVerticalRatesPaper"
#"^test.check.pass"
#"test.ModelInit"
#"ptest.ModelOperators"
#"test.ParallelModel"
#"test.TwopSerial_linear_vs_nonlinear"
#"test.SoilRPaper1"
#"test.FourpSerial_1"
#"test.BoundFc"
#"test.ThreepFeedbackModel14|test.ThreepParallelModel14|test.ThreepSeriesModel14|test.TwopFeedbackModel14|test.TwopParallelModel14|test.TwopSeriesModel14"
#"test.LowVerticalRatesPaper|test.ModelInit|test.SoilRPaper1"
"test.LowVerticalRatesPaper|test.SoilRPaper1"
#"test.Deprecation"
#"test.GaudinskiModel14"
#"test.MC"
)
testResult <- runTestSuite(alltests)
printTextProtocol(testResult)
#produce exitstatus ne 0 for buildbot to notice
ef=getErrors(testResult)
n=ef$nErr+ef$nFail
if (n>0) {stop(1)}
|
1660cc3d35f0c9bb659af9d196df59bce0520f0f | 13abd22dc69e26aeccc1f74bd406cd2842fd97aa | /man/pullBundles-HumanCellAtlas-method.Rd | e00520d4f555bb52248a062978ba6dd31a43c5a2 | [] | no_license | kevinrue/HCABrowser | dad2763c68b97e4163bf8b1e6c654c7631152b46 | d5d7df17084d4fc7195418c2194e3a038cc6b494 | refs/heads/master | 2020-04-24T04:48:18.238369 | 2019-02-20T17:15:44 | 2019-02-20T17:15:44 | 171,716,714 | 0 | 0 | null | 2019-02-20T17:14:47 | 2019-02-20T17:14:46 | null | UTF-8 | R | false | true | 530 | rd | pullBundles-HumanCellAtlas-method.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllClasses.R
\docType{methods}
\name{pullBundles,HumanCellAtlas-method}
\alias{pullBundles,HumanCellAtlas-method}
\title{Obtain bundle fqids from a HumanCellAtlas object}
\usage{
\S4method{pullBundles}{HumanCellAtlas}(hca, ...)
}
\arguments{
\item{hca}{A HumanCellAtlas object}
}
\value{
character(1) of bundle fqids
}
\description{
Obtain bundle fqids from a HumanCellAtlas object
}
\examples{
hca <- HumanCellAtlas()
hca <- hca \%>\% pullBundles
}
|
7f210a13475a3f68962686d2729d5ba8b25a86c7 | c05830a0e61d0e975374002049383f5c072e085a | /Plot4.R | ce02254592c79dfbff8ef1271b0bad820c307e81 | [] | no_license | BANEMI/Exploratory-Data-Analysis | dc39bfc00c56eca73b428988f24954ff45a11dea | 71a5c68491ad74a6a92dbdae43684edcea71a2b0 | refs/heads/master | 2021-01-24T22:20:54.406118 | 2016-04-26T02:31:22 | 2016-04-26T02:31:22 | 57,058,449 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,722 | r | Plot4.R | #### reading data file "household_power_consumption.txt" stored in WD
data <- read.csv("household_power_consumption.txt", header = T, sep = ';',quote = '\"',stringsAsFactors=F,nrows= 2075259,na.strings = "?")
#### converint date into date fromat
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
#### selecting only part of data that is required to reproduce plots
DateSubSet <- subset(data, subset = (Date == "2007-02-01" | Date == "2007-02-02"))
#### Convertin time into time fromat
#### creating new variable and adding it to the data set by concatinating date and time variables
ConcatDT <- paste(as.Date(DateSubSet$Date), DateSubSet$Time)
DateSubSet$DateTime <- as.POSIXct(ConcatDT)
#### Creating a plot (4 plots in 2*2 matrix )
#plot4
png(file = "plot4.png", bg = "white")
par(mfcol = c(2,2))
with(df, {
plot(DateSubSet$Global_active_power ~ DateSubSet$DateTime,ylab = "Global Active Power", xlab = "",type = "l")
plot(DateSubSet$DateTime, DateSubSet$Sub_metering_1, ylab="Energy sub metering", xlab="", type="n")
lines(DateSubSet$DateTime,DateSubSet$Sub_metering_1, ylab="Energy sub metering", type="l")
lines(DateSubSet$DateTime,DateSubSet$Sub_metering_2, ylab="Energy sub metering", type="l", col="red")
lines(DateSubSet$DateTime,DateSubSet$Sub_metering_3, ylab="Energy sub metering", type="l", col="blue")
legend("topright", legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),bty="n",lwd=c(2,2,2),lty = c(1,1,1), col=c("black","red","blue"))
plot(Voltage ~ DateTime,data=DateSubSet, type = "l", ylab = "Voltage", xlab = "datetime")
plot(Global_reactive_power ~ DateTime,data=DateSubSet,type = "l", ylab = "Global_rective_power", xlab = "datetime")
})
dev.off() |
9788b52b561d7b51c3f67328cccbafa6d38e32cd | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/timetools/examples/timetools-package.Rd.R | e47068e06697a44d0d0c9b74bf4a0e1a8dd03788 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,099 | r | timetools-package.Rd.R | library(timetools)
### Extracted example code from the 'timetools' package help page.
### Demonstrates constructing TimeIntervalDataFrame objects and aggregating
### them onto a coarser time support with changeSupport().
### Name: timetools-package
### Title: Seasonal/Sequential (Instants/Durations, Even or not) Time
###   Series
### Aliases: timetools-package timetools
### Keywords: package
### ** Examples
# Two ways of describing the same intervals: explicit start/end vectors
# (ti1) versus a single vector of breakpoints with NULL ends (ti2);
# all.equal() below shows they are equivalent.
ti1 <- TimeIntervalDataFrame (
	c('2010-01-01', '2010-02-01'), c('2010-02-01', '2010-02-02'),
	'UTC', data.frame(ex1=1:2) )
ti2 <- TimeIntervalDataFrame (
	c('2010-01-01', '2010-02-01', '2010-02-02'), NULL,
	'UTC', data.frame(ex1=1:2) )
all.equal (ti1, ti2)
# A short two-interval series used to demonstrate aggregation
ti3 <- TimeIntervalDataFrame (
	c('2010-01-01', '2010-01-02', '2010-01-04'), NULL,
	'UTC', data.frame(ex3=c(6, 1.5)))
# weighted mean over a period of 3 days with at least 75% of
# coverage (NA is returned if not)
ti3
# 'd' is a one-day period object; 3L*d expresses the new 3-day support
d <- POSIXctp(unit='day')
changeSupport (ti3, 3L*d, 0.75)
# A longer, gappy series (note the NA value in ex4)
ti4 <- TimeIntervalDataFrame (
	c('2010-01-01', '2010-01-02', '2010-01-04',
	  '2010-01-07', '2010-01-09', '2010-01-10'), NULL,
	'UTC', data.frame(ex4=c(6, 1.5, 5, 3, NA)))
# weighted mean over a period of 3 days with at least 75% of
# coverage (NA is returned if not) or 50%
ti4
changeSupport (ti4, 3L*d, 0.75)
changeSupport (ti4, 3L*d, 0.5)
|
3fbbfcfd18ce8eb269f7ace8ddcc0fed6cacdfce | 5c53e88dfb1347da0133c66cac28a4d8ce2a466c | /man/getOptPathParetoFront.Rd | 3d10a762a826dbab5fee7f85b928d735e3610c4e | [] | no_license | bklppr/ParamHelpers | 3fa0342fa3919d048a33ad1dcf1dae002e1000b1 | 54ac927fcdc589711d01ad885f7a98d5791967b9 | refs/heads/master | 2021-01-20T03:53:26.526669 | 2017-04-03T12:59:50 | 2017-04-03T12:59:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,154 | rd | getOptPathParetoFront.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OptPath_getter.R
\name{getOptPathParetoFront}
\alias{getOptPathParetoFront}
\title{Get indices of pareto front of optimization path.}
\usage{
getOptPathParetoFront(op, y.names = op$y.names, dob = op$env$dob,
eol = op$env$eol, index = FALSE)
}
\arguments{
\item{op}{[\code{\link{OptPath}}]\cr
Optimization path.}
\item{y.names}{[\code{character}]\cr
Names of performance measures to construct pareto front for.
Default is all performance measures.}
\item{dob}{[\code{integer}]\cr
Vector of date-of-birth values to further subset the result.
Only elements with a date-of-birth included in \code{dob} are selected.
Default is all.}
\item{eol}{[\code{integer}]\cr
Vector of end-of-life values to further subset the result.
Only elements with an end-of-life included in \code{eol} are selected.
Default is all.}
\item{index}{[\code{logical(1)}]\cr
Return indices into path of front or y-matrix of nondominated points?
Default is \code{FALSE}.}
}
\value{
[\code{matrix} | \code{integer}]. Either matrix (with named columns) of points of front
in objective space or indices into path for front.
}
\description{
Get indices of pareto front of optimization path.
}
\examples{
ps = makeParamSet(makeNumericParam("x"))
op = makeOptPathDF(par.set = ps, y.names = c("y1", "y2"), minimize = c(TRUE, TRUE))
addOptPathEl(op, x = list(x = 1), y = c(5, 3))
addOptPathEl(op, x = list(x = 2), y = c(2, 4))
addOptPathEl(op, x = list(x = 3), y = c(9, 4))
addOptPathEl(op, x = list(x = 4), y = c(4, 9))
as.data.frame(op)
getOptPathParetoFront(op)
getOptPathParetoFront(op, index = TRUE)
}
\seealso{
Other optpath: \code{\link{OptPath}},
\code{\link{addOptPathEl}},
\code{\link{getOptPathBestIndex}},
\code{\link{getOptPathCols}},
\code{\link{getOptPathCol}}, \code{\link{getOptPathDOB}},
\code{\link{getOptPathEOL}}, \code{\link{getOptPathEl}},
\code{\link{getOptPathErrorMessages}},
\code{\link{getOptPathExecTimes}},
\code{\link{getOptPathLength}},
\code{\link{getOptPathX}}, \code{\link{getOptPathY}},
\code{\link{setOptPathElDOB}},
\code{\link{setOptPathElEOL}}
}
|
373284b9b3116e4fbc70265c9f552d0be13af297 | da905bb3f941fd07a0bb1b6c2c0aecd25ff6b6d4 | /writeups/childdev_2017/save_analyses.R | 06e7f3f6f8da06441a0646ed7b2e85973110b8fd | [
"MIT"
] | permissive | langcog/metalab2 | 674a32d3d91b00154c94827d48cf35dda7e2dd1b | 3d66130ec4c4c82761acfdd18df5a18b5f5cac0a | refs/heads/master | 2021-11-20T04:22:43.074913 | 2020-11-24T14:15:20 | 2020-11-24T14:15:20 | 108,099,389 | 20 | 16 | MIT | 2020-10-29T04:27:39 | 2017-10-24T08:35:31 | HTML | UTF-8 | R | false | false | 350 | r | save_analyses.R | ## LOAD DATA AND PACKAGES ####
## Master script: loads the data, runs each analysis script in sequence,
## then snapshots the whole workspace for later use in the paper.
## NOTE(review): the source() calls are order-dependent — later scripts
## presumably use objects created by the earlier ones; confirm before
## reordering.
source("analyses/initial_data.R")

## RUN ANALYSES ####
source("analyses/sample_size.R")
source("analyses/power.R")
source("analyses/method.R")
source("analyses/p_values.R")
source("analyses/funnel.R")
source("analyses/bias.R")

## SAVE ENVIRONMENT FOR USE IN PAPER ####
# Saves every object currently in the global environment.
save.image("educationpaper_environment.RData")
|
95928098403b662c57e6a142f9f451bb8fcbb099 | dc47d04bf9e44f675111284c31719dc78c46566c | /man/country2.Rd | c66f13876ccb1f236e9209dff4f6882e0dbf308a | [] | no_license | cran/eFRED | 010025cdfb7483f821e9ae9f0524f1189bf019af | ea6c449421809b52e03a51b1adb124630062fe53 | refs/heads/master | 2023-02-27T16:28:04.743335 | 2021-01-15T08:40:13 | 2021-01-15T08:40:13 | 334,104,607 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 352 | rd | country2.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{country2}
\alias{country2}
\title{Country Codes}
\format{Character vector of length 249}
\source{
\url{https://en.wikipedia.org/wiki/ISO_3166}
}
\usage{
country2
}
\description{
A vector containing ISO 2-digit country codes.
}
\keyword{datasets}
|
fea6912a9fdca7007f8b487a08502a01c0369aab | db6765c7b12c08d5af52bea7aa3cccb4dc615e9f | /LR1.R | 2963641b8f2d11f34410648c23b5e4ec093d32c8 | [] | no_license | Zraeva-Ekaterina/s108083 | e5cfd72bc44ddd5e34fe5e557178d18ec53f19eb | b51115eebb212292824190cc07b13b72992f48b1 | refs/heads/main | 2023-02-07T15:02:14.538675 | 2020-12-28T08:56:52 | 2020-12-28T08:56:52 | 312,556,765 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,479 | r | LR1.R | library('Hmisc')
# Lab script: distribution and correlation analysis of a regression data
# set, first on the raw indicators, then on their log10 transforms.
# Requires Data_Zraeva.csv in the working directory; Hmisc is loaded above.
library('corrplot')
library('nortest')
library('knitr')
# 1.Importing data ------------------------------------------------------------
# read.csv2 expects ';' separators and ',' decimal marks; empty strings
# become NA.
DF<-read.csv2("Data_Zraeva.csv",
              encoding = "UTF-8",
              stringsAsFactors = F,
              na.strings = '')
View(DF)
dim(DF)
# Keep the first 999 rows, drop the first column, remove incomplete rows
reg.df <- DF[as.numeric(row.names(DF)) < 1000, -1]
reg.df <- na.omit(reg.df)
# log10 transform of the five indicator columns (columns 3..7)
reglog.df<-log10(reg.df[3:7])
# 2. Analysis of data distribution ----------------------------------------------
# Histogram ==================================================================
# 3x2 grid of density histograms, each overlaid with a fitted normal curve
par(mfrow = c(3, 2))
par(oma = c(0, 0, 1.5, 0))
par(mar = c(4, 4, 0.5, 0.5))
for (i in 3:7) {
  x <- reg.df[, i]
  hist(x,
       freq = F,
       col = 'wheat',
       xlab = colnames(reg.df)[i],
       ylab = 'Density',
       main = '')
  # Normal density with the sample mean/sd of this indicator
  curve(dnorm(x, mean = mean(x), sd = sd(x)), col = 'darkblue',
        lwd = 2, add = TRUE)
}
# NOTE(review): title() sizes the main title via cex.main; `cex` here is
# likely ignored — confirm the intended argument.
title(main = 'Histograms of the distribution of indicators',
      outer = TRUE, cex = 1.5)
par(mfrow = c(1, 1))
# Normality tests Shapiro Wilka ========================================================
shapiro.test(reg.df$y)
apply(reg.df[, 3:7], 2, shapiro.test)
str(shapiro.test(reg.df$y))
# Extract just the W statistic, rounded, for each indicator
apply(reg.df[, 3:7], 2, function (x) {
  round(shapiro.test(x)$statistic, 2)
})
# Same statistics collected into a data frame and rendered with kable
# ("peremennaia" = transliterated Russian for "variable")
peremennaia <- sapply(reg.df[, 3:7], function (x) { round(shapiro.test(x)$statistic, 2)})
table <- data.frame(peremennaia)
kable(table)
#3. Analysis of linear relationships ---------------------------------------------
# Scatter charts =============================================================
# Pairwise scatterplot matrix with semi-transparent filled points
pairs(reg.df[3:7],
      pch = 21,
      col = rgb(0, 0, 1, alpha = 0.4),
      bg = rgb(0, 0, 1, alpha = 0.4),
      cex = 1.1)
# Correlation matrix ======================================================
# Pearson correlations plus Hmisc::rcorr p-values; corrplot blanks out
# cells whose correlation is not significant at the 0.05 level.
matrix.cor <- cor(reg.df[3:7])
matrix.p <- rcorr(as.matrix(reg.df[3:7]))$P
corrplot(matrix.cor,
         method = c("shade"),
         addshade = "all",
         order = 'original',
         diag = F,
         p.mat = matrix.p,
         insig = 'blank',
         sig.level = 0.05)
# 4. Analysis of data distribution for log ----------------------------------------------
# Histogram for log==================================================================
# Same 3x2 histogram grid, now over the log10-transformed indicators
par(mfrow = c(3, 2))
par(oma = c(0, 0, 1.5, 0))
par(mar = c(4, 4, 0.5, 0.5))
for (i in 1:5) {
  x <- reglog.df[, i]
  hist(x,
       freq = F,
       col = 'wheat',
       xlab = colnames(reglog.df)[i],
       ylab = 'Density',
       main = '')
  curve(dnorm(x, mean = mean(x), sd = sd(x)), col = 'darkblue',
        lwd = 2, add = TRUE)
}
title(main = 'Histograms of the distribution of indicators',
      outer = TRUE, cex = 1.5)
par(mfrow = c(1, 1))
# Normality tests Shapiro-Wilka for log========================================================
shapiro.test(reglog.df$y)
apply(reglog.df[, 1:5], 2, shapiro.test)
str(shapiro.test(reglog.df$y))
apply(reglog.df[, 1:5], 2, function (x) {
  round(shapiro.test(x)$statistic, 2)
})
peremennaialog <- sapply(reglog.df[, 1:5], function (x) { round(shapiro.test(x)$statistic, 2)})
table1 <- data.frame(peremennaialog)
kable(table1)
#5. Analysis of linear relationships for log ---------------------------------------------
# Scatter charts for log=============================================================
pairs(reglog.df[1:5],
      pch = 21,
      col = rgb(0, 0, 1, alpha = 0.4),
      bg = rgb(0, 0, 1, alpha = 0.4),
      cex = 1.1)
# Correlation matrix for log ======================================================
matrix.cor <- cor(reglog.df[1:5])
matrix.p <- rcorr(as.matrix(reglog.df[1:5]))$P
corrplot(matrix.cor,
         method = c("shade"),
         addshade = "positive",
         order = 'original',
         diag = F,
         p.mat = matrix.p,
         insig = 'blank',
         sig.level = 0.05)
# 6. Keeping your workspace ------------------------------------------
ls()
save(list = c('DF', 'reg.df', 'reglog.df'), file = 'Example.RData') |
82c797916fc5b48a60a583be81d9e44d3eb7cacb | 3f32eae1be4ce3c3904838f58d300318594ed315 | /Functions/productionsim.R | 9415d2769917c20724e1d91c01f28474c798a6ba | [
"MIT"
] | permissive | jonwilkey/uinta_basin_oil_and_gas_emissions | 2babe13429d1b94c2a2f5ce912435b09aba164e0 | bfd502cc5b044a4a1fc53e3f9ca415310e4c017b | refs/heads/master | 2021-01-19T11:39:35.011274 | 2017-04-11T23:04:07 | 2017-04-11T23:04:07 | 87,986,100 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 7,190 | r | productionsim.R | # Function Info -----------------------------------------------------------
# Name: productionsim.R (Production Simulation)
# Author(s): Jon Wilkey
# Contact: jon.wilkey@gmail.com
# Inputs ------------------------------------------------------------------
# wsim - data.table with information about each well
# timesteps - Number of months to be simulated
# decline.type - character string switch for determining what decline/production
# curve equation to use (hyperbolic DC or cumulative production)
# Outputs -----------------------------------------------------------------
# osim/gsim - Matrix with rows = individual wells and columns = timesteps that
# gives production volume timeseries (of oil/gas) for each well in bbl (for oil)
# or MCF (for gas)
# Description -------------------------------------------------------------
# This function calculates the production volumes of oil or gas (depending on
# well type) for each well in wsim according to [1] the hyperbolic decline curve
# equation or [2] the cumulative production curve:
# [1] q = qo*(1+b*Di*t)^(-1/b)
#
# [2] Q = Cp*t^0.5+c1
# where q is production (in bbl oil or MCF gas), qo is the initial production
# rate, b is the decline exponent, Di is the initial decline rate, Cp and c1 are
# constants in the cumulative production curve, and t is time. The model
# calculates the production volumes using either Eq.[1] or Eq.[2]. If
# decline.type == "a", then Eq.[1] is used. If decline.type == "b", then Eq.[2]
# is used. Regardless of which equation is used, the coefficients are taken from
# the values stored in wsim and the calculation is peformed using the apply()
# function on each row of wsim.
# Function ----------------------------------------------------------------
# productionsim: build oil and gas production timeseries for every well.
#
# Args:
#   wsim         - data.table/data.frame with one row per well, holding the
#                  decline-curve coefficients read below (tDrill, td.*,
#                  qo.*/b.*/Di.* or Cp.*/c1.*, tend, rework).
#   timesteps    - number of months to simulate.
#   decline.type - "a" for the hyperbolic decline curve Eq.[1],
#                  "b" for the cumulative production curve Eq.[2].
#
# Returns: list(osim, gsim) — matrices with one row per well and one column
#   per month, in bbl (oil) and MCF (gas).
productionsim <- function(wsim, timesteps, decline.type) {

  # Get decline curve coefficients from wsim ------------------------------
  # Pull per-well coefficients into plain numeric matrices (one row per
  # well).  Column ORDER matters: the production functions below index
  # into each row positionally.
  switch(decline.type,

         # Hyperbolic decline curve, Eq.[1]:
         # columns = tDrill, td, qo, b, Di, tend
         a = {
           coef.oil <- with(wsim, matrix(c(tDrill, td.oil, qo.oil, b.oil, Di.oil, tend), ncol = 6))
           coef.gas <- with(wsim, matrix(c(tDrill, td.gas, qo.gas, b.gas, Di.gas, tend), ncol = 6))
         },

         # Cumulative production curve, Eq.[2]:
         # columns = tDrill, td, Cp, c1, tend
         b = {
           coef.oil <- with(wsim, matrix(c(tDrill, td.oil, Cp.oil, c1.oil, tend), ncol = 5))
           coef.gas <- with(wsim, matrix(c(tDrill, td.gas, Cp.gas, c1.gas, tend), ncol = 5))
         })

  # Internal hyperbolic and cumulative production equations ---------------

  # prodfun: monthly production for one well from the hyperbolic decline
  # curve q = qo * (1 + b*Di*t)^(-1/b).  x = one coefficient row:
  # x[1] = tDrill, x[2] = td (drill-to-production delay), x[3] = qo,
  # x[4] = b, x[5] = Di, x[6] = tend (age of a prior well at sim start).
  prodfun <- function(x) {

    # Months of zero production: months before the well is drilled plus
    # the delay between drilling and first production (never negative).
    # max() replaces the original scalar ifelse() — same result.
    TDzeros <- max(0, (x[1] - 1) + x[2])

    if (TDzeros >= timesteps) {

      # Well never produces inside the simulation window
      TD <- rep(0, times = timesteps)
      Q <- NULL
    } else {

      TD <- rep(0, times = TDzeros)

      # Months for which production is calculated; shifted by tend to
      # account for how old a prior well might be.
      tvec <- c(1:(timesteps - TDzeros)) + x[6]

      # Eq.[1]: monthly production
      Q <- x[3] * (1 + x[4] * x[5] * tvec)^(-1 / x[4])
    }

    # Zero-padded prefix followed by the decline curve
    return(c(TD, Q))
  }

  # Qprodfun: monthly production for one well from the cumulative curve
  # Q = Cp * sqrt(t) + c1, differenced into monthly volumes.  x = one
  # coefficient row: x[1] = tDrill, x[2] = td, x[3] = Cp, x[4] = c1,
  # x[5] = tend.
  Qprodfun <- function(x) {

    # Months of zero production before the curve starts (never negative)
    TDzeros <- max(0, (x[1] - 1) + x[2])

    if (TDzeros >= timesteps) {

      # Well never produces inside the simulation window
      TD <- rep(0, times = timesteps)
      Q <- NULL
    } else {

      TD <- rep(0, times = TDzeros)

      # Months for which production is calculated, shifted by tend
      tvec <- c(1:(timesteps - TDzeros)) + x[5]

      # For a prior well (tvec starts past month 1), production in the
      # first simulated month needs the cumulative value one month
      # earlier, so prepend that time point.
      if (tvec[1] != 1) {tvec <- c(tvec[1] - 1, tvec)}

      # Eq.[2]: cumulative production at each time point
      Q <- x[3] * sqrt(tvec) + x[4]

      if (length(Q) > timesteps) {

        # Prior well: monthly volumes are purely successive differences
        Q <- diff(Q)
      } else {

        # New well: first month is Q[1] itself, then the differences
        Q <- c(Q[1], diff(Q))
      }
    }

    return(c(TD, Q))
  }

  # Calculate production --------------------------------------------------
  # Apply the chosen production function to each coefficient row; t()
  # restores the wells-as-rows orientation.
  switch(decline.type,
         a = {
           osim <- t(apply(coef.oil, MARGIN = 1, FUN = prodfun))
           gsim <- t(apply(coef.gas, MARGIN = 1, FUN = prodfun))
         },
         b = {
           osim <- t(apply(coef.oil, MARGIN = 1, FUN = Qprodfun))
           gsim <- t(apply(coef.gas, MARGIN = 1, FUN = Qprodfun))
         })

  # Clamp any negative production values to zero.  Subscript assignment
  # replaces the original whole-matrix ifelse() calls: identical result,
  # preserves dimensions, avoids two full copies of each matrix.
  osim[osim < 0] <- 0
  gsim[gsim < 0] <- 0

  # Well reworks ----------------------------------------------------------
  # Zero out production from the rework month onward.  seq_along() fixes
  # a 1:length() bug in the original: with zero wells the old loop
  # iterated over c(1, 0) and failed on an out-of-bounds subscript.  The
  # NA test now runs first with short-circuiting &&.
  for (i in seq_along(wsim$rework)) {
    if (!is.na(wsim$rework[i]) && wsim$rework[i] > 0) {
      osim[i, wsim$rework[i]:timesteps] <- 0
      gsim[i, wsim$rework[i]:timesteps] <- 0
    }
  }

  # Return results --------------------------------------------------------
  return(list(osim = osim, gsim = gsim))
}
7bfb8c639ed10717f1123e1257f46e9d83cf2471 | c02eaff898e8f4eedb53bd8d3872a069458f1dc3 | /R/003_configurar_proyecto.R | fd1f39abc7c8539b31d768bac98a197adbd7a747 | [] | no_license | pabloalban/IESS_petroleros | 57ef32797e79d5b2f7cfb1586b1fcac402de6aae | c442be8f1cd86770fb9fd943fb155bdb03df2a60 | refs/heads/master | 2023-01-24T04:09:45.062537 | 2020-09-19T06:04:55 | 2020-09-19T06:04:55 | 294,846,035 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,212 | r | 003_configurar_proyecto.R | # Parámetros globales R ----------------------------------------------------------------------------
# Project-wide configuration script: sets global R options, fills the
# `parametros` environment with every path, date and constant the actuarial
# study uses, and finally clears everything else from the workspace.
# NOTE(review): setNumericRounding() comes from data.table and ymd() from
# lubridate — presumably attached by an earlier script; confirm.
message( paste( rep('-', 100 ), collapse = '' ) )
message( '\tConfiguración global de R' )
options( scipen = 99 )
setNumericRounding( 2 )
options( stringsAsFactors = FALSE )

# Parameters ---------------------------------------------------------------------------------------
message( '\tCreando entorno de parámetros' )
# Environment that holds every study parameter
parametros <- new.env()
# User name
parametros$user <- Sys.getenv( 'USER' )
# Execution date
parametros$fec_eje <- Sys.Date()
# Operating system name
parametros$opsys <- Sys.info()[[1]]
# Hostname
parametros$hostname <- Sys.info()[[4]]

# Data server: choose the data directory according to the machine this runs on
if ( parametros$hostname %in% c( 'huracan', 'tornado', 'lahar', 'empty', 'tifon','LEOVELEZ',
                                 'temu-Ubuntu', 'ava.local','DESKTOP-380U0P5', 'DESKTOP-N4VHK6P',
                                 'HP-USER', 'LPUIOMTZ011YPH') ) {
  # global Risko
  parametros$data_server <- '/mnt/data/IESS/IESS_estudio/'
  if ( parametros$hostname %in% c('LEOVELEZ') ){ # samsung machine
    parametros$data_server <- 'Z:/IESS/IESS_estudio/'
  }
  if ( parametros$hostname %in% c('ava.local') ){ # samsung machine
    parametros$data_server <- '/Volumes/data/IESS/IESS_estudio/'
  }
  if ( parametros$hostname %in% c('DESKTOP-380U0P5') ){ # telework machine
    parametros$data_server <- paste0( getwd(), '/' )
  }
  if( parametros$hostname %in% c('DESKTOP-N4VHK6P', 'LPUIOMTZ011YPH') ){
    parametros$data_server <- paste0( getwd(), '/' )
  }
  if( parametros$hostname %in% c('HP-USER') ){
    parametros$data_server <- paste0( getwd(), '/' )
  }
} else {
  # global: access to the actuarial directorate's network is required to connect
  parametros$data_server <- paste0( getwd(), '/' )
}
# local
# parametros$data_server <- paste0( getwd(), '/' )
# Working directory
parametros$work_dir <- paste0( getwd(), '/' )
# Setting Time Zone
parametros$time_zone <- "America/Guayaquil"
# IESS corporate colors
parametros$iess_blue <- rgb( 0, 63, 138, maxColorValue = 255 )
parametros$iess_green <- rgb( 0, 116, 53, maxColorValue = 255 )
# Compute balance
# parametros$calcular_balance <- FALSE
# Affiliate contribution proportion
parametros$mont_prop_afi <- 0.1275

# Global paths -------------------------------------------------------------------------------------
message( '\tEstableciendo directorios globales' )
parametros$empresa <- 'IESS'
message( '\tConfiguración seguro' )
# message( '\t\tLas opciones son: IVM, SAL, RTR, DES, SSC, CES' )
# Insurance line under study ('PE' = petroleum workers)
parametros$seguro <- 'PE'
# if ( !( parametros$seguro %in% c( 'IVM', 'SAL', 'RTR', 'DES', 'SSC', 'CES' ) ) ) {
#   stop( 'El seguro ingresado no está entre las opciones' )
# }
# Flag: run the demographic analysis
parametros$hacer_ana_dem <- FALSE
parametros$calcular_balance <- FALSE

# Insurance-specific settings ----------------------------------------------------------------------
parametros$fec_fin <- ymd( '2018-12-31' )
parametros$anio_ini <- 2018
parametros$anio <- 2019 # study year
parametros$edad_max <- 105
parametros$horizonte <- 20 # in years
parametros$fec_ini <- ymd( '2013-01-01' ) # start date of the observation period
parametros$reserva_ini <- 764254662.48 # initial reserve
parametros$ana_dem <- paste0( parametros$work_dir, 'R/300_analisis_demografico.R' )

# Derived variables --------------------------------------------------------------------------------
# All of these are built from the server/work-dir/report-name pieces above.
parametros$RData <- paste0( parametros$data_server, 'RData/' )
parametros$Data <- paste0( parametros$data_server, 'Data/' )
parametros$RData_seg <- paste0( parametros$data_server, 'RData/' )
parametros$Data_seg <- paste0( parametros$data_server, 'Data/' )
parametros$reportes <- paste0( parametros$work_dir, 'Reportes/' )
parametros$resultados <- paste0( parametros$work_dir, 'Resultados/' )
parametros$reporte_seguro <- paste0( parametros$work_dir, 'Reportes/Reporte_',
                                     parametros$seguro, '/' )
parametros$calculo_balance <- paste0( parametros$work_dir, 'R/310_calculo_escenarios_balance.R' )
parametros$reporte_genera <- paste0( parametros$work_dir, 'R/600_reporte_latex.R' )
parametros$reporte_script <- paste0( parametros$reporte_seguro, 'reporte.R' )
parametros$reporte_nombre <- paste0( parametros$empresa, '_',
                                     parametros$seguro, '_estudio_actuarial' )
parametros$reporte_latex <- paste0( parametros$reporte_nombre, '.tex' )
parametros$resultado_seguro <- paste0( parametros$resultados, parametros$reporte_nombre, '_',
                                       format( parametros$fec_eje, '%Y_%m_%d' ), '/' )
parametros$resultado_tablas <- paste0( parametros$resultados, parametros$reporte_nombre, '_',
                                       format( parametros$fec_eje, '%Y_%m_%d' ), '/tablas/' )
parametros$resultado_graficos <- paste0( parametros$resultados, parametros$reporte_nombre, '_',
                                         format( parametros$fec_eje, '%Y_%m_%d' ), '/graficos/' )
parametros$graf_modelo_1 <- 'R/401_graf_plantilla.R'
parametros$graf_ext <- '.png'

message( paste( rep('-', 100 ), collapse = '' ) )
# Keep only `parametros` in the workspace and reclaim memory
rm( list = ls()[ !( ls() %in% 'parametros' ) ] )
gc()
|
dfd5775fbeff2c8ce342b56a419dd8fe3e4e15ae | e6225189e3d812d1075cd13e6decbc009a4a681c | /ui.R | 4007d26a9a847cc84cc2933fa4b8a2edff3d0d94 | [] | no_license | calypah/datascience | dfa44ee0d8be3ffeb3056d6d812e1786cf1e9941 | a3ebe1d5fce098014e039c04e94424d0cebc06a5 | refs/heads/master | 2023-03-17T09:21:53.821214 | 2016-10-03T20:26:48 | 2016-10-03T20:26:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,437 | r | ui.R | library(shiny)
# Shiny UI for the word-prediction app (stupid backoff algorithm).
# shinyjs powers the enable/disable behaviour of the Predict button.
library(shinyjs)
shinyUI(fluidPage(
  # init shinyjs
  useShinyjs(),

  # Application title
  titlePanel("Word prediction using stupid backoff algorithm"),

  # Sidebar: static description and usage instructions
  sidebarLayout(
    sidebarPanel(
      h4("Description"),
      p("This application, given an input text, gives a prediction of the next word that\
        should follow the text."),
      p(" "),
      h4("Usage"),
      # BUG FIX: user-facing typo "is show." corrected to "is shown."
      p("Enter your text in the text box labeled ", em("Input phrase,"), "and press the ",
        em("Predict"), "button. A list of up to five predicted words, along with their \
        score (i.e., probability) is shown."),
      p(strong("Notes:")),
      tags$ul(
        tags$li("The", em("Predict"), "button is disabled until initialization is \
                complete - this might take a few seconds."),
        tags$li("Punctuation is ignored - it is neither taken into account for prediction,\
                nor is shown as a predicted word.")
      )
    ),

    # Main panel: input box, Predict button (starts disabled until the
    # server enables it), and the prediction table
    mainPanel(
      textInput("in.phrase", "Input phrase:"),
      disabled(actionButton("goButton", "Predict")),
      p(" "),
      p(" "),
      tags$br(),
      p(strong("Predicted words:")),
      tableOutput("pred")
    )
  )
)) |
8178e89a6751d2079ce7ed6f7cdad9318d6bca88 | 59d95eeba9a85df2205084a1939a52f11198411c | /plot4.R | 2ecea35151710881bb369391d8fcfcc8a3ed0c6a | [] | no_license | sachinbb5/ExpData | 0eb7d042770c29c5398050d73ad13f64273cc5c7 | 6fec34eca78eb16cb2ce0de3b1357dae27920a40 | refs/heads/master | 2020-12-31T04:28:40.210726 | 2016-01-10T20:41:37 | 2016-01-10T20:41:37 | 49,381,746 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,090 | r | plot4.R | # Plot 3. Data set should be in workdir with scripts.
# Plot 4: 2x2 panel of household power plots for 2007-02-01/02.
# (The fused header above says "Plot 3", but this script writes plot4.png.)
# The data set must sit in the working directory; dev.off() below closes
# the device.
# Read Data ("?" marks missing values, fields are semicolon-separated)
pc <- read.csv("household_power_consumption.txt", na.string="?", sep=";")
# Extract time range dataset (dates are still d/m/Y strings here)
pc <- pc[(pc$Date=="1/2/2007" | pc$Date=="2/2/2007"),]
# Combine Date and Time into a single timestamp for the x axes
pc$DateTime <- strptime(paste(pc$Date, pc$Time, sep=" "),
                        format="%d/%m/%Y %H:%M:%S")
# Open png device and lay out a 2x2 grid (mfrow fills row-first)
png("plot4.png", width=480, height=480)
par(mfrow=c(2,2))
#1
# Top-left: global active power over time
plot(pc$DateTime, pc$Global_active_power, type="l",
     xlab="", ylab="Global Active Power")
#2
# Top-right: voltage over time
plot(pc$DateTime,pc$Voltage , type='l', xlab='datetime', ylab='Voltage')
#3
# Bottom-left: the three sub-metering series overlaid, plus a legend.
plot(pc$DateTime,pc$Sub_metering_1, type='l', xlab='', ylab='Energy sub metering')
lines(pc$DateTime,pc$Sub_metering_2, col="red")
lines(pc$DateTime,pc$Sub_metering_3, col="blue")
# BUG FIX: legend() was given col= but no line type, so no coloured key
# samples were drawn; lty=1 makes the coloured line keys appear.
legend('topright',c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col=c("black","red","blue"), lty=1)
#4
# Bottom-right: global reactive power over time
plot(pc$DateTime,pc$Global_reactive_power, type='l', xlab='datetime', ylab='Global_reactive_power')
#close device
dev.off() |
f971e9898901ad29fc4aab58db687b9b5ee69885 | 876670c7f171f4a30d08f2e41f491783ae9b3e23 | /plots/figure_3/figure_3_timecourse_plots.R | 8f753030a660453d4300bb3394769f255738760d | [] | no_license | Jun-Lizst/SingleCellLineage | 82f873c393e4e4f056cec03d359b274e937a19de | 20ec99b84a762fc627446b55f5453f0cdb565be4 | refs/heads/master | 2023-01-14T15:41:13.348121 | 2020-11-21T19:38:13 | 2020-11-21T19:38:13 | 327,058,591 | 1 | 0 | null | 2021-01-05T16:39:48 | 2021-01-05T16:39:47 | null | UTF-8 | R | false | false | 4,359 | r | figure_3_timecourse_plots.R | library(reshape2)
library(ggplot2)
library('ggthemes')
txt <- read.delim("/mount/vol10/CRISPR.lineage/nobackup/2016_03_05_Embryos/embryo_summary.txt",stringsAsFactors = F)
txt = txt[txt$passHMIDs > 100,]
txt$type <- sapply(txt$sample,function(x) {unlist(strsplit(x,"_"))[1]})
txt$rate <- sapply(txt$sample,function(x) {unlist(strsplit(x,"_"))[3]})
txt$type = factor(txt$type,levels=c("Dome","epi90","30hr","3d"))
txt = txt[order(txt$type,txt$meanSitesEdited),] #
txt$index = seq(1,nrow(txt))
melt.txt = melt(txt,measure.vars = c("meanSitesEdited","meanCutSites"),id.vars = c("sample","type","index","rate"))
txt$type = as.character(txt$type)
txt$type[txt$type == "Dome"] = "4.3 hpf"
txt$type[txt$type == "epi90"] = "9 hpf"
txt$type[txt$type == "30hr"] = "30 hpf"
txt$type[txt$type == "3d"] = "72 hpf"
txt$type = factor(txt$type,levels=c("4.3 hpf","9 hpf","30 hpf","72 hpf"))
# color_set = c("#FF5F00","#0107FA")
# color_set = c("#999999", "#E69F00", "#56B4E9")
color_set = c("#999999", "#01A4AC","black")
# we dont have a ton of points, so maybe this isn't the best representation. Turn on jitter to see
editing.by.embryo = ggplot(txt) + # geom_jitter(position=position_dodge(1)) +
scale_fill_manual(values=color_set,name="Concentration") + theme_tufte() +
geom_jitter(aes(y=meanSitesEdited/10.0, x=type, fill=rate), colour="black",pch=21, size=3, position=position_dodge(1)) +
ylab("Edited proportion") + xlab("Developmental stage") +
theme(axis.text=element_text(family = "sans", size=15)) +
theme(axis.title=element_text(family = "sans", size=15)) + ylim(c(0,1)) +
geom_smooth(method = "lm", se = FALSE)
editing.by.embryo = ggplot(txt) + # geom_jitter(position=position_dodge(1)) +
scale_fill_manual(values=color_set,name="Concentration") + theme_tufte() +
geom_bar(aes(y=meanSitesEdited/10.0,x=sample, fill=rate),stat="identity") +
ylab("Edited proportion") + xlab("Developmental stage") +
theme(axis.text=element_text(family = "sans", size=15)) +
theme(axis.title=element_text(family = "sans", size=15)) + ylim(c(0,1)) +
geom_smooth(method = "lm", se = FALSE)
unique.by.embryo = ggplot(txt) + # geom_jitter(position=position_dodge(1)) +
scale_fill_manual(values=color_set,name="Concentration") + theme_tufte() +
geom_jitter(aes(y=uniqueHMIDs/passHMIDs, x=type, fill=rate), colour="black",pch=21, size=3, position=position_dodge(.3)) +
ylab("Unique HMID proportion") + xlab("Developmental stage") +
theme(axis.text=element_text(family = "sans", size=15)) +
theme(axis.title=element_text(family = "sans", size=15)) + ylim(c(0,1)) +
geom_smooth(method = "lm", se = FALSE)
# ggsave(editing.by.embryo,file="embryo_editing_by_type.pdf",width=5,height=3)
ggsave(editing.by.embryo,file="embryo_editing_by_type.png",width=5,height=3,dpi = 300)
ggsave(unique.by.embryo,file="unique_editing_by_type.png",width=5,height=3,dpi = 300)
unique.by.embryo = ggplot(txt) +
geom_point(aes(x=passHMIDs, y=uniqueHMIDs, col=rate, shape=type), size=3) +
geom_rangeframe() +
theme_tufte() +
scale_shape_manual(values = c(15, 16, 17, 18),name="Developmental\nstage") +
scale_color_manual(values=color_set,name="Concentration") +
ylab("Unique HMIDs") +
xlab("Total captured HMIDs") +
geom_line(aes(x=passHMIDs, y=uniqueHMIDs/passHMIDs, col="black"), size=1) +
theme(legend.text=element_text(family = "sans", size=12)) +
theme(legend.title=element_text(family = "sans", size=12)) +
theme(axis.text=element_text(family = "sans", size=12)) +
theme(axis.title=element_text(family = "sans", size=12))
unique.by.embryo = ggplot(txt) +
geom_bar(aes(x=passHMIDs, y=uniqueHMIDs, col=rate, shape=type), size=3) +
geom_rangeframe() +
theme_tufte() +
scale_shape_manual(values = c(15, 16, 17, 18),name="Developmental\nstage") +
scale_color_manual(values=color_set,name="Concentration") +
ylab("Unique HMIDs") +
xlab("Total captured HMIDs") +
theme(legend.text=element_text(family = "sans", size=12)) +
theme(legend.title=element_text(family = "sans", size=12)) +
theme(axis.text=element_text(family = "sans", size=12)) +
theme(axis.title=element_text(family = "sans", size=12))
# ggsave(unique.by.embryo,file="unqiue_vs_captured_HMIDs_by_stage.pdf",width=5,height=3)
ggsave(unique.by.embryo,file="unqiue_vs_captured_HMIDs_by_stage.png",width=5,height=3)
|
e6d501f026f2c4924b1d6f68b9d6ee116b83ed30 | 5ea9a574c5a0452ecc5a8a49ffe4334936eed22b | /DMSA/R/R-Model/employee.R | 3cf43cb2566df0ba8068a238402253289a96bbe1 | [] | no_license | navalvaidya/TE-PL1 | bc4b1712f76ca1f35f370ecac89f657adfc7cd02 | e01bf8db4ac060668627dab5eca7f4d3a01c47c7 | refs/heads/master | 2021-01-10T07:02:50.883619 | 2015-10-06T15:00:56 | 2015-10-06T15:00:56 | 43,758,047 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,769 | r | employee.R | library(RJSONIO)
# Exploratory analysis of 50 employee records stored in employee.json.
# fromJSON() comes from RJSONIO (loaded above) and returns a list with one
# element per employee.
emp <- fromJSON("employee.json")
str(emp)
# Performance rating of employees 1..50.  The original built this from
# fifty hand-written c(emp[[i]]$performance, ...) terms; extracting the
# field programmatically is equivalent and far less error-prone.
# use.names = FALSE keeps the vector unnamed, as c() produced.
performace.obj <- unlist(lapply(emp[1:50], "[[", "performance"), use.names = FALSE)
class(performace.obj)
summary(performace.obj)
factor(performace.obj)
table(performace.obj)
#analysis on attendace
# BUG FIX: the hand-written version duplicated emp[[9]]$attendance and
# never included emp[[50]]$attendance; this extracts each of the 50
# records exactly once.
attendance.obj <- unlist(lapply(emp[1:50], "[[", "attendance"), use.names = FALSE)
summary(attendance.obj)
hist(attendance.obj)
hist(attendance.obj, xlab="attendance of employees in percentage", ylab = "no. of employees", main = "histogram on attendance")
# Box plot of attendance grouped by performance level
plot(factor(performace.obj),attendance.obj, xlab="performance of employee", ylab="attendance of employee")
#analysis on results
product <- lapply(emp[1:50], "[[", "results_prod")
z <- data.frame(product)
b <- unlist(z)
plot(b,attendance.obj)
|
cb9b45345659d4a029016f1a4662e0560dd2afe1 | a37eb696509854770de107f476d6b623f7f37aa2 | /man/sntd.a.Rd | db0a4dad9bad9e9678bb376c18d576e46a24ed0f | [] | no_license | NGSwenson/lefse_0.5 | 2cff67290caca266ac1cfd46109c9892af2b80d7 | 74814e2f4f8a1f0244da8e8a48bb17897718db67 | refs/heads/master | 2021-01-10T14:59:58.336073 | 2015-11-19T23:16:07 | 2015-11-19T23:16:07 | 46,526,271 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 930 | rd | sntd.a.Rd | \name{sntd.a}
\alias{sntd.a}
\title{ Functional standard deviation of the NND with abundance weighting }
\description{ Quantifies the functional standard deviation of the nearest neighbor distance with abundance weighting }
\usage{ sntd.a(dist.mat, my.sample) }
\arguments{
\item{dist.mat}{ A trait distance matrix. }
\item{my.sample}{ A community data matrix. }
}
\details{ Quantifies the functional standard deviation of the nearest neighbor distance with abundance weighting }
\value{
\bold{ sntd.a }
The abundance weighted standard deviation of the nearest functional neighbor distance for each community
}
\references{ Swenson, N.G. 2014. Functional and Phylogenetic Ecology in R. Springer UseR! Series, Springer, New York, New York, U.S.A. }
\author{ Nathan G. Swenson }
\seealso{ \code{\link{Fsntd}} }
\examples{
data(lefse.sample)
data(lefse.traits)
sntd.a(as.matrix(dist(lefse.traits)), lefse.sample)
}
|
98bbab991b33aed28d1476c53aea01053f791049 | 3c95609b2555c4fe1d6a08efed097fd4093d20d0 | /Visualizacion/Shiny/muestras aleatorias/UI.R | 431952f9f86112a00bf9ae76af602cf95ab6c3ff | [] | no_license | ManuelCarmonaDS/Master-in-Data-Science---CUNEF | 97e90eb948ab3931c8e6ac95de85cbb0ba1fc15f | 36a7032bbb14143121cf65b70c1841f16e4d8d82 | refs/heads/master | 2021-09-07T19:02:17.739214 | 2018-02-27T15:56:57 | 2018-02-27T15:56:57 | 109,852,006 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,271 | r | UI.R | library(shiny)
# Shiny UI: an app that generates random samples with a user-chosen Pearson
# correlation.  User-facing labels are in Spanish and deliberately left
# untouched; the code comments below are translated to English.
shinyUI(fluidPage(
titlePanel("Generacion de muestras aleatorias"),
sidebarLayout(
sidebarPanel(
numericInput("n", label = h3("Muestra"), min= 1, max= 10000, value = 100, step= 10), # Sample size
numericInput("correlacion", label = h3("Correlacion Pearson"), min= -1, max=1, value = 1, step=.1), # Pearson correlation
actionButton("generala", label = "Generar muestra aleatoria") # Button: generate a new random sample
),
mainPanel(
fluidRow(
plotOutput("grafica", click="punto"), # Plot output; also an input for click events ("punto")
verbatimTextOutput("exitcorrelacion") # Text output with the correlation
)
)
)
)) |
0d1295cccd975abfcb60871880375419b9998221 | ff8272e4d55dcada2597638fcae9d56ff80b4fc7 | /genhet script AA.r | b1c2d62dec77a3047f6c02057f369bf14268e774 | [] | no_license | AdrianAllen1977/R-code | 537c08dd29d29514661c7e3b35679bef46e96c54 | 1a1cd56007bfca2cae36a05537402b13b0df94b0 | refs/heads/master | 2023-07-20T08:25:56.300787 | 2023-07-17T18:20:42 | 2023-07-17T18:20:42 | 172,774,255 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 924 | r | genhet script AA.r | ### Using genhet to determine individual levels of heterozygosity
## Set the working directory (machine-specific path; adjust for your system).
setwd("Desktop/R stuff/Irish badgers/NI badger heterozygosity July 18")

## Import the genotype table (tab-separated; unknown genotypes coded as 0).
## header=TRUE replaces header=T: T is an ordinary variable that can be
## reassigned, so spelling out TRUE is safer.  The object is named `genos`
## instead of `data` so it no longer masks utils::data().
genos <- read.table("NI_RTA_badger_genos.txt", sep = "\t", header = TRUE)

## Import the list of locus names; the file must list the loci in the same
## order as they appear in the genotype file.
locusname <- scan("Badger_STR_locus_list.txt", what = "character", sep = "\t")

## gtools is required by GENHET.
library(gtools)

## GENHET() is not in a package: paste the full function definition from the
## Genhet.r text file into the session before running the next line.
## (estimfreq = "T" is GENHET's own string-valued flag, not the logical TRUE.)
het <- GENHET(dat = genos, estimfreq = "T", locname = locusname)

## Write a table of heterozygosity metrics per sample.
write.table(het,"NI badger heterozygosity.txt",sep="\t",row.names=F,quote=F) |
9f7ca0d571d7cbfdcfeab3a4963d908ba46c7336 | 4bcfc4d7e729f5806ff4e118d46435383d1aefa0 | /man/psoriasis100.Rd | 9e038ede909ef548b1e9f98dff5b0850499b85cd | [] | no_license | cran/MBNMAdose | c2d8db8efb642d8002fd3254f7898401e51a3824 | 7147691aa7d4c4721f8c1c141318d62d8e5d2a17 | refs/heads/master | 2023-08-18T09:48:29.048696 | 2023-08-08T13:00:19 | 2023-08-08T14:31:02 | 198,422,970 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,553 | rd | psoriasis100.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{psoriasis100}
\alias{psoriasis100}
\title{Studies of biologics for treatment of moderate-to-severe psoriasis (100\% improvement)}
\format{
A data frame in long format (one row per arm and study), with 81 rows and 9 variables:
\itemize{
\item \code{studyID} Study identifiers
\item \code{agent} Character data indicating the agent to which participants were randomised
\item \code{dose_mg} Numeric data indicating the dose to which participants were randomised in mg
\item \code{freq} Character data indicating the frequency of the dose to which participants were randomised
\item \code{dose} Numeric data indicating the dose in mg/week to which the participants were randomised
\item \code{n} Numeric data indicating the number of participants randomised
\item \code{r} Numeric data indicating the number of participants who achieved 100\% improvement in PASI score after 12 weeks
}
}
\usage{
psoriasis100
}
\description{
A dataset from a systematic review of Randomised-Controlled Trials (RCTs) comparing biologics at different doses and placebo
\insertCite{warren2019}{MBNMAdose}. The outcome is the number of patients experiencing 100\% improvement on the Psoriasis
Area and Severity Index (PASI) measured at 12 weeks follow-up. The dataset
includes 19 Randomised-Controlled Trials (RCTs), comparing 8 different biologics at different doses with placebo.
}
\references{
\insertAllCited{}
}
\keyword{datasets}
|
2ba21a90f48c2818e0f498272b4cc36edc346db9 | 0f7b338d9056c39764f6edcfcdc4ef7f041720c3 | /inst/doc/numberLines.R | 85966af927231bb7bc237dc643bc3270f6fb08fd | [] | no_license | cran/chunkhooks | 3d90589b7e1669d64d7e6ec441cea90ed0466274 | 418896d456718f737c008a6203dc7d4d1b264998 | refs/heads/master | 2022-11-26T21:19:11.250311 | 2020-08-05T14:00:02 | 2020-08-05T14:00:02 | 286,204,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 792 | r | numberLines.R | ## ----setup--------------------------------------------------------------------
# NOTE(review): this file appears to be generated by knitr::purl() from the
# package vignette (the "## ----" lines are chunk delimiters) -- confirm, and
# prefer editing the source .Rmd, as changes here would be overwritten.
library(chunkhooks)
hook_numberLines()
## -----------------------------------------------------------------------------
# By default, Source lines are numbered
"Output lines are not numbered"
## -----------------------------------------------------------------------------
hook_numberLines(c("source", "output"))
## -----------------------------------------------------------------------------
# After `hook_numberLines(c("source", "output"))`,
# Source lines are numbered
"Output lines are also numbered"
## ---- numberLines="output"----------------------------------------------------
# numberLines="output" is specified as a chunk option
# Source lines are not numbered
"Output lines are also numbered"
|
87fb69a05ed719bb260fb1924d7e84670b06e2e2 | 1d8bf2931c5b530c8c11c42940ed5dc29fa4512f | /man/tau3scen1.Rd | c9d2913bdba5a17c8c9b10ceedf9f374be7bb4c0 | [] | no_license | cran/chi2x3way | ce5ad54fbd65f0699a01b8f779c18bbd55acde45 | c58f0523734bbd3d14a33b1dc6db1ea2f2ccdcc0 | refs/heads/master | 2021-01-12T10:07:23.826604 | 2017-01-23T20:11:06 | 2017-01-23T20:11:06 | 76,366,790 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,266 | rd | tau3scen1.Rd | \name{tau3scen1}
\alias{tau3scen1}
\title{
Marcotorchino's index for three-way contingency tables under Scenario 1
}
\description{
It provides the partition of the Marcotorchino's index and its related $C_M$-statistic under the Scenario 1 when probabilities are homogeneous.
}
\usage{
tau3scen1(X, pi=rep(1/dim(X)[[1]],dim(X)[[1]]), pj=rep(1/dim(X)[[2]],dim(X)[[2]]),
pk=rep(1/dim(X)[[3]],dim(X)[[3]]), digits = 3)
}
\arguments{
\item{X}{The three-way contingency table.}
\item{pi}{The input parameter for specifying the theoretical probabilities of rows categories. When \code{scen = 1}, they can be prescribed by the analyst. \cr
By default, they are set equal among the categories, homogeneous margins (uniform probabilities), that is \code{pi = rep(1/dim(X)[[1]],dim(X)[[1]])}.
}
\item{pj}{The input parameter for specifying the theoretical probabilities of columns categories. When \code{scen = 1}, they can be prescribed by the analyst. \cr
By default, they are set equal among the categories, homogeneous margins (uniform probabilities), that is \code{pj = rep(1/dim(X)[[2]],dim(X)[[2]])}.
}
\item{pk}{The input parameter for specifying the theoretical probabilities of tube categories. When \code{scen = 1}, they can be prescribed by the analyst. \cr
By default, they are set equal among the categories, homogeneous margins (uniform probabilities), that is \code{pk = rep(1/dim(X)[[3]],dim(X)[[3]])}.
}
\item{digits}{The minimum number of decimal places, \code{digits}, used for displaying the numerical summaries of the analysis. By default, \code{digits = 3}.
}
}
\value{
Description of the output returned
\item{z}{The partition of the Marcotorchino's index, of the $C_M$-statistic and its revised formula, under Scenario 1. We get seven terms partitioning
the Marcotorchino's index and the related $C_M$-statistic: three main terms, two bivariate terms and a trivariate term.
The output is in a matrix, the six rows of this matrix indicate the tau index numerator, the tau index, the percentage of explained inertia, the $C_M$-statistic,
the degree of freedom, the p-value, respectively.}
}
\references{
Beh EJ and Lombardo R (2014) Correspondence Analysis: Theory, Practice and New Strategies. John Wiley & Sons.\cr
Lancaster H O (1951) Complex contingency tables treated by the partition of the chi-square. Journal of Royal Statistical Society, Series B, 13, 242-249. \cr
Loisel S and Takane Y (2016) Partitions of Pearson's chi-square statistic for frequency tables: A comprehensive account. Computational Statistics, 31, 1429-1452.\cr
Lombardo R Carlier A D'Ambra L (1996) Nonsymmetric correspondence analysis for three-way contingency tables. Methodologica, 4, 59-80. \cr
Marcotorchino F (1985) Utilisation des comparaisons par paires en statistique des contingences: Partie III. Etude du Centre Scientifique, IBM, France. No F 081
}
\author{
Lombardo R and Takane Y}
\note{
This function belongs to the class \code{chi3class}.
}
\examples{
data(olive)
tau3scen1(olive)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{Marcotorchino}% use one of RShowDoc("KEYWORDS")
\keyword{three-way index}% __ONLY ONE__ keyword per line
|
0be9b31ff2397367682955422059ceb3e610bf36 | 05bb165785e0f5c697fc1d863dc8e1f862f2e639 | /Q2_Data.R | a758fbfdac9c1167f995350c4c299c39c4421632 | [] | no_license | 7446Nguyen/OLS-and-Time-Series-Prediction | 8b72393bf3f38e3a93323f1396231200e33460b8 | 49cb250d2e94d4f4c0ffc148cd263292c52afbee | refs/heads/master | 2022-02-23T07:22:45.731379 | 2019-10-07T04:35:59 | 2019-10-07T04:35:59 | 208,175,251 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,718 | r | Q2_Data.R | library(pacman)
# Load (and auto-install) the packages used across the project via pacman.
p_load(lmtest,
       dplyr,
       Hmisc,
       skimr,
       tidyr,
       na.tools,
       tidyverse,
       olsrr,
       caret,
       multcomp,
       ggthemes,
       MASS,      # for OLS
       regclass,  # for VIF
       stats,
       glmnet,
       sjPlot,
       sjmisc,
       ggplot2,
       xlsx)

# ---- Format dates ---------------------------------------------------------
# Keep only the columns needed for the time-series extract.
df <- read.csv("./modelingData.csv", header = TRUE, sep = ",",
               strip.white = TRUE, stringsAsFactors = FALSE)
df <- data.frame(df$id, df$timestamp, df$price_doc)
colnames(df) <- c("id", "timestamp", "price_doc")

# Convert timestamp to Date.  origin = "1899-12-30" suggests the raw values
# are Excel-style serial day numbers -- TODO confirm against modelingData.csv.
# The original ran this conversion twice (the second pass was a no-op on an
# already-Date column) and left two stray top-level statements
#   tryFormats = c("%Y-%m-%d", "%Y/%m/%d")
# which only created an unused global -- tryFormats is an *argument* of
# as.Date(), not an option -- so both redundancies are removed here.
df <- df %>% mutate(timestamp = as.Date(timestamp, origin = "1899-12-30"))

# Duplicate the date so it can be split into year/month/day components while
# keeping the original Date column intact.
timestamp2 <- df$timestamp
df <- data.frame(timestamp2, df)
df <- df %>% separate(timestamp2, sep = "-", into = c("year", "month", "day"))

# Year-month key (e.g. "201108") used for monthly aggregation.
monthYear <- as.factor(paste0(df$year, df$month))
df <- data.frame(df$id, df$timestamp, df$month, df$day, df$year, monthYear, df$price_doc)
colnames(df) <- c("id", "timestamp", "month", "day", "year", "monthYear", "price_doc")
write.csv(df, "timeSeriesA.csv", row.names = FALSE)

# ---- Monthly average sale price -------------------------------------------
df2 <- data.frame(monthYear, df)
df2 <- data.frame(df2$month, df2$monthYear, df2$price_doc)
colnames(df2) <- c("MonthNumber", "monthYear", "price_doc")
# Mean (not median, despite the original comment) price per month-year.
dfAvgPriceMoYr <- df2 %>%
  group_by(monthYear, MonthNumber) %>%
  summarise(AvgPrice = mean(price_doc)) %>%
  data.frame()
df2 <- data.frame(dfAvgPriceMoYr$MonthNumber, dfAvgPriceMoYr$monthYear, dfAvgPriceMoYr$AvgPrice)
colnames(df2) <- c("MonthNumber", "monthYear", "AvgPrice")
write.csv(df2,"timeSeriesB.csv", row.names = F) |
4e4dadcfa15d5733ff3d8a502209a5cc9aa8ee56 | 2099a2b0f63f250e09f7cd7350ca45d212e2d364 | /DUC-Dataset/Summary_p100_R/D105.FT921-5959.html.R | 6bd546ff2f817a22dbc69aa0b296e0f53e13c081 | [] | no_license | Angela7126/SLNSumEval | 3548301645264f9656b67dc807aec93b636778ef | b9e7157a735555861d2baf6c182e807e732a9dd6 | refs/heads/master | 2023-04-20T06:41:01.728968 | 2021-05-12T03:40:11 | 2021-05-12T03:40:11 | 366,429,744 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 963 | r | D105.FT921-5959.html.R | <html>
<head>
<meta name="TextLength" content="SENT_NUM:6, WORD_NUM:105">
</head>
<body bgcolor="white">
<a href="#0" id="0">Armenians in the enclave want unification with Armenia, but Azerbaijan will not give up any territory.</a>
<a href="#1" id="1">No dates have been given for the withdrawal.</a>
<a href="#2" id="2">Marshal Yevgeny Shaposhnikov, the Commonwealth of Independent States' defence minister, announced the decision as fighting worsened in the disputed territory in spite of efforts to secure a truce.</a>
<a href="#3" id="3">Troops under fire along the Armenian-Azerbaijani border will also be withdrawn.</a>
<a href="#4" id="4">The enclave is administered by Azerbaijan but largely populated by Armenians.</a>
<a href="#5" id="5">The order for the pull-out of the 366th motorised regiment follows the deaths earlier this week of a handful of soldiers caught in the fighting between Armenian guerrillas and Azerbaijani forces.</a>
</body>
</html> |
19976dc3f9473ec7daac2212c79d0f107b8c2ccd | 3f3b5ceebbad93ad7ce83405bf74a375c6323ac7 | /Abhinaya_project.R | fda8b67cafb19ab9616d27d9abd82166ef6d9592 | [] | no_license | abhinayaiyer/Twitter-Sentiment-Analysis | 9d446751466a87374d75393fb9d86fa6ccffa7af | 93d0eafc2ddc4a79e0e0d3377452ac2846d307d1 | refs/heads/master | 2020-03-08T02:36:47.017771 | 2018-04-05T05:01:57 | 2018-04-05T05:01:57 | 127,865,132 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,168 | r | Abhinaya_project.R |
install.packages("twitteR")
install.packages("stringr")
install.packages("plyr")
library(stringr)
library(twitteR)
library(plyr)
api_key<- "GP90VZhZe4Ud0kzXzXGGxsjr2"
api_secret <- "JUr6cKf7EnFfI8zqFVBFKQJ4iEYx6Y03rnfFbAx174aJYgO9kV"
access_token <- "3302893622-K5oJ6OlZOLiSZiDvtJFWZ4c7m1OvDrSYLmnvnco"
access_token_secret <- "4tQ2ECOIEo0uOOFY4ilSJ4a9QgQTyLRWMWsBUgYndxqyl"
setup_twitter_oauth(api_key,api_secret,access_token,access_token_secret)
z<-searchTwitter("heretocreate", n=10000, lang="en")
w<-searchTwitter("mygirls", n=10000, lang="en")
length(w)
length(z)
heretocreate <- sapply(z, function(x) x$getText())
mygirls <- sapply(w, function(x) x$getText())
#write.csv(w,'mygrils_tweets.csv')
catch.error = function(x)
{
# let us create a missing value for test purpose
y = NA
# Try to catch that error (NA) we just created
catch_error = tryCatch(tolower(x), error=function(e) e)
# if not an error
if (!inherits(catch_error, "error"))
y = tolower(x)
# check result if error exists, otherwise the function works fine.
return(y)
}
cleanTweets<- function(tweet){
# Clean the tweet for sentiment analysis
# remove html links, which are not required for sentiment analysis
tweet = gsub("(f|ht)(tp)(s?)(://)(.*)[.|/](.*)", " ", tweet)
# First we will remove retweet entities from the stored tweets (text)
tweet = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", " ", tweet)
# Then remove all "#Hashtag"
tweet = gsub("#\\w+", " ", tweet)
# Then remove all "@people"
tweet = gsub("@\\w+", " ", tweet)
# Then remove all the punctuation
tweet = gsub("[[:punct:]]", " ", tweet)
# Then remove numbers, we need only text for analytics
tweet = gsub("[[:digit:]]", " ", tweet)
# finally, we remove unnecessary spaces (white spaces, tabs etc)
tweet = gsub("[ \t]{2,}", " ", tweet)
tweet = gsub("^\\s+|\\s+$", "", tweet)
# if anything else, you feel, should be removed, you can. For example "slang words" etc using the above function and methods.
# Next we'll convert all the word in lower case. This makes uniform pattern.
tweet = catch.error(tweet)
tweet
}
cleanTweetsAndRemoveNAs<- function(Tweets) {
TweetsCleaned = sapply(Tweets, cleanTweets)
# Remove the "NA" tweets from this tweet list
TweetsCleaned = TweetsCleaned[!is.na(TweetsCleaned)]
names(TweetsCleaned) = NULL
# Remove the repetitive tweets from this tweet list
TweetsCleaned = unique(TweetsCleaned)
TweetsCleaned
}
MyGirlsCleaned = cleanTweetsAndRemoveNAs(mygirls)
HereToCreateCleaned=cleanTweetsAndRemoveNAs(heretocreate)
length(MyGirlsCleaned)
length(HereToCreateCleaned)
neg.words = scan("negative-words.txt", what="character", comment.char=";")
pos.words = scan("positive-words.txt", what="character", comment.char=";")
neg.words = c(neg.words, 'wtf','smh')
pos.words = c(pos.words, 'lol')
getSentimentScore = function(sentences, words.positive, words.negative,
.progress='none')
{
require(plyr)
require(stringr)
scores = laply(sentences, function(sentence, words.positive, words.negative) {
# Let first remove the Digit, Punctuation character and Control characters:
sentence = gsub('[[:cntrl:]]', '', gsub('[[:punct:]]', '', gsub('\\d+', '', sentence)))
# Then lets convert all to lower sentence case:
sentence = tolower(sentence)
# Now lets split each sentence by the space delimiter
words = unlist(str_split(sentence, '\\s+'))
# Get the boolean match of each words with the positive & negative opinion-lexicon
pos.matches = !is.na(match(words, words.positive))
neg.matches = !is.na(match(words, words.negative))
# Now get the score as total positive sentiment minus the total negatives
score = sum(pos.matches) - sum(neg.matches)
return(score)
}, words.positive, words.negative, .progress=.progress )
# Return a data frame with respective sentence and the score
return(data.frame(text=sentences, score=scores)) }
MyGirlsResult = getSentimentScore(MyGirlsCleaned, pos.words , neg.words)
HereToCreateResult = getSentimentScore(HereToCreateCleaned, pos.words , neg.words)
class(MyGirlsResult)
head(MyGirlsResult)
head(HereToCreateResult)
names(MyGirlsResult)
par(mfrow=c(1,2))#we can partition the screen into parts to show multiple graps for understanding
hist(MyGirlsResult$score)
hist(HereToCreateResult$score)
write.csv(MyGirlsResult, "mygirls.csv")
write.csv(HereToCreateResult, "heartocreate.csv")
### Estimating emotions using Navye Bayes####
install.packages("Rstem")
install.packages("C:/Users/mano/Downloads/sentiment_0.2.tar.gz", repos = NULL, type = "source")
require(devtools)
require(sentiment)
ls("package:sentiment")
library(devtools)
library(sentiment)
# classify_emotion function returns an object of class data frame
# with seven columns (anger, disgust, fear, joy, sadness, surprise, #
# best_fit) and one row for each document:
HereToCreateClassEmo = classify_emotion(HereToCreateCleaned, algorithm="bayes", prior=1.0)
MyGirlsClassEmo = classify_emotion(MyGirlsCleaned, algorithm="bayes", prior=1.0)
head(MyGirlsClassEmo,10)
head(HereToCreateClassEmo,10)
MygirlsEmotion = MyGirlsClassEmo[,7]
HeretocreateEmotion = HereToCreateClassEmo[,7]
head(MygirlsEmotion,10)
head(HeretocreateEmotion,10)
head(MygirlsEmotion,50)
head(HeretocreateEmotion,50)
MyGirlsClassPol = classify_polarity(MyGirlsCleaned, algorithm="bayes")
HereToCreateClassPol = classify_polarity(HereToCreateCleaned, algorithm="bayes")
head(MyGirlsClassPol,10)
head(HereToCreateClassPol,10)
MygirlsSentimentDataFrame = data.frame(text=MyGirlsCleaned, emotion=MygirlsEmotion, polarity=MyGirlsClassPol, stringsAsFactors=FALSE)
HeretocreateSentimentDataFrame = data.frame(text=HereToCreateCleaned, emotion=HeretocreateEmotion, polarity=HereToCreateClassPol, stringsAsFactors=FALSE)
MygirlsSentimentDataFrame = within(MygirlsSentimentDataFrame, emotion <- factor(emotion, levels=names(sort(table(emotion), decreasing=TRUE))))
HeretocreateSentimentDataFrame = within(HeretocreateSentimentDataFrame, emotion <- factor(emotion, levels=names(sort(table(emotion), decreasing=TRUE))))
plotSentiments1<- function (sentiment_dataframe,title) {
library(ggplot2)
ggplot(sentiment_dataframe, aes(x=emotion)) +
geom_bar(aes(y=..count.., fill=emotion)) +
scale_fill_brewer(palette="Dark2") +
ggtitle(title) +
theme(legend.position='right') + ylab('Number of Tweets') +
xlab('Emotion Categories')
}
par(mfrow=c(1,2))
plotSentiments1(MygirlsSentimentDataFrame, 'Sentiment Analysis of Tweets on Twitter about MyGirls')
plotSentiments1(HeretocreateSentimentDataFrame, 'Sentiment Analysis of Tweets on Twitter about HereToCreate')
removeCustomeWords <- function (TweetsCleaned) {
for(i in 1:length(TweetsCleaned)){
TweetsCleaned[i] <- tryCatch({
TweetsCleaned[i] = removeWords(TweetsCleaned[i],
c(stopwords("english"), "care", "guys", "can", "dis", "didn",
"guy" ,"booked", "plz"))
TweetsCleaned[i]
}, error=function(cond) {
TweetsCleaned[i]
}, warning=function(cond) {
TweetsCleaned[i]
})
}
return(TweetsCleaned)
}
getWordCloud <- function
(sentiment_dataframe, TweetsCleaned, Emotion) {
emos = levels(factor(sentiment_dataframe$emotion))
n_emos = length(emos)
emo.docs = rep("", n_emos)
TweetsCleaned = removeCustomeWords(TweetsCleaned)
for (i in 1:n_emos){
emo.docs[i] = paste(TweetsCleaned[Emotion ==
emos[i]], collapse=" ")
}
corpus = Corpus(VectorSource(emo.docs))
tdm = TermDocumentMatrix(corpus)
tdm = as.matrix(tdm)
colnames(tdm) = emos
require(wordcloud)
suppressWarnings(comparison.cloud(tdm, colors =
brewer.pal(n_emos, "Dark2"), scale = c(3,.5), random.order =
FALSE, title.size = 1.5))
}
par(mfrow=c(1,2))
getWordCloud(MygirlsSentimentDataFrame, MyGirlsCleaned,
MygirlsEmotion)
getWordCloud(HeretocreateSentimentDataFrame,HereToCreateCleaned,
HeretocreateEmotion)
|
201561f8d32882b4d8741f9f256297561a1d5e04 | 08ccb95ddfc2079535af02c440759d8b2f56a779 | /FIT5147/week1/R-script.R | 28cecf9e784d8655073c5adacf94bef1193cc892 | [] | no_license | xianlinfeng/Documents | 05992fcc23946fb12e17295cef14b2e5aa4782e3 | ab423cb5550de8f2c5b96cb8dfdd0926f6140fb3 | refs/heads/master | 2022-12-17T22:21:43.301752 | 2020-09-23T01:56:33 | 2020-09-23T01:56:33 | 170,238,669 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,041 | r | R-script.R | library(ggmap)
library(ggplot2)
library(maps)
library(mapproj)
# or you can try e.g. require(maps)
# map("usa")
map('state') # not strictly necessary -- just checking that a base map draws
# NOTE(review): `data` masks the base function utils::data(); renaming would
# touch every line below, so it is only flagged here.
data <- read.csv("Household-heating-by-State-2008.csv", header=T)
head(data)
names(data)
# Give the 4th column a short, formula-friendly name.
names(data)[4] <- "MobileHomes"
names(data)
# Mean MobileHomes value per state.
ag <- aggregate(MobileHomes ~ States, FUN = mean, data = data)
head(ag)
dim(ag)
m.usa <- map_data("state") # we want the states
head(m.usa)
dim(m.usa) # more info than we need
# Lower-case state names so they match map_data()'s `region` values.
df <- data.frame(region = tolower(ag$States), MobileHomes = ag$MobileHomes, stringsAsFactors = F)
dim(df)
# Choropleth: colour each state by its MobileHomes value.
ggplot(df, aes(map_id = region)) + # empty canvas keyed by region
expand_limits(x = m.usa$long, y = m.usa$lat) + # extend the axes to cover the US
geom_map(aes(fill = MobileHomes), map = m.usa) + # draw the US map, filled by MobileHomes
coord_map() # fix the x/y aspect ratio so the map is not distorted
|
b31b3791f533a4dbdc18014beb0ae3fdb6e2015a | 1d9519ad72fd2a5a7aee02d591783b49f411d615 | /run_analysis.R | 002781a5cac9f582bffdb722da9f9385f9c570a2 | [] | no_license | CaoTungPHAM/GettingCleaningDataProject | 046bdaf64151fd7aeeecbdd646239f4951cd0fdc | 600740963934a00d03dd79f7e0c1135b51cf2558 | refs/heads/master | 2021-05-02T18:27:06.757016 | 2018-02-07T21:03:50 | 2018-02-07T21:03:50 | 120,664,320 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,815 | r | run_analysis.R | ### Download the file as a temporary file
# Download the UCI HAR Dataset zip to a temporary file.
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
temp <- tempfile()
download.file(fileUrl, destfile = temp, method = "libcurl", mode = "wb")

### Unzip the temporary file into a folder named "Project", then discard it.
if (!file.exists("./Project")) { dir.create("./Project") }
unzip(zipfile = temp, exdir = "./Project")
unlink(temp)  # the temporary zip is no longer needed once extracted
path <- file.path("./Project", "UCI HAR Dataset")
files <- list.files(path, recursive = TRUE)
### `files` lists all file paths inside "UCI HAR Dataset" (informational).

### Feature values live in X_train.txt / X_test.txt and their column names in
### features.txt; merge train + test (train first) and label the columns.
features_Test <- read.table(file.path(path, "test", "X_test.txt"), header = FALSE)
features_Train <- read.table(file.path(path, "train", "X_train.txt"), header = FALSE)
features <- rbind(features_Train, features_Test)
features_Names <- read.table(file.path(path, "features.txt"), header = FALSE, stringsAsFactors = FALSE)
names(features) <- features_Names$V2

### Activity codes (Y_train.txt / Y_test.txt), merged in the same
### train-then-test order as the features above.
Y_Test <- read.table(file.path(path, "test", "Y_test.txt"), header = FALSE)
Y_Train <- read.table(file.path(path, "train", "Y_train.txt"), header = FALSE)
Y <- rbind(Y_Train, Y_Test)
names(Y) <- "Activity"

### Subject identifiers, same train-then-test order.
subject_Test <- read.table(file.path(path, "test", "subject_test.txt"), header = FALSE)
subject_Train <- read.table(file.path(path, "train", "subject_train.txt"), header = FALSE)
subject <- rbind(subject_Train, subject_Test)
names(subject) <- "Subject"

### Step 1: merge subjects, activities and features into one data set "df".
df <- cbind(subject, Y)
df <- cbind(df, features)

### Step 2: keep only the mean() and std() measurements, stored in "Sub".
features_Names_mean <- features_Names[grepl("mean\\(\\)", features_Names$V2), ]$V2
features_Names_std <- features_Names[grepl("std\\(\\)", features_Names$V2), ]$V2
subNames <- c(features_Names_mean, features_Names_std)
Sub <- subset(df, select = c("Subject", "Activity", subNames))

### Step 3: replace numeric activity codes with descriptive labels.
ActivityLabels <- read.table(file.path(path, "activity_labels.txt"), header = FALSE)
### BUG FIX: the original looked up `activityLabels$V2` (lower-case "a"),
### which is undefined -- R is case-sensitive -- so this step errored out.
rep_func <- function(x) { ActivityLabels$V2[x] }
Sub$Activity <- sapply(Sub$Activity, rep_func)

### Step 4: expand abbreviated variable names into descriptive ones.
old_new <- data.frame(old = c("^t", "^f", "Acc", "Gyro", "Mag", "BodyBody"),
                      new = c("time", "frequency", "Accelerometer", "Gyroscope", "Magnitude", "Body"),
                      stringsAsFactors = FALSE)
for (i in seq_len(nrow(old_new))) { names(Sub) <- gsub(old_new[i, 1], old_new[i, 2], names(Sub)) }

### Step 5: average of each variable per subject/activity.  The table below
### shows the tidy data must contain 180 rows, one per subject-activity pair.
table(Sub$Subject, Sub$Activity)
if (!require("plyr")) {
  install.packages("plyr")
}
require(plyr)
tidy_df <- aggregate(. ~ Subject + Activity, Sub, mean)
tidy_df <- tidy_df[order(tidy_df$Subject, tidy_df$Activity), ]
write.table(tidy_df, file = "tidy.txt",row.name=FALSE) |
398c2e9f2f9055dd57bf7ea5e334e039a4bacfb4 | 4b9955701ca424c19bec17f0bc4b36f72cfcbcc4 | /man/is.nullcpo.Rd | 074c841b8c77cd69f18851e0ae32c05681056f5d | [
"BSD-2-Clause"
] | permissive | mlr-org/mlrCPO | c238c4ddd72ece8549f8b48a79f02f543dac60e5 | e6fc62a4aeb2001a3760c9d1126f6f2ddd98cc54 | refs/heads/master | 2022-11-21T17:30:54.108189 | 2022-11-16T16:08:10 | 2022-11-16T16:08:10 | 100,395,368 | 39 | 4 | NOASSERTION | 2022-10-18T23:46:13 | 2017-08-15T16:08:30 | R | UTF-8 | R | false | true | 549 | rd | is.nullcpo.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NULLCPO.R
\name{is.nullcpo}
\alias{is.nullcpo}
\title{Check for NULLCPO}
\usage{
is.nullcpo(x)
}
\arguments{
\item{x}{[any]\cr
The object to check}
}
\value{
[\code{logical(1)}]. \code{TRUE} if \code{x} is a \code{NULLCPO}, \code{FALSE} otherwise.
}
\description{
Check whether the given object is a \code{\link{NULLCPO}}.
}
\seealso{
Other NULLCPO related:
\code{\link{NULLCPO}},
\code{\link{nullToNullcpo}()},
\code{\link{nullcpoToNull}()}
}
\concept{NULLCPO related}
|
1e9bf9d2d04a1c8758d9621da8572be61e8a10e2 | 1f4fb6044e39e1c632c13487fb79f7e5bc836175 | /docs/articles/cputools.R | fb9f4422fa767dae2c0b71280059b61049a26408 | [] | no_license | ComputationalProteomicsUnit/cputools | 777a034e14f938ded2fee6672d3da64fb171488a | 17e4584cf1cb3f874dcc60202680e7dcdf306188 | refs/heads/master | 2020-12-24T15:14:22.000702 | 2017-01-10T17:33:21 | 2017-01-10T17:33:21 | 19,713,721 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,369 | r | cputools.R | ## ----env, echo=FALSE-------------------------------------------------------
# NOTE(review): this file appears to be generated by knitr::purl() from the
# cputools vignette (the "## ----" lines are chunk delimiters) -- confirm,
# and prefer editing the source .Rmd rather than this file.
suppressPackageStartupMessages(library("BiocStyle"))
## ----lib-------------------------------------------------------------------
library("cputools")
## ----precommithook, eval=FALSE---------------------------------------------
# devtools::use_git_hook("pre-commit", devtools:::render_template("readme-rmd-pre-commit.sh"))
## --------------------------------------------------------------------------
makeBiocBuildShield("hpar")
makeBiocCovrShield("hpar")
## ---- echo=FALSE, results = "asis"-----------------------------------------
makeBiocBuildShield("hpar")
makeBiocCovrShield("hpar")
## --------------------------------------------------------------------------
makeTravisShield("cputools", user = "ComputationalProteomicsUnit")
## ---- echo=FALSE, results = "asis"-----------------------------------------
makeTravisShield("cputools", user = "ComputationalProteomicsUnit")
## ----pkgqst0, eval=TRUE----------------------------------------------------
pkgqsts("cputools", level = 2L, user = "ComputationalProteomicsUnit")
## ----pkgqst, eval=TRUE, results='asis'-------------------------------------
pkgqsts("cputools", bioc=FALSE, level = 2L, user = "ComputationalProteomicsUnit")
## ----si--------------------------------------------------------------------
sessionInfo()
|
89dd85a0a9cbc678f03783d56e903fdb031f063c | 288b4b6998906714ab368e0ee14c70a4059be4ab | /data-raw/dat.collins1985a.r | 7d5aaa1aa54e389c1e355635167034e26502da4f | [] | no_license | qsh7950/metadat | f6243a382c8c0e3f4c9a0e2cd657edb0ffa3e018 | 5c70fa63d7acfa1f315534fb292950513cb2281e | refs/heads/master | 2021-02-26T06:42:18.937872 | 2019-10-21T21:58:33 | 2019-10-21T21:58:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 160 | r | dat.collins1985a.r | dat.collins1985a <- read.table("data-raw/dat.collins1985a.txt", header=TRUE, stringsAsFactors=FALSE)
# Serialise the imported data frame into the package's data/ directory.
save(dat.collins1985a, file="data/dat.collins1985a.rda")
|
1afc514a13bfe58b79c763c9694715daeb568b4c | 210d8b7e86595b4b593083eb0f5767d420b83323 | /Capstone NFL Win Prediction Script MLR.R | 5d2e8986ea1c8c53689aaf30e189383fd1b6d2d2 | [] | no_license | mlrankinseattle/NFLprediction | ee69545ce01db3266f53b9029f32169a9cc94ae8 | 1be29051275874d33e1df92a3e33603825628376 | refs/heads/master | 2020-09-16T07:51:37.465502 | 2019-11-24T07:09:03 | 2019-11-24T07:09:03 | 223,703,317 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,643 | r | Capstone NFL Win Prediction Script MLR.R | #Installed nflscrapR to support scrapping data from NFL.com
#Installed nflscrapR to support scrapping data from NFL.com
library(devtools)
# NOTE(review): installing packages inside an analysis script re-installs them
# on every run (and `repo <- "..."` leaks a `repo` variable into the global
# environment via assignment-in-call); consider guarding these with
# requireNamespace() checks and using `repo = "..."`.
devtools::install_github(repo <- "maksimhorowitz/nflscrapR", force=TRUE)
devtools::install_github("dgrtwo/gganimate", force=TRUE)
install.packages('caTools')
#Loaded proper libraries needed
library(nflscrapR)
library(tidyverse)
library(caTools)
# Web-scrape play-by-play data for every season 2009-2019 and bind them into
# one data frame.  Each season is also cached to its own .rds file, and the
# combined frame to play_data.rds, matching the original side effects.
# NOTE(review): the original repeated the same three lines eleven times
# (play1..play11); generalized to a single loop.  Seasons run 2019 down to
# 2009 so the bind order matches the original bind_rows(play1, ..., play11).
seasons <- 2019:2009
play_list <- lapply(seasons, function(season) {
  season_pbp <- season_play_by_play(season)
  saveRDS(season_pbp, paste0("play_data_", season, ".rds"))
  season_pbp
})
# binding play by play data into a single play object
play <- bind_rows(play_list)
saveRDS(play, "play_data.rds")
# Scrape game IDs for every season 2009-2019 and bind them into one data
# frame (2019 first, matching the original bind_rows order), then cache.
# NOTE(review): the original repeated scrape_game_ids() eleven times into
# games2009..games2019; generalized to one lapply over the seasons.
games <- bind_rows(lapply(2019:2009, scrape_game_ids))
saveRDS(games, "games_data.rds")
#Joining games and play dataframes on game_id and GameID (full join keeps
#plays even if a game record is missing and vice versa)
play_final <- full_join(games, play, by = c("game_id" = "GameID"))
saveRDS(play_final, "play_final.rds")
#Resulting data frame has 481629 observations and 112 variables
# Create outcome variables: the game winner and whether the team in
# possession ultimately won (the model response).
# NOTE(review): mutate() arguments must be named with `=`, not `<-` — the
# original `mutate(winner <- ifelse(...))` never created a `winner` column,
# so the subsequent `winner == posteam` and `select(..., poswins)` failed.
play_final <- play_final %>% mutate(winner = ifelse(home_score > away_score, home_team, away_team))
play_final <- play_final %>% mutate(poswins = ifelse(winner == posteam, "Yes", "No"))
play_final$qtr <- as.factor(play_final$qtr)
play_final$down <- as.factor(play_final$down)
play_final$poswins <- as.factor(play_final$poswins)
# Keep only real plays in regulation: drop "No Play" rows, overtime (qtr 5),
# and plays with a missing down (`down != "NA"` is NA for missing downs,
# which filter() drops).
noplay_data_filter = play_final %>% filter(PlayType != "No Play" & qtr != 5 & down != "NA") %>%
  select(game_id, Date, posteam, HomeTeam, AwayTeam, winner, qtr, down, ydstogo, TimeSecs, yrdline100, ScoreDiff, poswins)
# Split the filtered dataset into training (80%) and test (20%) sets,
# stratified on the response via caTools::sample.split.
set.seed(123)
split <- sample.split(noplay_data_filter$poswins, SplitRatio = 0.8)
train <- noplay_data_filter %>% filter(split == TRUE)
test <- noplay_data_filter %>% filter(split == FALSE)
#test and train save point
# Remove any duplicated column names from the training set.
# NOTE(review): the original bound `noplay_data_filter` to itself and then
# overwrote `train` with a de-duplicated copy of `test`, so the model was
# actually fit on the test split; corrected to operate on `train` itself.
train <- train[, !duplicated(names(train))]
# Fit a logistic regression for P(team in possession wins) from game state,
# and print the coefficient summary.
model_lg = glm(poswins ~ qtr + down + ydstogo + TimeSecs + yrdline100 + ScoreDiff,
               data = train, family = "binomial")
summary(model_lg)
# Predicted win probability for the possession team on each training play.
# NOTE(review): `type = "response"` must be a named argument — the original
# `type <- "response"` only worked by positional accident and leaked a
# `type` variable into the global environment.
pred <- predict(model_lg, train, type = "response")
train <- cbind(train, pred)
# Convert the possession-team probability into a home-team win probability.
train <- mutate(train, pred1h = ifelse(posteam == HomeTeam, pred, 1 - pred))
# Plot the home team's win probability over game time for the selected game.
# To test a future game, substitute its game_id (preferably after the 2nd
# quarter starts).
# NOTE(review): the original plotted the raw possession-team probability
# (`pred`) while labelling the axis "Home Win Probability" and never used
# `pred1h`; switched the y aesthetic to `pred1h`.  Also fixed the
# `aes(x <- ..., y <- ...)` assignment-in-call idiom to `aes(x = ..., y = ...)`.
ggplot(data = filter(train, game_id == "2016090800"), aes(x = TimeSecs, y = pred1h)) +
  geom_line(size = 1, colour = "green") + scale_x_reverse() + ylim(c(0, 1)) +
  theme_minimal() + xlab("Time Remaining (seconds)") + ylab("Home Win Probability")
|
cbb259f3a7cd11390a7601e230f49841e6fc9e33 | ee79ad37d2885aac5f5cecb9eea61fafee117a11 | /simultaneous-selection/analysis/R/modelAna.ev.rep.R | 63f78d2d6e59a39c1d7c8b2f6d5b9b233fe31018 | [] | no_license | vullab/ColorBinding | d33a3609b89f0916a026cae846742e1f065938ea | b3c6e718f1233ad9b4568a51f794cf3b5966145f | refs/heads/master | 2021-03-16T10:26:02.634673 | 2018-12-16T04:04:29 | 2018-12-16T04:04:29 | 65,759,803 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,993 | r | modelAna.ev.rep.R | # setwd('/Users/tflew/Google Drive/Vul_Lab/ColorBind/Colorbinding_cory/colorbindingCory/data')
# --- Load and pool per-subject CSVs from the repetition experiment ---------
# Main-task files are identified by a "1" in the 5th-from-last character of
# the file name; subjects with fewer than 200 trials are dropped.  Older
# file formats are patched up to the current column layout before pooling.
data<-NULL
path = '../data_rep/'
for (file in list.files(path))
  if ("1"==substr(file,nchar(file)-4,nchar(file)-4)){
    subjMain <- read.csv(paste0(path,file))
    if (nrow(subjMain)>=200){
      # Subject ID is embedded in the file name at fixed offsets
      subjectName <- substr(file,7,nchar(file)-6)
      subjMain$subjectID <- subjectName
      # Legacy files spell the column "offsett"; normalize to "offset"
      if ("offsett" %in% colnames(subjMain) & !("offset" %in% colnames(subjMain))){
        subjMain$offset <- subjMain$offsett
        subjMain$offset2 <- 0
        subjMain$offsett <- NULL
      }
      # 17-column files predate the offset/version columns; fill defaults
      if (ncol(subjMain)==17){
        subjMain$offset <- 0
        subjMain$offset2 <- 0
        subjMain$version <- subjMain$bars.or.spot - 2
        subjMain$actualStimOn <- NA
      }
      print(ncol(subjMain))
      # Infer the experiment version from the (file-constant) offset value
      # when the file has no version column.
      if(ncol(subjMain) <= 20 & is.null(subjMain$version)){
        if (subjMain$offset[1] == 3.75){
          subjMain$version = 1
        }else if(subjMain$offset[1] == 5){
          subjMain$version = 2
        }else if(subjMain$offset[1] == 5.5){
          subjMain$version = 3
        }else if(subjMain$offset[1] == 4.5){
          subjMain$version = 4
        }else if(subjMain$offset[1] == -10.5){
          subjMain$version = 5
        }else if(subjMain$offset[1] == -9){
          subjMain$version = 6
        }else if(subjMain$offset[1] == 0){
          print("1")
        }else{
          # NOTE(review): browser() drops into the interactive debugger on an
          # unrecognized offset — it will stall a non-interactive/batch run.
          browser()
          print (subjMain$offset[1])
        }
      }
      # print (subjMain$offset[1])
      #print (ncol(subjMain) )
      # NOTE(review): rbind inside a loop grows `data` quadratically; a list
      # plus one bind at the end would scale better.
      data<-rbind(data,subjMain)
    }
  }

# Response position/half codes come in as factors or characters; coerce to
# numeric before combining.
data$resp.v.pos <- as.numeric(data$resp.v.pos)
data$resp.h.pos <- as.numeric(data$resp.h.pos)
data$resp.v.hv <- as.numeric(data$resp.v.hv)
data$resp.h.hv <- as.numeric(data$resp.h.hv)
# Collapse (position, half) into a single response index per dimension
# (assumes 5 positions per half — TODO confirm the coding ranges).
data$resp.h.idx = data$resp.h.pos+3+(data$resp.h.hv-1)*5
data$resp.v.idx = data$resp.v.pos+3+(data$resp.v.hv-1)*5

table(data$version)
# One row per subject x version: tabulates how many subjects ran each version
temp<-aggregate(data=data,rep(1,nrow(data))~version+subjectID,min)
table(temp$version)

# Keep only the experiment versions analyzed below, in a fixed factor order
data <- subset(data, data$version %in% c(-1,0,1,2,5,6,8,9,11,12,13))
data$version = factor(data$version, levels=c('-1','0','1','2','5','6','8','9','11','12','13'))

save('data', file = 'read.data.rep.Rdata')
load(file = 'read.data.rep.Rdata')

# Model code (defines predictMat used by nLL below)
source('model.ed.2016-08-30.R')
# Negative log-likelihood of the observed response-count matrix `dat` (a
# global set in the fitting loop) under the response probabilities produced
# by predictMat(), plus a penalty term pmax(-5, scale) on the scale parameter.
nLL <- function(p.target, p.whole, p.part, p.color, p.rep, scale, norep) {
  probs <- predictMat(p.target, p.whole, p.part, p.color, p.rep, scale, norep)
  # Applies only to the no-repetition variant: diagonal cells are forced to
  # probability 1 — presumably so they contribute zero log-likelihood (TODO
  # confirm against model.ed.2016-08-30.R).
  if (norep == 1) diag(probs) <- 1
  loglik <- sum(dat * log(probs))
  -loglik + pmax(-5, scale)
}
# Candidate model specifications.  Each entry provides:
#  - 'params': a function returning random starting values for stats4::mle();
#    the jitter width grows with the global `attempt` counter so repeated fit
#    attempts explore a progressively wider region, and
#  - 'fixed': parameters held constant for that model (fixing a mixture
#    weight at -100 on this scale effectively switches that component off).
models = list(
  'all' = list(
    'params' = function(){list(p.target = rnorm(1, -1, 1+attempt/100),
                               p.whole = rnorm(1, -1, 1+attempt/100),
                               p.part=rnorm(1, -1, 1+attempt/100),
                               p.color=rnorm(1, -1, 1+attempt/100),
                               scale=rnorm(1, 0, 1+attempt/100), #,
                               p.rep=rnorm(1, 0, 1+attempt/100)
    )},
    'fixed' = list(norep=0)),
  'nowhole' = list(
    'params' = function(){list(p.target = rnorm(1, -1, 1+attempt/100),
                               p.part=rnorm(1, -1, 1+attempt/100),
                               p.color=rnorm(1, -1, 1+attempt/100),
                               scale=rnorm(1, 0, 1+attempt/100), #,
                               p.rep=rnorm(1, 0, 1+attempt/100)
    )},
    'fixed' = list(norep=0,p.whole = -100)),
  'nopart' = list(
    'params' = function(){list(p.target = rnorm(1, -1, 1+attempt/100),
                               p.whole = rnorm(1, -1, 1+attempt/100),
                               p.color=rnorm(1, -1, 1+attempt/100),
                               scale=rnorm(1, 0, 1+attempt/100), #,
                               p.rep=rnorm(1, 0, 1+attempt/100)
    )},
    'fixed' = list(norep=0,p.part = -100)),
  'nowholepart' = list(
    'params' = function(){list(p.target = rnorm(1, -1, 1+attempt/100),
                               p.color=rnorm(1, -1, 1+attempt/100),
                               scale=rnorm(1, 0, 1+attempt/100), #,
                               p.rep=rnorm(1, 0, 1+attempt/100)
    )},
    'fixed' = list(norep=0,p.whole = -100, p.part=-100))
)
# --- Fit every candidate model to every subject x version cell -------------
# For each version and subject, tabulate joint (horizontal, vertical)
# response counts into the global `dat` read by nLL(), then fit each model
# by maximum likelihood, retrying up to 100 times with progressively wider
# random starting values (the models' params() functions read the global
# `attempt` counter).  Successful fits accumulate in `fits`, failures in
# `failures`.
failures = data.frame()
fits = data.frame()
for(v in levels(data$version)){
  tmp <- subset(data, data$version == v)
  for(s in unique(tmp$subjectID)){
    tmp.sub <- subset(tmp, tmp$subjectID==s)
    # Joint response-count matrix; `dat` is a global because nLL() reads it
    dat <- as.matrix(table(tmp.sub$resp.h.idx, tmp.sub$resp.v.idx))
    for(model in names(models)){
      res = FALSE
      attempt = 1
      # NOTE(review): `class(res)!='mle'` and `&` work here but
      # !inherits(res, 'mle') && ... would be the idiomatic scalar test.
      while(class(res)!='mle' & attempt <= 100){
        # print(attempt)
        try(res <- stats4::mle(minuslogl = nLL,
                               start = models[[model]][['params']](),
                               fixed = models[[model]][['fixed']],
                               nobs = sum(dat)),
            silent=TRUE)
        attempt = attempt+1
      }
      if(class(res)=='mle'){
        C = stats4::coef(res)
        # C = coef(summary(res))[, 'Estimate']
        # One result row per (version, subject, model) fit
        fits <- rbind(fits,
                      data.frame(
                        repetition = 'rep',
                        version = v,
                        subject = s,
                        model = model,
                        LL = logLik(res),
                        AIC = AIC(res),
                        BIC = BIC(res),
                        n = sum(dat),
                        p.target = C['p.target'],
                        p.whole = C['p.whole'],
                        p.part = C['p.part'],
                        p.color = C['p.color'],
                        p.rep = C['p.rep'],
                        scale = C['scale'],
                        norep = C['norep'])
        )
      } else {
        print(sprintf('failed on v=%s; s=%s, model=%s', v, s, model))
        failures = rbind(failures,
                         data.frame('v'=v, 's'=s, 'model'=model))
      }
    }
  }
}

save('data', 'fits', 'failures', file = 'fits.rep.Rdata')
# ggplot(recoded.df, aes(y=Var1, x=Var2, size=Freq))+facet_wrap(~version)+geom_point()+theme_bw()
#
# fitsummaries = data.frame()
#
# for(v in levels(data$version)){
# fits[[v]]$p.part.net = (1-fits[[v]]$p.whole)*fits[[v]]$p.part
# fits[[v]]$p.color.net = (1-fits[[v]]$p.whole)*(1-fits[[v]]$p.part)*fits[[v]]$p.color
# fits[[v]]$p.uniform.net = (1-fits[[v]]$p.whole)*(1-fits[[v]]$p.part)*(1-fits[[v]]$p.color)
# fitsummaries = rbind(fitsummaries,
# data.frame(repetition = 'norep',
# version=v,
# p.whole = mean(fits[[v]]$p.whole, trim = 0.05),
# p.part = ,
# p.color = ,
# scale = ,
# net.p.whole = ,
# net.p.part = ,
# net.p.color = ,
# net.p.uniform = ))
# }
#
|
75d077d11579dba4e0b72e3b9c1ab69f147bc20b | fcbe81388b0f883ff36a9e7173277f80789fa871 | /man/file.format_--methods.Rd | edb27dfde15536dded8db131d10402b5a64b76ba | [] | no_license | cran/crp.CSFP | 15b76b1860f47153a0909b1b9520f4357738d383 | b94ad004acd7cfb33210b5f3c74976ca7561c04a | refs/heads/master | 2021-01-10T21:24:19.961192 | 2016-09-11T18:35:59 | 2016-09-11T18:35:59 | 17,695,304 | 1 | 3 | null | null | null | null | UTF-8 | R | false | false | 347 | rd | file.format_--methods.Rd | \name{file.format<-}
\docType{methods}
\alias{file.format<-}
\alias{set.file.format<-}
\alias{set.file.format<-,crp.CSFP,character-method}
\alias{file.format<--methods}
\alias{file.format<-,crp.CSFP,character-method}
\title{Set the file format}
\description{
The method changes the model's value of \code{file.format}.}
\keyword{methods}
|
6002858d8be17d5445f322b89f01707120f97fb2 | 56063be99831af5f4055fb32fefe3a2dbbcc50eb | /Code/affordability_metrics.R | 4dde667e1aab7dfe396b244130e2dad2699280ed | [
"MIT"
] | permissive | ksonda/CA-Water-Privatization | 80f3879eb1dd1c5583149fdce2c13bf991fd99ef | 77ec4d565d6bcd3bc2db5f545984fea21683ce81 | refs/heads/main | 2023-05-04T11:01:41.848681 | 2021-05-26T16:00:19 | 2021-05-26T16:00:19 | 322,007,655 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,348 | r | affordability_metrics.R | ---------------------------------------------------------------------------------------------------------------
#---------- This code merges in census data for counts of households by size and income, calculates "resonable
#---------- levels of consumption by household size, and constructs affordability metrics
#---------------------------------------------------------------------------------------------------------------
# NOTE(review): rm(list = ls()) in a shared script wipes the caller's
# workspace; prefer running the script in a fresh session instead.
rm(list = ls()) #cleaning workspace, rm for remove
#dev.off() #clearing the plots
getwd()
my_packages <- c("rgeos","raster","tidyverse","magrittr","haven","readxl","tools","usethis","RColorBrewer","ggrepel","sf","tictoc","lubridate","tidycensus")
#install.packages(my_packages, repos = "http://cran.rstudio.com")
# Attach every required package by name
lapply(my_packages, library, character.only = TRUE)
# read in summary table (wide form)
d <- read_csv("Data/OWRS/summary_table_cleaned_wide_formatv2.csv")
View(d)
# Calculate reasonable consumption: Let's say reasonable consumption is 50gpcd
# e.g. https://awwa.onlinelibrary.wiley.com/doi/full/10.5942/jawwa.2018.110.0002
hh_sizes<-c(1:7) #household sizes in census go from 1 to 7
gpcd<-50 # set gpcd for affordability threshold
# One row per (system, household size), with the monthly kgal a household
# of that size "reasonably" uses at gpcd gallons/person/day.
d<-crossing(d,hh_sizes)
d<- d%>%mutate(reasonable_consumption_kgal=(gpcd*hh_sizes*365/12)/1000)

d$tier_0<-as.numeric(d$tier_0)

# Clean up water rates: a missing tier-k upper bound becomes an effectively
# infinite block width (10^(4+k) kgal, so tier_1 -> 1e5 ... tier_8 -> 1e12)
# and a missing tier price becomes 0 — exactly the replacements the original
# 16 copy-pasted mutate/case_when clauses performed.
# NOTE(review): generalized those clauses to a single loop over the tier index.
d$tier_0[is.na(d$tier_0)] <- 0
for (k in 1:8) {
  tier_col <- paste0("tier_", k)
  price_col <- paste0("tier_", k, "_price")
  d[[tier_col]][is.na(d[[tier_col]])] <- 10^(4 + k)
  d[[price_col]][is.na(d[[price_col]])] <- 0
}
#Calculate monthly water bill at each consumption level.
#Takes the summary data frame and a vector of volumes (kgal); returns the
#total monthly bill: service charge plus increasing-block revenue, where
#block k (lower bound tier_{k-1}, upper bound tier_k) earns
#  tier_k_price * clamp(vol - tier_{k-1}, 0, tier_k - tier_{k-1}).
#NOTE(review): the original spelled the same expression out eight times via
#dplyr::mutate; generalized to a base-R loop over the tier index (identical
#arithmetic, and the function no longer needs dplyr).
volRevCalc<-function(data,vol){
  total_bill <- data$service_charge
  for (k in 1:8) {
    lower <- data[[paste0("tier_", k - 1)]]
    upper <- data[[paste0("tier_", k)]]
    price <- data[[paste0("tier_", k, "_price")]]
    total_bill <- total_bill + price * pmax(pmin(vol - lower, upper - lower), 0)
  }
  return(total_bill)
}
# Bill at the "reasonable" household consumption level and at the system's
# observed mean usage (usage_ccf converted to kgal: 1 ccf = 0.748 kgal).
# NOTE(review): the original read `d <- S %>% mutate(...)`; `S` is not
# defined anywhere in this script, so it was corrected to `d`.
d<-d%>%mutate(total_bill_by_hhsize=volRevCalc(d,d$reasonable_consumption_kgal),
              mean_bill=volRevCalc(d,d$usage_ccf*0.748))

# Top-coded census income category gets a point estimate of $175,000
d$approximate_median_income[which(d$median_income_category=="$150,000 to $199,999")]<-175000

##### AFFORDABILITY METRIC 1: % Median HH Income
# The most commonly used affordability metric is
# the annualized average water bill as a % of median area HH income
d<-d%>%mutate(perc_MHI=100*12*mean_bill/approximate_median_income)

##### AFFORDABILITY METRIC 2: Hours of Labor at Minimum Wage (HM)
# See https://awwa.onlinelibrary.wiley.com/doi/full/10.5942/jawwa.2018.110.0002
mw = 12 #assume uniform minimum wage in CA at $12/hr. In future versions, would merge in relevant minimum wages
d<-d%>%mutate(MH=total_bill_by_hhsize/mw)

##### AFFORDABILITY METRIC 3: WARi
# See
016f7130a93fff546e3594c0a266eca71db42cba | 6af19368b56b127648e86dca3e263c9daddef0f9 | /R/phrasemachine/R/POS_tag_documents.R | 79ea9388d17e8ebb416ba5e5b44392a8482d8b9b | [
"MIT"
] | permissive | slanglab/phrasemachine | 6aaeb54bf1ac779bbde5cc8feea9a1a715d55c14 | 41cec3b727f854aa6ef8ab0b45228c3216ff0d4a | refs/heads/master | 2021-01-12T13:48:26.917358 | 2020-06-25T17:25:53 | 2020-06-25T17:25:53 | 69,607,382 | 193 | 27 | MIT | 2019-06-06T16:18:47 | 2016-09-29T20:58:34 | Python | UTF-8 | R | false | false | 2,865 | r | POS_tag_documents.R | #' @title POS tag documents
#' @description Annotates documents (provided as a character vector with one
#' entry per document) with pars-of-speech (POS) tags using the openNLP POS
#' tagger
#'
#' @param documents A vector of strings (one per document).
#' @param memory The default amount of memory (512MB) assigned to the NLP
#' package to POS tag documents is often not enough for large documents, which
#' can lead to a "java.lang.OutOfMemoryError". The memory argument defaults to
#' "-Xmx512M" (512MB) in this package, and can be increased if necessary to
#' accommodate very large documents.
#' @return A list object.
#' @examples
#' \dontrun{
#' # make sure quanteda is installed
#' requireNamespace("quanteda", quietly = TRUE)
#' # load some example data:
#' documents <- quanteda::data_corpus_inaugural
# documents <- documents[1:10,]
#'
#' # run tagger
#' tagged_documents <- POS_tag_documents(documents)
#' }
#' @export
POS_tag_documents <- function(documents,
memory = "-Xmx512M"){
# set the amount of heap space available to Java in the NLP package
options(java.parameters = memory)
# NULL out to deal with R CMD check note
type <- NULL
# create a list object to store tagged tokens
tagged_documents <- vector(mode = "list", length = length(documents))
# loop through documents and tag them
for (i in 1:length(documents)) {
cat("Currently tagging document",i,"of",length(documents),"\n")
# extract the current document
document <- documents[i]
# get rid of extra spaces.
document <- stringr::str_replace_all(document,"[\\s]+"," ")
document <- stringr::str_replace_all(document,"[\\s]$","")
document <- NLP::as.String(document)
# annotate words with POS tags
wordAnnotation <- NLP::annotate(
document,
list(openNLP::Maxent_Sent_Token_Annotator(),
openNLP::Maxent_Word_Token_Annotator()))
POSAnnotation <- NLP::annotate(
document,
openNLP::Maxent_POS_Tag_Annotator(),
wordAnnotation)
# extract the tagged words so we can get the tokens
POSwords <- subset(POSAnnotation, type == "word")
# extract the tokens and tags
tags <- sapply(POSwords$features, '[[', "POS")
tokens <- document[POSwords][1:length(tags)]
# store everything in a list object
tagged_documents[[i]] <- list(tokens = tokens,
tags = tags)
}
# give the documents names or pass on names if they were provided
if (is.null(names(documents))) {
names(tagged_documents) <- paste("Document_",1:length(documents),sep = "")
} else {
names(tagged_documents) <- names(documents)
}
# return everything
return(tagged_documents)
}
|
6b12cc2bf9f95b6b6cd11f2e1ae151dd2a21f400 | 34b9322c5557921ae0272c1b9edf03e17247b317 | /man/invCLR.Rd | b19f2ec51a7c333f6ae9c940e0bd85eb8d2524f4 | [] | no_license | cran/easyCODA | 19617b508ee5c490b53bb933ed85bac04747d590 | eec30261bcc0f38c8c0ffb4515e4b36363288ef0 | refs/heads/master | 2021-06-28T18:42:43.463905 | 2020-09-19T17:40:07 | 2020-09-19T17:40:07 | 135,413,733 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,091 | rd | invCLR.Rd | \name{invCLR}
\alias{invCLR}
\title{
Inverse of centred logratios
}
\description{
Given centred logratios (CLRs), compute the inverse (i.e. recover the original parts)
}
\usage{
invCLR(CLRmatrix, part.names=colnames(CLRmatrix))
}
\arguments{
\item{CLRmatrix}{
A matrix of centred logratios
}
\item{part.names}{
Part names in the reconstructed compositional data matrix
}
}
\details{
The function \code{invCLR} computes the original parts, given the centred logratios (CLRs)
}
\value{
\item{parts}{
The reconstructed parts (they add up to 1)
}
}
\references{
Aitchison, J. (1986), The Statistical Analysis of Compositional Data, Chapman & Hall. \cr
Greenacre, M. (2018), Compositional Data Analysis in Practice, Chapman & Hall / CRC Press.
}
\author{
Michael Greenacre
}
\seealso{
\code{\link{CLR}}, \code{\link{ALR}}, \code{\link{invALR}}, \code{\link{LR.VAR}}
}
\examples{
data(veg)
# compute centred logratios
veg.CLR <- CLR(veg)$LR
# invert back to original parts (parts closed to sum to 1)
invCLR(veg.CLR)
}
\keyword{logratio}
|
17bbbb456072b5cdd29320cb40a462bc7ae7ceb0 | e7c0f051f47e7b0b024b4d1814832d98359fee53 | /R/utils.R | 55f4c3819ea2507f835c3b6958088389f5853fc4 | [
"BSD-3-Clause"
] | permissive | holbig/EtaModelCC | 366b12c1b565c3c319dbe945035856acbe5c2f50 | e7c0a9447b851632d3d5b5b51b7993d5229bc0a6 | refs/heads/master | 2021-06-12T14:56:38.538115 | 2021-04-01T14:02:24 | 2021-04-01T14:02:24 | 128,685,165 | 7 | 3 | BSD-3-Clause | 2021-01-13T19:12:18 | 2018-04-08T21:56:48 | CSS | UTF-8 | R | false | false | 2,152 | r | utils.R | #' @import "raster"
#' @importFrom "magrittr" "%>%"
# PROJETA API URL
projeta_api <- "https://projeta.cptec.inpe.br/api/v1/public/ETA/"
# ERRO API URL
erro_api <- "Ocorreu um erro ao tentar se conectar com a API."
# Data-frame com os dados dos modelos de mudanças climaticas
url <- "https://projeta.cptec.inpe.br/api/v1/public/models"
models <- jsonlite::fromJSON(url)
models <- as.data.frame(models$data)
# Data-frame com variaveis geradas pelos modelos de mudancas climaticas
url <- "https://projeta.cptec.inpe.br/api/v1/public/variables"
variables <- jsonlite::fromJSON(url)
variables <- as.data.frame(variables$data[,c(1,3,2,4)])
names(variables)[2] <- c("variable")
load(paste0(system.file("extdata", package = "EtaModelCC"),"/units.Rda"))
variables <- merge(variables,units[,c("variable","unit")], by="variable", all.x = TRUE)
#LON LAT
latMin <- -35.1 # -35.05
latMax <- 5.9 # 5.90
lonMin <- -76.7 # -75.05
lonMax <- -32.01 # -33.95
# Validate that (lat, lon) fall inside the model's domain bounding box
# (latMin/latMax/lonMin/lonMax defined above).  Stops with a descriptive
# (Portuguese) error when either coordinate is out of range; returns nothing
# when both are valid.
checkCoordinates <- function(lat, lon){
  lat <- as.numeric(lat)
  lon <- as.numeric(lon)
  lat_oob <- lat < latMin || lat > latMax
  lon_oob <- lon < lonMin || lon > lonMax
  # Shared tail of every error message describing the covered area
  coverage <- paste("AREA COVERED IN THE MODEL:\n",
                    "--> LONGITUDE: ", lonMin ,"to ",lonMax,"\n",
                    "--> LATITUDE : ",latMin," to ",latMax,"\n")
  if (lat_oob && lon_oob) {
    stop(paste("Coordenadas de LATITUDE(",lat,") e LONGITUDE(",lon,
               ") fora da faixa de dados da previsao \n", coverage))
  }
  if (lat_oob) {
    stop(paste("Coordenada de LATITUDE(",lat,
               ") fora da faixa de dados da previsao \n", coverage))
  }
  if (lon_oob) {
    stop(paste("Coordenada de LONGITUDE(",lon,
               ") fora da faixa de dados da previsao \n", coverage))
  }
}
|
eb750e1756c99784cf1ef40b8fc69c41099c0ca6 | 19396cd8c782fa4dc4762ecfe5c26f329c2f44f4 | /R/derivedEquations.R | 46cffeb3a4923ddd38386c22b146a2fc69be3205 | [
"MIT"
] | permissive | davidcsterratt/cOde | 65b8c3e3ef76340be34f74e3395d00c17c2e9b07 | e1e13938b7e3dad424531acb59bca733d59f4177 | refs/heads/master | 2020-04-03T10:45:43.797915 | 2018-10-29T11:47:42 | 2018-10-29T11:47:42 | 155,201,784 | 0 | 0 | null | 2018-10-29T11:39:06 | 2018-10-29T11:39:06 | null | UTF-8 | R | false | false | 12,656 | r | derivedEquations.R |
#' Compute sensitivity equations of a function symbolically
#'
#' @param f named vector of type character, the functions
#' @param states Character vector. Sensitivities are computed with respect to initial
#' values of these states
#' @param parameters Character vector. Sensitivities are computed with respect to initial
#' values of these parameters
#' @param inputs Character vector. Input functions or forcings. They are excluded from
#' the computation of sensitivities.
#' @param events data.frame of events with columns "var" (character, the name of the state to be
#' affected), "time" (numeric or character, time point),
#' "value" (numeric or character, value), "method" (character, either
#' "replace" or "add"). See \link[deSolve]{events}.
#' Within \code{sensitivitiesSymb()} a \code{data.frame} of additional events is generated to
#' reset the sensitivities appropriately, depending on the event method.
#' @param reduce Logical. Attempts to determine vanishing sensitivities, removes their
#' equations and replaces their right-hand side occurences by 0.
#' @details The sensitivity equations are ODEs that are derived from the original ODE f.
#' They describe the sensitivity of the solution curve with respect to parameters like
#' initial values and other parameters contained in f. These equtions are also useful
#' for parameter estimation by the maximum-likelihood method. For consistency with the
#' time-continuous setting provided by \link{adjointSymb}, the returned equations contain
#' attributes for the chisquare functional and its gradient.
#' @return Named vector of type character with the sensitivity equations. Furthermore,
#' attributes "chi" (the integrand of the chisquare functional), "grad" (the integrand
#' of the gradient of the chisquare functional), "forcings" (Character vector of the
#' additional forcings being necessare to compute \code{chi} and \code{grad}) and "yini" (
#' The initial values of the sensitivity equations) are returned.
#' @example inst/examples/example2.R
#' @example inst/examples/example2_sundials.R
#' @example inst/examples/example3.R
#' @export
sensitivitiesSymb <- function(f, states = names(f), parameters = NULL, inputs = NULL, events = NULL, reduce = FALSE) {
variables <- names(f)
states <- states[!states%in%inputs]
if (is.null(parameters)) {
pars <- getSymbols(c(f,
as.character(events[["value"]]),
as.character(events[["time"]])),
exclude = c(variables, inputs, "time"))
} else {
pars <- parameters[!parameters%in%inputs]
}
if (length(states) == 0 & length(pars) == 0)
stop("Attempt to compute sensitivities although both states and parameters had length 0.")
Dyf <- jacobianSymb(f, variables)
Dpf <- jacobianSymb(f, pars)
df <- length(f)
dv <- length(variables)
ds <- length(states)
dp <- length(pars)
# generate sensitivity variable names and names with zero entries then
# write sensitivity equations in matrix form
Dy0y <- Dpy <- NULL
sensParVariablesY0 <- sensParVariablesP <- NULL
if (ds > 0) {
mygridY0 <- expand.grid.alt(variables, states)
sensParVariablesY0 <- apply(mygridY0, 1, paste, collapse = ".")
Dy0y <- matrix(sensParVariablesY0, ncol = ds, nrow = dv)
}
if (dp > 0) {
mygridP <- expand.grid.alt(variables, pars)
sensParVariablesP <- apply(mygridP, 1, paste, collapse = ".")
Dpy <- matrix(sensParVariablesP, ncol = dp, nrow = dv)
}
gl <- NULL
if (!is.null(Dy0y)) {
gl <- c(gl, as.vector(prodSymb(matrix(Dyf, ncol = dv), Dy0y)))
}
if (!is.null(Dpy)) {
gl <- c(gl, as.vector(sumSymb(prodSymb(matrix(Dyf, ncol = dv), Dpy), matrix(Dpf, nrow = dv))))
}
newfun <- gl
newvariables.grid <- expand.grid.alt(variables, c(states, pars))
newvariables <- apply(newvariables.grid, 1, paste, collapse=".")
names(newfun) <- newvariables
# Compute list of new events
# var.p, t_var, 0 (if replace or add)
# var.p_var, t_var, 1 (if replace or add)
# x.t_var, t_var, f.var (if replace or add)
events.addon <- NULL
if (!is.null(events)) {
events.addon <- do.call(rbind, lapply(1:nrow(events), function(i) {
# Get variable, time and value
var <- as.character(events[["var"]][i])
tvar <- intersect(getSymbols(as.character(events[["time"]][i])), parameters)
eventpar <- intersect(getSymbols(as.character(events[["value"]][i])), parameters)
# Events for sensitivities with respect to time parameter, first
x.tvar <- NULL
if (length(tvar) > 0) {
statesNoVar <- setdiff(states, var)
x.tvar <- rbind(
data.frame(
var = paste(statesNoVar, tvar, sep = "."),
time = events[["time"]][i],
value = switch(
as.character(events[["method"]][i]),
replace = paste0("(", jacobianSymb(f[statesNoVar], var), ") * (", as.character(events[["var"]][i]), " - ", as.character(events[["value"]][i]), ")"),
add = paste0("(", jacobianSymb(f[statesNoVar], var), ") * (-", as.character(events[["value"]][i]), ")")),
method = events[["method"]][i],
stringsAsFactors = FALSE
),
data.frame(
var = paste(var, tvar, sep = "."),
time = events[["time"]][i],
value = switch(
as.character(events[["method"]][i]),
replace = paste0("(", jacobianSymb(f[var], var), ") * (", var ,"-", as.character(events[["value"]][i]), ") - (", f[var], ")"),
add = paste0("(", jacobianSymb(f[var], var), ") * (-", as.character(events[["value"]][i]), ")")),
method = events[["method"]][i],
stringsAsFactors = FALSE
)
)
}
# Events for sensitivities of state affected by event, second
if (length(eventpar) == 0) eventpar <- "-1"
parsNoTime <- setdiff(c(states, pars), tvar)
var.p <- data.frame(
var = paste(var, parsNoTime , sep = "."),
time = events[["time"]][i],
value = ifelse(parsNoTime == eventpar, 1, 0),
method = events[["method"]][i],
stringsAsFactors = FALSE
)
return(rbind(x.tvar, var.p))
}))
events <- events.addon
rownames(events) <- NULL
}
# Reduce the sensitivities
vanishing <- c(sensParVariablesY0[!(sensParVariablesY0 %in% as.character(events[["var"]][as.character(events[["value"]]) != "0"]))],
sensParVariablesP[Dpf == "0" & !(sensParVariablesP %in% as.character(events[["var"]][as.character(events[["value"]]) != "0"]))])
if(reduce) {
newfun <- reduceSensitivities(newfun, vanishing)
is.zero.sens <- names(newfun) %in% attr(newfun,"is.zero")
} else {
is.zero.sens <- rep(FALSE, length(newfun))
}
events <- events[!as.character(events[["var"]]) %in% names(newfun)[is.zero.sens], ]
newfun <- newfun[!is.zero.sens]
output.reduction <- structure(rep(0, length(which(is.zero.sens))), names = newvariables[is.zero.sens])
# Append initial values
initials <- rep(0, length(newfun))
names(initials) <- newvariables[!is.zero.sens]
ones <- which(apply(newvariables.grid, 1, function(row) row[1] == row[2]))
initials[newvariables[ones]] <- 1
# Construct index vector for states and parameters indicating non-vanishing
# sensitivity equations.
# States
hasSens.stateNames <- intersect(sensParVariablesY0, names(initials))
hasSens.StatesIdx <- rep(FALSE, length(sensParVariablesY0))
names(hasSens.StatesIdx) <- sensParVariablesY0
hasSens.StatesIdx[hasSens.stateNames] <- TRUE
# Parameters
hasSens.parameterNames <- intersect(sensParVariablesP, names(initials))
hasSens.ParameterIdx <- rep(FALSE, length(sensParVariablesP))
names(hasSens.ParameterIdx) <- sensParVariablesP
hasSens.ParameterIdx[hasSens.parameterNames] <- TRUE
# Compute wrss
pars <- c(pars, states)
statesD <- paste0(states, "D")
weightsD <- paste0("weight", statesD)
res <- paste0(weightsD,"*(", states, "-", statesD, ")")
sqres <- paste0(res, "^2")
chi <- c(chi = paste0(sqres, collapse =" + "))
sensitivities <- lapply(pars, function(p) paste0(states, ".", p))
names(sensitivities) <- pars
grad <- sapply(pars, function(p) paste0(paste("2*", res, "*", sensitivities[[p]]), collapse=" + "))
names(grad) <- paste("chi", pars, sep=".")
grad <- replaceSymbols(newvariables[is.zero.sens], "0", grad)
attr(newfun, "chi") <- chi
attr(newfun, "grad") <- grad
attr(newfun, "outputs") <- output.reduction
attr(newfun, "forcings") <- c(statesD, weightsD)
attr(newfun, "yini") <- initials
attr(newfun, "events") <- events
attr(newfun, "hasSensStatesIdx") <- hasSens.StatesIdx
attr(newfun, "hasSensParametersIdx") <- hasSens.ParameterIdx
return(newfun)
}
#' Compute adjoint equations of a function symbolically
#'
#' @param f Named vector of type character, the functions
#' @param states Character vector of the ODE states for which observations are available
#' @param inputs Character vector of the "variable" input states, i.e. time-dependent parameters
#' (in contrast to the forcings).
#' @param parameters Character vector of the parameters
#' @details The adjoint equations are computed with respect to the functional
#' \deqn{(x, u)\mapsto \int_0^T \|x(t)-x^D(t)\|^2 + \|u(t) - u^D(t)\|^2 dt,}{(x, u) -> int( ||x(t) - xD(t)||^2 + ||u(t) - uD(t)||^2, dt),}
#' where x are the states being constrained
#' by the ODE, u are the inputs and xD and uD indicate the trajectories to be best
#' possibly approached. When the ODE is linear with respect to u, the attribute \code{inputs}
#' of the returned equations can be used to replace all occurrences of u by the corresponding
#' character in the attribute. This guarantees that the input course is optimal with
#' respect to the above function.
#' @return Named vector of type character with the adjoint equations. The vector has attributes
#' "chi" (integrand of the chisquare functional), "grad" (integrand of the gradient of the chisquare functional),
#' "forcings" (character vector of the forcings necessary for integration of the adjoint equations) and
#' "inputs" (the input expressed as a function of the adjoint variables).
#' @example inst/examples/example5.R
#' @export
adjointSymb <- function(f, states=names(f), parameters = NULL, inputs=NULL) {

  # One adjoint variable per equation of f, named "adj<state>".
  n <- length(f)
  adj_names <- paste0("adj", names(f))

  ## Adjoint dynamics: d(adj)/dt = -t(df/dx) %*% adj, assembled symbolically
  dfdx <- matrix(jacobianSymb(f), n)
  rhs <- as.vector(prodSymb(t(dfdx), matrix(adj_names, ncol = 1)))
  adj <- paste0("-(", rhs, ")")
  names(adj) <- adj_names

  ## States with observations get the weighted data residual added to
  ## their adjoint equation.
  residual <- paste0("(", states, "D - ", states, ")")
  weighted_res <- paste(residual, paste0("weight", states, "D"), sep = "*")
  observed <- paste0("adj", states)
  adj[observed] <- paste(adj[observed], weighted_res, sep = "+")

  ## Inputs expressed through the adjoint variables (optimality condition
  ## when the ODE is linear in the inputs).
  u <- NULL
  if (!is.null(inputs)) {
    dfdu <- matrix(jacobianSymb(f, inputs), n)
    adj_term <- as.vector(prodSymb(t(dfdu), matrix(adj_names, ncol = 1)))
    u <- as.vector(
      sumSymb(
        paste0(inputs, "D"),
        matrix(paste0("-(", adj_term, ")*eps/weight", inputs, "D"), ncol = 1)
      )
    )
    u <- paste0("(", u, ")")
    names(u) <- inputs
  }

  ## Forcings required by the BVP solver: data curves plus their weights
  forcings <- paste0(c(states, inputs), "D")
  forcings <- c(forcings, paste0("weight", forcings))

  ## Integrand of the time-continuous chi-square functional
  deviation <- paste0("(", c(states, inputs), "-", c(states, inputs), "D)")
  deviation_sq <- paste0(deviation, "^2")
  weighted_sq <- paste(paste0("weight", c(states, inputs), "D"), deviation_sq, sep = "*")
  chi <- paste(weighted_sq, collapse = " + ")
  names(chi) <- "chi"
  attr(adj, "chi") <- chi

  ## When no parameters are given, take every symbol of f that is neither
  ## an input nor a state.
  if (is.null(parameters)) {
    symbols <- getSymbols(f)
    parameters <- symbols[!(symbols %in% inputs) & !(symbols %in% names(f))]
  }

  ## Integrand of the gradient wrt parameters: 2 * adj' * df/dp
  if (length(parameters) > 0) {
    dfdp <- matrix(jacobianSymb(f, parameters), n)
    inner <- as.vector(prodSymb(matrix(adj_names, nrow = 1), dfdp))
    grad_p <- paste0("2*(", inner, ")")
    names(grad_p) <- paste("chi", parameters, sep = ".")
    attr(adj, "grad") <- grad_p
  }

  attr(adj, "forcings") <- forcings
  attr(adj, "inputs") <- u
  adj
}
#'@title Forcings data.frame
#'@name forcData
#'@docType data
#'@description Forcings data.frame
#'
NULL
#'@title Time-course data of O, O2 and O3
#'@name oxygenData
#'@docType data
#'@description Time-course data.frame with concentrations of the oxygen species O, O2 and O3
NULL
|
5d9afcacde0dca48acfaf60ec3a97dbb0b189fc2 | 7f2c9d01bfb23d4446a9241ac57613175015e471 | /Etapa_2/Lesson_1/Ejemplo_04.R | 49dcdc22f71c12eb2138f426f1e24e109b0f43fb | [] | no_license | AnaNava1996/BEDU-Banco-Santander-3-caminos-Data-Analytics | ce462e18fe254231b52c7a6bf631a07f74961b38 | cf7d3dc74376a433505e3986c9572250f4ab8812 | refs/heads/master | 2023-02-23T11:36:41.753840 | 2021-01-31T02:01:36 | 2021-01-31T02:01:36 | 308,101,201 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,354 | r | Ejemplo_04.R | # EJEMPLO 4. Descarga y lectura de data sets.
# Objective
#   Set and inspect the working directory
#   Read CSV files, both local and from a remote repository
#   Work with the resulting objects
# Requirements
#   R, RStudio
# Prework
# Development
#   Run the command lines below and try to understand what each part of the input does
# Many data sets can be obtained from Kaggle; visit the site to get familiar with it
# The following is a database of the best-selling books on Amazon from 2009 - 2019
# Get the current working directory path
getwd()
# Set the working directory
# NOTE(review): hard-coded, user-specific path — each user must adapt it to their machine
setwd("/home/ana/Desktop/BEDU/BEDU-Banco-Santander-3-caminos-Data-Analytics/Etapa_2/Lesson_1/") # Depends on the user
# The read.csv function is useful for reading .csv files
read.csv("./Data/bestsellers with categories.csv") # The csv file must be in the working directory
# The file read above can also be assigned to an object
amazon.books <- read.csv("./Data/bestsellers with categories.csv")
tail(amazon.books); str(amazon.books)
# A file can also be read directly from a URL
data.url <- read.csv("https://www.football-data.co.uk/mmz4281/2021/SP1.csv")
tail(data.url); str(data.url)
# Compute the dimensions of the data set (rows, columns)
dim(amazon.books)
# The type of an object can be obtained with class()
class(amazon.books)
|
347297d68e87df6565bf8584aa4c2e4246dcb1ad | fd0622e97276bba2c04d3c2fcba902cdfb65e214 | /packages/nimble/man/any_na.Rd | 9fa248829d95f00262277a1e2b260ecc8331c3fa | [
"BSD-3-Clause",
"CC-BY-4.0",
"GPL-2.0-only",
"GPL-1.0-or-later",
"MPL-2.0",
"GPL-2.0-or-later"
] | permissive | nimble-dev/nimble | 7942cccd73815611e348d4c674a73b2bc113967d | 29f46eb3e7c7091f49b104277502d5c40ce98bf1 | refs/heads/devel | 2023-09-01T06:54:39.252714 | 2023-08-21T00:51:40 | 2023-08-21T00:51:40 | 20,771,527 | 147 | 31 | BSD-3-Clause | 2023-08-12T13:04:54 | 2014-06-12T14:58:42 | C++ | UTF-8 | R | false | true | 482 | rd | any_na.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nimbleFunction_Rexecution.R
\name{any_na}
\alias{any_na}
\alias{any_nan}
\title{Determine if any values in a vector are NA or NaN}
\usage{
any_na(x)
any_nan(x)
}
\arguments{
\item{x}{vector of values}
}
\description{
NIMBLE language functions that can be used in either compiled or uncompiled
nimbleFunctions to detect if there are any NA or NaN values in a vector.
}
\author{
NIMBLE Development Team
}
|
f4bceb61990af8d974815fa41611bb59ac58fa41 | dd16792ef4efe0266d568fc0a21bbcd5304109ce | /diversity_and_ordination.R | 9f3a59f9aaa53f1d91233e68c44bbb43f4e331e7 | [] | no_license | paulbible/microbiome_plotting_in_R | 3b5f6a2d3683bc71eb88c9a7094660c0a1dbf712 | a97880046c3cf7d67c030c1220686b84d6190b39 | refs/heads/master | 2021-01-23T07:26:26.252553 | 2017-03-31T06:00:16 | 2017-03-31T06:00:16 | 86,423,842 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,877 | r | diversity_and_ordination.R | # load needed libraries
# Load required libraries
library(vegan)
library(ggplot2)
library(reshape2)
########################
#setwd("path/to/data")
# Load test data: rows are species (plus a leading group row), columns are samples.
species.table <- read.delim("microbiome_test_data_species.tsv", sep="\t", header = TRUE, row.names = 1)
# Get the group labels from the first row and convert them to a plain vector.
groups <- as.vector(unlist(species.table[1,]))
# Drop the group row so that only the species counts remain.
tmp.matrix <- species.table[-1,]
# Convert the remaining entries to numeric types.
species.counts <- apply(as.matrix.noquote(tmp.matrix), 2, as.numeric)
# apply() drops the dimnames, so restore the correct row and column names.
rownames(species.counts) <- rownames(tmp.matrix)
colnames(species.counts) <- colnames(tmp.matrix)
# Optional filtering: zero out low-abundance counts (threshold chosen for this data set).
species.filtered <- species.counts
species.filtered[species.filtered < 2000] <- 0
# Remove species whose counts are zero in every sample.
# FIX: the original used species.filtered[-(which(rsums == 0)), ], which returns an
# EMPTY matrix when no row sums to zero, because negative indexing with a zero-length
# integer vector selects nothing. Logical subsetting handles that edge case correctly,
# and drop = FALSE keeps the result a matrix even when a single row survives.
rsums <- rowSums(species.filtered)
species.filtered <- species.filtered[rsums > 0, , drop = FALSE]
species.counts <- species.filtered
# Relative abundance per sample (vegan's decostand). Note t() transposes the data
# so that rows become samples and columns become species.
species.relab <- decostand(t(species.counts), "total")
# Alpha Diversity
#################
alpha.diversity <- diversity(species.relab, "shannon")
alpha.div.table <- data.frame(sample=rownames(species.relab),
alpha=alpha.diversity,
group=groups)
# Normal R Boxplot alpha diversity by group
boxplot(alpha.diversity ~ groups,
xlab="Groups",
ylab="Shannon Index",
main="Alpha Diversity (Shannon Index) by Group")
# ggplot2 boxplot for alpha diversity
ggplot(alpha.div.table,
aes(x=group, y=alpha, fill=group)) +
# set labels for the plot
labs(list(title = "Alpha Diversity (Shannon Index) by Group",
x = "Groups",
y = "Shannon Index",
fill="Fill Legend Title")) +
# use a bar plot
geom_boxplot() +
# Manually set colors here
scale_fill_discrete(breaks = c("group_1", "group_2", "group_3", "group_4"),
labels = c("This is G1", "This is G2", "This is G3", "This is G4")) +
# Manaully edit X axis labels
scale_x_discrete(breaks = c("group_1", "group_2", "group_3", "group_4"),
labels = c("This is G1", "This is G2", "This is G3", "This is G4")) +
# Rotate the x-axis sample labels, center the plot title
theme(plot.title = element_text(hjust = 0.5),
legend.position="none") # hide legend, legend is not necessary for this plot
# change sample order
# ggplot2 will order samples base on their order in the 'levels' of a 'factor'
# This command sets the order of sample to be the same as their normal order
alpha.div.table <- within(alpha.div.table,
sample <- factor(sample,
levels=sample))
# Plot alpha diversity for each sample
ggplot(alpha.div.table,
aes(x=sample, y=alpha)) +
# set labels for the plot
labs(list(title = "Alpha Diversity (Shannon Index) by Group",
x = "Samples",
y = "Shannon Index",
fill="Fill Legend Title")) +
# use a bar plot
geom_bar(aes(x=sample, fill=factor(group)), stat="identity") +
# set the group name labels, replace these with your group labels.
scale_fill_discrete(breaks = c("group_1", "group_2", "group_3", "group_4"),
labels = c("This is G1", "This is G2", "This is G3", "This is G4")) +
# Rotate the x-axis sample labels, center the plot title
theme(axis.text.x = element_text(angle = 90, hjust=1, vjust=0.40),
plot.title = element_text(hjust = 0.5))
# Plot alpha diversity for each sample, with manually set colors.
ggplot(alpha.div.table,
aes(x=sample, y=alpha)) +
# set labels for the plot
labs(list(title = "Alpha Diversity (Shannon Index) by Group",
x = "Samples",
y = "Shannon Index",
fill="Fill Legend Title")) +
# use a bar plot
geom_bar(aes(x=sample, fill=factor(group)), stat="identity") +
# Manually set colors here, replace breaks with your group labels.
scale_fill_manual(values = c("red", "green", "blue", "orange"),
breaks = c("group_1", "group_2", "group_3", "group_4"),
labels = c("This is G1", "This is G2", "This is G3", "This is G4")) +
# Rotate the x-axis sample labels, center the plot title
theme(axis.text.x = element_text(angle = 90, hjust=1, vjust=0.40),
plot.title = element_text(hjust = 0.5))
###############################
# Beta diversity and ordination
###############################

# Beta diversity: Bray-Curtis distance between samples (vegan's vegdist)
bray.distance <- vegdist(species.relab, "bray")
# Classical multidimensional scaling (PCoA) of the Bray-Curtis distances, keeping 3 axes
mds.bray <- cmdscale(bray.distance, 3, eig=TRUE)

# PCoA plot for Bray-Curtis beta-diversity:
# create a data.frame with the first two coordinates in a form ggplot2 accepts
df = data.frame(PCoA1=mds.bray$points[,1], PCoA2=mds.bray$points[,2], group=groups)
ggplot(data=df,
       aes(x=PCoA1,
           y=PCoA2,
           color=group)) +
  geom_point(size=2) +
  labs(list(title = "PCoA of Relative abundance (Bray-Curtis Distance)",
            x = "PCo1",
            y = "PCo2")) +
  # Rename the legend entries; replace these with your own group labels
  scale_color_discrete(name="Fill Group Title",
                       breaks = c("group_1", "group_2", "group_3", "group_4"),
                       labels = c("This is G1", "This is G2", "This is G3", "This is G4")) +
  theme(legend.position = 'bottom', plot.title = element_text(hjust = 0.5))

# PERMANOVA: test for location effects (mean) of the groups (adonis function in vegan)
# NOTE(review): adonis() is deprecated in recent vegan releases in favor of adonis2()
# — confirm against the installed vegan version.
permanova.mod <- adonis(bray.distance ~ groups, perm=20000)
print(permanova.mod)
# A significant PERMANOVA can also reflect unequal spread between groups,
# so test for differences in dispersion (variance) separately.
dispersion.mod <- betadisper(bray.distance, groups)
anova(dispersion.mod)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.