blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
fcec51de69d94721b28fd4123def77e873676169
431d385c76325212a14a52570c9c2c8aecdd7a27
/plotSentiments.r
10127d2fa703bc422e2369a7d6736dddf16def26
[]
no_license
mattravenhall/TweetSentiment
2e5d5f20195ceb3eeae4f7d73559ac8d36c35172
3fb92a43d522f1534e7c3075a0146346b5795713
refs/heads/master
2021-03-31T01:42:33.344348
2018-03-08T17:16:42
2018-03-08T17:16:42
124,422,683
0
0
null
null
null
null
UTF-8
R
false
false
1,130
r
plotSentiments.r
# plotSentiments.r — plot tweet sentiment time series from a sentiments CSV.
# Reads sentiments.txt (Time, Sentiment, nTweets columns), keeps the first
# three comma-separated fields, and writes three PNGs: sentiment by day,
# sentiment by hour (jittered scatter + hourly mean), and an hourly boxplot.
library(data.table)

# Clean out tweets from .csv: keep only the first three fields.
# NOTE(review): shells out to `cut`, so this assumes a POSIX environment.
system("cut -d',' -f1-3 sentiments.txt > sentiments2.txt")

# Read in .csv
dat <- fread("sentiments2.txt")
# Parse the raw Time strings into POSIXct so month()/hour() arithmetic works.
dat$TimeClean <- as.POSIXct(dat$Time, format = "%Y-%m-%d %H:%M:%S")
# Drop low-volume periods: a sentiment mean over <= 5 tweets is too noisy.
dat <- subset(dat, nTweets > 5)
#dat <- dat[!is.na(dat$Sentiment),]

# Plot sentiment by day; odd and even months get alternating colours so
# month boundaries are visible in the long time series.
png("sentiments_by_day.png", height = 400, width = 1200)
plot(dat$TimeClean, dat$Sentiment,
     type = "h", lwd = 4, lend = 1, cex = 0.5, pch = 20,
     ylim = c(-0.5, 0.5),
     col = c("goldenrod", "darkgrey")[as.numeric(month(dat$TimeClean) %% 2 != 0) + 1],
     main = "#phdlife", xlab = "Time", ylab = "Sentiment")
abline(h = 0, lty = 2, col = "grey")
dev.off()

# Plot sentiment by hour of day.
# Fix: use the parsed TimeClean column — data.table's hour() needs a
# date/time class, not the raw character Time column read from the CSV.
png("sentiments_by_hour.png", height = 600, width = 800)
plot(dat$Sentiment ~ jitter(hour(dat$TimeClean), 1),
     pch = 20, cex = 0.8,
     main = "#phdlife", xlab = "Hour", ylab = "Sentiment", ylim = c(-1, 1))
# Overlay the per-hour mean sentiment.
lines(sort(unique(hour(dat$TimeClean))),
      by(dat$Sentiment, hour(dat$TimeClean), mean), col = "red")
dev.off()

# Same hourly view as a boxplot.
png("sentiments_by_hour_box.png", height = 600, width = 800)
boxplot(dat$Sentiment ~ hour(dat$TimeClean),
        col = "goldenrod",
        main = "#phdlife", xlab = "Hour", ylab = "Sentiment", ylim = c(-1, 1))
dev.off()
986e6b3be960d274a25a043e18f19ad72af5598e
f9f39737fc94196ff48525acd49f6828c7719eac
/functions.R
b529619e4ca1247d8f1498b5e817d86a3e12b917
[ "MIT" ]
permissive
KatharinaGruber/BrazilWindpower_biascorr
cbac02514c0d4423cff84358356f2d0a1db53032
6849dda96c2999157194c7d4d2aa80e2cda45c98
refs/heads/master
2021-06-16T18:07:59.508895
2021-03-23T07:37:19
2021-03-23T07:37:19
175,448,056
0
0
null
null
null
null
ISO-8859-1
R
false
false
58,213
r
functions.R
# this file contains functions for the simulation of wind power generation from MERRA-2 data # and also for performing different types of bias correction # function to calculate power generated in one location without using correction factors # method: interpolation method to use (1:NN,2:BLI,4:IDW, no BCI because useless) # uses Ryberg power curve model and 2 heights for extrapolation calcstatpower <- function(method){ # get data of windparks: capacities and start dates and sort by start dates for each location load(paste(dirwindparks,"/windparks_complete.RData",sep="")) windparks <- data.frame(windparks,comdate=as.POSIXct(paste(windparks$year,"-",windparks$month,"-",windparks$day," 00:00:00",sep=""),tz="UTC")) windparks <- windparks[which(windparks$comdate < as.POSIXct("2017-08-31 00:00:00",tz="UTC")),] windparks$comdate[which(windparks$comdate<date.start)] <- date.start # extract information on wind turbine type, rotor diameter, capacity of wind turbines, and number of installed turbines # turbine type ind_type <- which(!is.na(windparks$turbines)) windparks$type <- NA windparks$type[ind_type] <- sapply(strsplit(sapply(strsplit(windparks$turbines[ind_type],":"),"[[",2),"[(]"),"[[",1) # rotor diameter ind_diam <- ind_cap <- which(unlist(lapply(strsplit(windparks$turbines,"power"),length))==2) windparks$diam <- NA windparks$diam[ind_diam] <- as.numeric(sapply(strsplit(sapply(strsplit(windparks$turbines[ind_diam],"diameter"),"[[",2),"m\\)"),"[[",1)) # capacity windparks$tcap <- NA windparks$tcap[ind_diam] <- as.numeric(sapply(strsplit(sapply(strsplit(windparks$turbines[ind_diam],"power"),"[[",2),"kW"),"[[",1)) # number of turbines nturb <- gsub("turbines","",sapply(strsplit(windparks$turbines,":"),"[[",1)) nturb <- gsub("turbine","",nturb) nturb <- as.numeric(gsub("Turbine\\(s\\)","",nturb)) windparks$n <- nturb # fill in missing information with mean number of turbines windparks$n[is.na(windparks$n)] <- mean(as.numeric(windparks$n),na.rm=TRUE) # add specific turbine 
power (in W, for using Ryberg power curve model) # see https://doi.org/10.1016/j.energy.2019.06.052 windparks$sp <- windparks$tcap*1000/(windparks$diam^2/4*pi) # fill in missing information with mean specific power weighted by number of turbines and year ind_sp <- which(!is.na(windparks$sp)) yearly_sp <- aggregate((windparks$sp*windparks$n)[ind_sp],by=list(year(windparks$comdate)[ind_sp]),mean) yearly_sp[,2] <- yearly_sp[,2]/aggregate(windparks$n[ind_sp],by=list(year(windparks$comdate)[ind_sp]),mean)[,2] windparks$sp[is.na(windparks$sp)] <- yearly_sp[match(year(windparks$comdate),yearly_sp[,1])[is.na(windparks$sp)],2] # add hypothetical hubheight (is not included in dataset but linear function to estimate it from rotor diamter was fitted from US wind turbine database) windparks$hh <- 0.4761*windparks$diam + 36.5295 # fill in missing values with mean hh weighted by number of turbines and year ind_hh <- which(!is.na(windparks$hh)) yearly_hh <- aggregate((windparks$hh*windparks$n)[ind_hh],by=list(year(windparks$comdate)[ind_hh]),mean) yearly_hh[,2] <- yearly_hh[,2]/aggregate(windparks$n[ind_hh],by=list(year(windparks$comdate)[ind_hh]),mean)[,2] windparks$hh[is.na(windparks$hh)] <- yearly_hh[match(year(windparks$comdate),yearly_hh[,1])[is.na(windparks$hh)],2] statpowlist <- list() for(ind in c(1:length(windparks[,1]))){ print(ind) pplon <- windparks$long[ind] pplat <- windparks$lat[ind] # find nearest neightbour MERRA and extrapolate to hubheight long <<- pplon lat <<- pplat lldo <<- distanceorder() NNmer <- NNdf(method,windparks$hh[ind]) # cut-out wind speed: 25 m/s (-> set everything above to 0) NNmer[which(NNmer[,2]>25),2] <- 0 # calculate power output for all hours from power curve in kWh # values are interpolated linearly betweer points of power curve # Ryberg power curve model RybCoeff <- read.csv(paste0(ryberg_path,"/ryberg_coeff.csv"),sep=",") names(RybCoeff) <- c("CF","A","B") v <- mapply(function(A,B) exp(A+B*log(windparks$sp[ind])), RybCoeff$A, RybCoeff$B) # 
calculate power output statpower <- as.data.frame(approx(x=c(0,v,100),y=c(0,RybCoeff$CF/100,1)*windparks$cap[ind],xout=NNmer$vext)) # set production before commissioning 0 statpower$y[which(NNmer$date<windparks$comdate[ind])] <- 0 # add to results statpowlist[[ind]] <- data.frame(NNmer[,1],statpower$y) } return(statpowlist) } #function for calculating distances and order by distances for MERRA distanceorder <- function(){ distance <- 6378.388*acos(sin(rad*lat) * sin(rad*LonLat$lat) + cos(rad*lat) * cos(rad*LonLat$lat) * cos(rad*LonLat$long-rad*long)) lonlatdistao <- data.frame(LonLat,distance,c(1:length(distance))) names(lonlatdistao) <- c("Longitude","Latitude","distance","MRRnum") lonlatdistao <- lonlatdistao[order(lonlatdistao[,3]),] return(lonlatdistao) } # Interpolation of MERRA wind speeds # extrapolation from three heights # returns a dataframe with nearest neighbour time, wind speeds and disph # method refers to the method of interpolation: # 1... nearest neighbour interpolation # 2... bilinear interpolation # 3... bicubic interpolation (discarded) # 4... inverse weighting of distances NNdf3h <- function(method,hubheight=10){ setwd(dirmerra) switch(method, { ########## 1. Nearest Neighbour ########## # first row (nearest neighbour) is extracted # first column of list is taken (distance to station) # and columns 4 to 27, long and lat are excluded MRRdf <- getMerraPoint(lldo$Longitude[1],lldo$Latitude[1]) # Wind speeds Nearest Neighbor WHuv50 <- sqrt(MRRdf$U50M^2+MRRdf$V50M^2) WHuv10 <- sqrt(MRRdf$U10M^2+MRRdf$V10M^2) WHuv2 <- sqrt(MRRdf$U2M^2+MRRdf$V2M^2) # WH MERRA-DF MWH <- data.frame(MRRdf$MerraDate,WHuv50,WHuv10,WHuv2,MRRdf$DISPH) MWH1 <- MWH[which(MWH[,1]>=date.start),] MWH1ext <-data.frame(MWH1[,1],extrap(MWH1,hubheight)) names(MWH1ext) <- c("date","vext") return(MWH1ext) }, { ########## 2. 
Bilinear Interpolation ########## # first row (nearest neighbour) is extracted # three other neighbours in square around station are searched # in case point lies between two points (on lon or lat line) only 2 points are used for calculation # in case point lies exactly on Merra point, Nearest Neighbour method is used instead MRRdfs <- list() WHuv50 <- list() WHuv10 <- list() WHuv2 <- list() WHuvext <- list() #find coordinates of square points around station lonNN <- lldo[1,1] latNN <- lldo[1,2] if((lonNN==long)&&(latNN==lat)){ ################################ # in case station is on MERRA point ################################ #print(paste("Using NN Method for station",statn)) return(NNdf(1,hubheight)) }else if(lonNN==long){ ################################ # in case station is on lon line ################################ #print(paste("For station",statn,"interpolation only between lats")) if(latNN < lat){ lat1 <- latNN lat2 <- latNN+0.5 }else{ lat2 <- latNN lat1 <- latNN-0.5 } lats <- NULL lons <- lonNN lats[1] <- lldo$Latitude[first(which(as.numeric(as.factor(abs(lldo$Latitude-lat1)))==1))] lats[2] <- lldo$Latitude[first(which(as.numeric(as.factor(abs(lldo$Latitude-lat2)))==1))] setwd(dirmerra) MRRdfs[[1]] <- getMerraPoint(lons[1],lats[1]) MRRdfs[[2]] <- getMerraPoint(lons[2],lats[1]) # calculation of coefficients coeff1 <- (lats[2]-lat)/(lats[2]-lats[1]) coeff2 <- (lat-lats[1])/(lats[2]-lats[1]) # wind speeds square for(i in c(1:2)){ WHuv2[[i]] <- sqrt(MRRdfs[[i]]$U2M^2 + MRRdfs[[i]]$V2M^2) WHuv10[[i]] <- sqrt(MRRdfs[[i]]$U10M^2 + MRRdfs[[i]]$V10M^2) WHuv50[[i]] <- sqrt(MRRdfs[[i]]$U50M^2 + MRRdfs[[i]]$V50M^2) } # extrapolation for(i in c(1:2)){ WHuvext[[i]] <- extrap(data.frame(MRRdfs[[i]]$MerraDate,WHuv50[[i]],WHuv10[[i]],WHuv2[[i]],MRRdfs[[i]]$DISPH),hubheight) } #interpolation UVBLIext <- coeff1*WHuvext[[1]]+coeff2*WHuvext[[2]] }else if(latNN==lat){ ################################ # in case station is on lat line ################################ 
      #print(paste("For station",statn,"interpolation only between lons"))
      # station shares the MERRA latitude: interpolate between the two
      # neighbouring longitudes only (grid spacing 0.625 deg)
      if(lonNN < long){
        lon1 <- lonNN
        lon2 <- lonNN+0.625
      }else{
        lon2 <- lonNN
        lon1 <- lonNN-0.625
      }
      lats <- latNN
      lons <- NULL
      # look up the exact stored grid coordinates (avoids floating-point mismatch)
      lons[1] <- lldo$Longitude[first(which(as.numeric(as.factor(abs(lldo$Longitude-lon1)))==1))]
      lons[2] <- lldo$Longitude[first(which(as.numeric(as.factor(abs(lldo$Longitude-lon2)))==1))]
      setwd(dirmerra)
      MRRdfs[[1]] <- getMerraPoint(lons[1],lats[1])
      MRRdfs[[2]] <- getMerraPoint(lons[2],lats[1])
      # calculation of coefficients
      coeff1 <- (lons[2]-long)/(lons[2]-lons[1])
      coeff2 <- (long-lons[1])/(lons[2]-lons[1])
      # wind speeds square
      for(i in c(1:2)){
        WHuv2[[i]] <- sqrt(MRRdfs[[i]]$U2M^2 + MRRdfs[[i]]$V2M^2)
        WHuv10[[i]] <- sqrt(MRRdfs[[i]]$U10M^2 + MRRdfs[[i]]$V10M^2)
        WHuv50[[i]] <- sqrt(MRRdfs[[i]]$U50M^2 + MRRdfs[[i]]$V50M^2)
      }
      # extrapolation
      # NOTE(review): 5-column (3-height) frame passed to extrap(), which reads
      # column 4 (the 2 m speed) as displacement height; extrap3h() looks
      # intended -- confirm.
      for(i in c(1:2)){
        WHuvext[[i]] <- extrap(data.frame(MRRdfs[[i]]$MerraDate,WHuv50[[i]],WHuv10[[i]],WHuv2[[i]],MRRdfs[[i]]$DISPH),hubheight)
      }
      #interpolation
      UVBLIext <- coeff1*WHuvext[[1]]+coeff2*WHuvext[[2]]
    }else{
      ################################
      # in case station is inside square
      ################################
      # general case: bilinear interpolation over the 4 surrounding grid points
      if(lonNN < long){
        lon1 <- lonNN
        lon2 <- lonNN+0.625
      }else{
        lon2 <- lonNN
        lon1 <- lonNN-0.625
      }
      if(latNN < lat){
        lat1 <- latNN
        lat2 <- latNN+0.5
      }else{
        lat2 <- latNN
        lat1 <- latNN-0.5
      }
      # get coordinates from LonLat because numeric problems...
lats <- NULL lons <- NULL lats[1] <- lldo$Latitude[first(which(as.numeric(as.factor(abs(lldo$Latitude-lat1)))==1))] lats[2] <- lldo$Latitude[first(which(as.numeric(as.factor(abs(lldo$Latitude-lat2)))==1))] lons[1] <- lldo$Longitude[first(which(as.numeric(as.factor(abs(lldo$Longitude-lon1)))==1))] lons[2] <- lldo$Longitude[first(which(as.numeric(as.factor(abs(lldo$Longitude-lon2)))==1))] setwd(dirmerra) MRRdfs[[1]] <- getMerraPoint(lons[1],lats[1]) MRRdfs[[2]] <- getMerraPoint(lons[1],lats[2]) MRRdfs[[3]] <- getMerraPoint(lons[2],lats[1]) MRRdfs[[4]] <- getMerraPoint(lons[2],lats[2]) #calculation of coefficients for bilinear interpolation coeff1 <- (lons[2]-long)/(lons[2]-lons[1])*(lats[2]-lat)/(lats[2]-lats[1]) coeff2 <- (lons[2]-long)/(lons[2]-lons[1])*(lat-lats[1])/(lats[2]-lats[1]) coeff3 <- (long-lons[1])/(lons[2]-lons[1])*(lats[2]-lat)/(lats[2]-lats[1]) coeff4 <- (long-lons[1])/(lons[2]-lons[1])*(lat-lats[1])/(lats[2]-lats[1]) # wind speeds square for(i in c(1:4)){ WHuv2[[i]] <- sqrt(MRRdfs[[i]]$U2M^2 + MRRdfs[[i]]$V2M^2) WHuv10[[i]] <- sqrt(MRRdfs[[i]]$U10M^2 + MRRdfs[[i]]$V10M^2) WHuv50[[i]] <- sqrt(MRRdfs[[i]]$U50M^2 + MRRdfs[[i]]$V50M^2) } # extrapolation for(i in c(1:4)){ WHuvext[[i]] <- extrap(data.frame(MRRdfs[[i]]$MerraDate,WHuv50[[i]],WHuv10[[i]],WHuv2[[i]],MRRdfs[[i]]$DISPH),hubheight) } #interpolation UVBLIext <- coeff1*WHuvext[[1]]+coeff2*WHuvext[[2]]+coeff3*WHuvext[[3]]+coeff4*WHuvext[[4]] } # WH MERRA-DF MWH <- data.frame(MRRdfs[[1]]$MerraDate,UVBLIext) MWH1 <- MWH[which(MWH[,1]>=date.start),] names(MWH1) <- c("date","vext") return(MWH1) }, { ########## 3. Bicubic Interpolation ########## # dismissed }, { ########## 4. 
Inverse Distance Weighting ########## # first four rows (4 nearest neighbours) are extracted # the inverse distances are calculated # new velocities are found by weighting by the inverse distances MRRdfs <- list() WHuv50 <- list() WHuv10 <- list() WHuv2 <- list() for(i in c(1:4)){ MRRdfs[[i]] <- getMerraPoint(lldo$Longitude[i],lldo$Latitude[i]) } # Wind speeds 4 Neighbors Square for(i in c(1:4)){ WHuv50[[i]] <- sqrt(MRRdfs[[i]]$U50M^2+MRRdfs[[i]]$V50M^2) WHuv10[[i]] <- sqrt(MRRdfs[[i]]$U10M^2+MRRdfs[[i]]$V10M^2) WHuv2[[i]] <- sqrt(MRRdfs[[i]]$U2M^2+MRRdfs[[i]]$V2M^2) } # extrapolation WHext <- list() for(i in c(1:4)){ WHext[[i]] <- extrap(data.frame(MRRdfs[[1]]$MerraDate,WHuv50[[i]],WHuv10[[i]],WHuv2[[i]],MRRdfs[[i]]$DISPH),hubheight) } WHextdf <- as.data.frame(WHext) names(WHextdf) <-c("v1","v2","v3","v4") #calculation of coefficients for inverse distance weighting and interpolation IDWcoeffs <- list() distas <- c(lldo$distance[1],lldo$distance[2],lldo$distance[3],lldo$distance[4]) invdistasum <- sum(1/distas[1],1/distas[2],1/distas[3],1/distas[4]) for(i in c(1:4)){ IDWcoeffs[[i]] <- (1/(distas[i]))/(invdistasum) } coeffdf <- data.frame(matrix(rep(unlist(IDWcoeffs),each=length(WHextdf[,1])),nrow=length(WHextdf[,1]),ncol=4)) WHextIDW1 <- WHextdf*coeffdf WHextIDW <- WHextIDW1[,1]+WHextIDW1[,2]+WHextIDW1[,3]+WHextIDW1[,4] MWH <-data.frame(MRRdfs[[1]]$MerraDate,WHextIDW) MWH1 <- MWH[which(MWH[,1]>=date.start),] names(MWH1) <- c("date","vext") return(MWH1) }) } # Interpolation of MERRA wind speeds # extrapolation from three heights # returns a dataframe with nearest neighbour time, wind speeds and disph # method refers to the method of interpolation: # 1... nearest neighbour interpolation # 2... bilinear interpolation # 3... bicubic interpolation (discarded) # 4... inverse weighting of distances NNdf <- function(method,hubheight=10){ setwd(dirmerra) switch(method, { ########## 1. 
Nearest Neighbour ########## # first row (nearest neighbour) is extracted # first column of list is taken (distance to station) # and columns 4 to 27, long and lat are excluded MRRdf <- getMerraPoint(lldo$Longitude[1],lldo$Latitude[1]) # Wind speeds Nearest Neighbor WHuv50 <- sqrt(MRRdf$U50M^2+MRRdf$V50M^2) WHuv10 <- sqrt(MRRdf$U10M^2+MRRdf$V10M^2) # WH MERRA-DF MWH <- data.frame(MRRdf$MerraDate,WHuv50,WHuv10,MRRdf$DISPH) MWH1 <- MWH[which(MWH[,1]>=date.start),] MWH1ext <-data.frame(MWH1[,1],extrap(MWH1,hubheight)) names(MWH1ext) <- c("date","vext") return(MWH1ext) }, { ########## 2. Bilinear Interpolation ########## # first row (nearest neighbour) is extracted # three other neighbours in square around station are searched # in case point lies between two points (on lon or lat line) only 2 points are used for calculation # in case point lies exactly on Merra point, Nearest Neighbour method is used instead MRRdfs <- list() WHuv50 <- list() WHuv10 <- list() WHuvext <- list() #find coordinates of square points around station lonNN <- lldo[1,1] latNN <- lldo[1,2] if((lonNN==long)&&(latNN==lat)){ ################################ # in case station is on MERRA point ################################ #print(paste("Using NN Method for station",statn)) return(NNdf(1,hubheight)) }else if(lonNN==long){ ################################ # in case station is on lon line ################################ #print(paste("For station",statn,"interpolation only between lats")) if(latNN < lat){ lat1 <- latNN lat2 <- latNN+0.5 }else{ lat2 <- latNN lat1 <- latNN-0.5 } lats <- NULL lons <- lonNN lats[1] <- lldo$Latitude[first(which(as.numeric(as.factor(abs(lldo$Latitude-lat1)))==1))] lats[2] <- lldo$Latitude[first(which(as.numeric(as.factor(abs(lldo$Latitude-lat2)))==1))] setwd(dirmerra) MRRdfs[[1]] <- getMerraPoint(lons[1],lats[1]) MRRdfs[[2]] <- getMerraPoint(lons[2],lats[1]) # calculation of coefficients coeff1 <- (lats[2]-lat)/(lats[2]-lats[1]) coeff2 <- (lat-lats[1])/(lats[2]-lats[1]) # 
wind speeds square for(i in c(1:2)){ WHuv10[[i]] <- sqrt(MRRdfs[[i]]$U10M^2 + MRRdfs[[i]]$V10M^2) WHuv50[[i]] <- sqrt(MRRdfs[[i]]$U50M^2 + MRRdfs[[i]]$V50M^2) } # extrapolation for(i in c(1:2)){ WHuvext[[i]] <- extrap(data.frame(MRRdfs[[i]]$MerraDate,WHuv50[[i]],WHuv10[[i]],MRRdfs[[i]]$DISPH),hubheight) } #interpolation UVBLIext <- coeff1*WHuvext[[1]]+coeff2*WHuvext[[2]] }else if(latNN==lat){ ################################ # in case station is on lat line ################################ #print(paste("For station",statn,"interpolation only between lons")) if(lonNN < long){ lon1 <- lonNN lon2 <- lonNN+0.625 }else{ lon2 <- lonNN lon1 <- lonNN-0.625 } lats <- latNN lons <- NULL lons[1] <- lldo$Longitude[first(which(as.numeric(as.factor(abs(lldo$Longitude-lon1)))==1))] lons[2] <- lldo$Longitude[first(which(as.numeric(as.factor(abs(lldo$Longitude-lon2)))==1))] setwd(dirmerra) MRRdfs[[1]] <- getMerraPoint(lons[1],lats[1]) MRRdfs[[2]] <- getMerraPoint(lons[2],lats[1]) # calculation of coefficients coeff1 <- (lons[2]-long)/(lons[2]-lons[1]) coeff2 <- (long-lons[1])/(lons[2]-lons[1]) # wind speeds square for(i in c(1:2)){ WHuv10[[i]] <- sqrt(MRRdfs[[i]]$U10M^2 + MRRdfs[[i]]$V10M^2) WHuv50[[i]] <- sqrt(MRRdfs[[i]]$U50M^2 + MRRdfs[[i]]$V50M^2) } # extrapolation for(i in c(1:2)){ WHuvext[[i]] <- extrap(data.frame(MRRdfs[[i]]$MerraDate,WHuv50[[i]],WHuv10[[i]],MRRdfs[[i]]$DISPH),hubheight) } #interpolation UVBLIext <- coeff1*WHuvext[[1]]+coeff2*WHuvext[[2]] }else{ ################################ # in case station is inside square ################################ if(lonNN < long){ lon1 <- lonNN lon2 <- lonNN+0.625 }else{ lon2 <- lonNN lon1 <- lonNN-0.625 } if(latNN < lat){ lat1 <- latNN lat2 <- latNN+0.5 }else{ lat2 <- latNN lat1 <- latNN-0.5 } # get coordinates from LonLat because numeric problems... 
      lats <- NULL
      lons <- NULL
      lats[1] <- lldo$Latitude[first(which(as.numeric(as.factor(abs(lldo$Latitude-lat1)))==1))]
      lats[2] <- lldo$Latitude[first(which(as.numeric(as.factor(abs(lldo$Latitude-lat2)))==1))]
      lons[1] <- lldo$Longitude[first(which(as.numeric(as.factor(abs(lldo$Longitude-lon1)))==1))]
      lons[2] <- lldo$Longitude[first(which(as.numeric(as.factor(abs(lldo$Longitude-lon2)))==1))]
      setwd(dirmerra)
      MRRdfs[[1]] <- getMerraPoint(lons[1],lats[1])
      MRRdfs[[2]] <- getMerraPoint(lons[1],lats[2])
      MRRdfs[[3]] <- getMerraPoint(lons[2],lats[1])
      MRRdfs[[4]] <- getMerraPoint(lons[2],lats[2])
      #calculation of coefficients for bilinear interpolation
      coeff1 <- (lons[2]-long)/(lons[2]-lons[1])*(lats[2]-lat)/(lats[2]-lats[1])
      coeff2 <- (lons[2]-long)/(lons[2]-lons[1])*(lat-lats[1])/(lats[2]-lats[1])
      coeff3 <- (long-lons[1])/(lons[2]-lons[1])*(lats[2]-lat)/(lats[2]-lats[1])
      coeff4 <- (long-lons[1])/(lons[2]-lons[1])*(lat-lats[1])/(lats[2]-lats[1])
      # wind speeds square
      for(i in c(1:4)){
        WHuv10[[i]] <- sqrt(MRRdfs[[i]]$U10M^2 + MRRdfs[[i]]$V10M^2)
        WHuv50[[i]] <- sqrt(MRRdfs[[i]]$U50M^2 + MRRdfs[[i]]$V50M^2)
      }
      # extrapolation
      for(i in c(1:4)){
        WHuvext[[i]] <- extrap(data.frame(MRRdfs[[i]]$MerraDate,WHuv50[[i]],WHuv10[[i]],MRRdfs[[i]]$DISPH),hubheight)
      }
      #interpolation
      UVBLIext <- coeff1*WHuvext[[1]]+coeff2*WHuvext[[2]]+coeff3*WHuvext[[3]]+coeff4*WHuvext[[4]]
    }
    # WH MERRA-DF
    MWH <- data.frame(MRRdfs[[1]]$MerraDate,UVBLIext)
    MWH1 <- MWH[which(MWH[,1]>=date.start),]
    names(MWH1) <- c("date","vext")
    return(MWH1)
  },
  {
    ########## 3. Bicubic Interpolation ##########
    # dismissed
  },
  {
    ########## 4. Inverse Distance Weighting ##########
    # first four rows (4 nearest neighbours) are extracted
    # the inverse distances are calculated
    # new velocities are found by weighting by the inverse distances
    MRRdfs <- list()
    WHuv50 <- list()
    WHuv10 <- list()
    for(i in c(1:4)){
      MRRdfs[[i]] <- getMerraPoint(lldo$Longitude[i],lldo$Latitude[i])
    }
    # Wind speeds 4 Neighbors Square
    for(i in c(1:4)){
      WHuv50[[i]] <- sqrt(MRRdfs[[i]]$U50M^2+MRRdfs[[i]]$V50M^2)
      WHuv10[[i]] <- sqrt(MRRdfs[[i]]$U10M^2+MRRdfs[[i]]$V10M^2)
    }
    # extrapolation
    WHext <- list()
    for(i in c(1:4)){
      WHext[[i]] <- extrap(data.frame(MRRdfs[[1]]$MerraDate,WHuv50[[i]],WHuv10[[i]],MRRdfs[[i]]$DISPH),hubheight)
    }
    WHextdf <- as.data.frame(WHext)
    names(WHextdf) <-c("v1","v2","v3","v4")
    #calculation of coefficients for inverse distance weighting and interpolation
    IDWcoeffs <- list()
    distas <- c(lldo$distance[1],lldo$distance[2],lldo$distance[3],lldo$distance[4])
    invdistasum <- sum(1/distas[1],1/distas[2],1/distas[3],1/distas[4])
    for(i in c(1:4)){
      IDWcoeffs[[i]] <- (1/(distas[i]))/(invdistasum)
    }
    # replicate the 4 weights over all time steps, weight and sum column-wise
    coeffdf <- data.frame(matrix(rep(unlist(IDWcoeffs),each=length(WHextdf[,1])),nrow=length(WHextdf[,1]),ncol=4))
    WHextIDW1 <- WHextdf*coeffdf
    WHextIDW <- WHextIDW1[,1]+WHextIDW1[,2]+WHextIDW1[,3]+WHextIDW1[,4]
    MWH <-data.frame(MRRdfs[[1]]$MerraDate,WHextIDW)
    MWH1 <- MWH[which(MWH[,1]>=date.start),]
    names(MWH1) <- c("date","vext")
    return(MWH1)
  })
}

#Extrapolation height using log wind profile with logarithmic least squares fit
# MWH1 columns: (1) date, (2) v at 50 m, (3) v at 10 m + DISPH, (4) v at 2 m + DISPH,
# (5) displacement height DISPH; hIN is the target (hub) height in metres.
extrap3h <- function(MWH1,hIN=10){
  # solution of regression model v_z = a + b*log(h_z)
  n <- 3
  # b = (n * sum(v_j*log(h_j)) - sum(v_j) * sum(log(h_j))) / (n * sum((log(h_j))^2) - (sum(log(h_j)))^2)
  b <- (n * (MWH1[,2]*log(50) + MWH1[,3]*log(10+MWH1[,5]) + MWH1[,4]*log(2+MWH1[,5])) - (MWH1[,2]+MWH1[,3]+MWH1[,4]) * (log(50)+log(10+MWH1[,5])+log(2+MWH1[,5]))) / (n * ((log(50))^2+(log(10+MWH1[,5]))^2+(log(2+MWH1[,5]))^2) - (log(50)+log(10+MWH1[,5])+log(2+MWH1[,5]))^2)
  # a = (sum(v_j) - b*sum(log(h_j))) / n
  a <-
((MWH1[,2]+MWH1[,3]+MWH1[,4]) - b * (log(2+MWH1[,5])+log(10+MWH1[,5])+log(50))) / n
  #wind speed with power law
  vext <- a + b*log(hIN)
  # some wind speeds are negtaive -> fill in with last positive wind speed
  vext[which(vext<0)] <- NA
  # na.locf (zoo): last observation carried forward over the NA gaps
  return(na.locf(vext))
}

# extrapolation using power law and two heights of wind speeds
# MWH1 columns: (1) date, (2) v at 50 m, (3) v at 10 m + DISPH, (4) DISPH;
# hIN is the target (hub) height in metres.
extrap <- function(MWH1,hIN=10){
  #alpha friction coefficient
  alpha <- (log(MWH1[,2])-log(MWH1[,3]))/(log(50)-log(10+MWH1[,4]))
  #wind speed with power law
  vext <- MWH1[,2]*(hIN/50)^alpha
  return(vext)
}

# function to sum up power generation per state
# spl statpowlist: list of per-windpark data frames (time, power), one per row
# of the windparks table; returns a list of summed series, named by state.
makeSTATEpowlist <- function(spl){
  load(paste0(dirwindparks,"/windparks_complete.RData"))
  windparks <- data.frame(windparks,comdate=as.POSIXct(paste(windparks$year,"-",windparks$month,"-",windparks$day," 00:00:00",sep=""),tz="UTC"))
  windparks <- windparks[which(windparks$comdate < as.POSIXct("2017-08-31 00:00:00",tz="UTC")),]
  windparks$comdate[which(windparks$comdate<date.start)] <- date.start
  states <- unique(windparks$state)
  states<- states[order(states)]
  STATEpowlist <- list()
  for(i in c(1:length(states))){
    print(states[i])
    statepow <- NULL
    for(j in c(which(windparks$state==states[i]))){
      # NOTE(review): condition reads length(statepow>0) (length of the logical),
      # not length(statepow)>0; it happens to evaluate identically here
      # (0 for NULL, >0 for a data frame) -- parenthesisation looks accidental.
      if(length(statepow>0)){statepow[,2]=statepow[,2]+spl[[j]][,2]}else{statepow=spl[[j]]}
    }
    STATEpowlist[[i]] <- statepow
  }
  names(STATEpowlist) <- states
  return(STATEpowlist)
}

# function to sum up power generation per selected station
# spl statpowlist: same structure as in makeSTATEpowlist, but only the wind
# parks listed in selected_windparks.RData are summed, grouped by their state.
makeWPpowlist <- function(spl){
  load(paste0(dirwindparks,"/windparks_complete.RData"))
  windparks <- data.frame(windparks,comdate=as.POSIXct(paste(windparks$year,"-",windparks$month,"-",windparks$day," 00:00:00",sep=""),tz="UTC"))
  windparks <- windparks[which(windparks$comdate < as.POSIXct("2017-08-31 00:00:00",tz="UTC")),]
  windparks$comdate[which(windparks$comdate<date.start)] <- date.start
  load(paste0(dirwindparks_sel,"/selected_windparks.RData"))
  states <- unique(sel_windparks$state)
  ind <- which(windparks$name%in%sel_windparks$name)
  s_wp <-
windparks[ind,]
  statpowlist <- list()
  for(i in c(1:length(states))){
    print(states[i])
    statpow <- NULL
    for(j in c(ind[which(s_wp$state==states[i])])){
      # NOTE(review): length(statpow>0) -- see makeSTATEpowlist; works but the
      # parenthesisation looks accidental.
      if(length(statpow>0)){statpow[,2]=statpow[,2]+spl[[j]][,2]}else{statpow=spl[[j]]}
    }
    statpowlist[[i]] <- statpow
  }
  names(statpowlist) <- c("BA-Macaubas","PraiaFormosa","PE-SaoClemente","PI-Araripe","RN-AlegriaII","RS-ElebrasCidreira1","SC-BomJardim")
  return(statpowlist)
}
dfS[,2]+complist[[i]][,2] }else{ dfS <- complist[[i]] } } dflist <- list(dfNE,dfS) names(dflist) <- c("NE","S") return(dflist) } # daily aggregate wind power generation dailyaggregate <- function(statepowlist){ splagd <- list() for(i in c(1:length(statepowlist))){ days <- format(statepowlist[[1]][,1],"%Y%m%d") listnew <- aggregate(statepowlist[[i]][,2],by=list(days),sum) # insert dates in datetime format listnew[,1] <- as.POSIXct(paste(substr(listnew[,1],1,4),substr(listnew[,1],5,6),substr(listnew[,1],7,8),sep="-"),tz="UTC") names(listnew) <- c("time","wp") splagd[[i]] <- listnew } return(splagd) } # function for loading measured wind power for subsystems or brazil getprodSUBBRA <- function(area){ a <- data.frame(a=c("NE","S","BRASIL"),b=c("nordeste","sul","brasil")) prod <- read.table(paste(dirwindprodsubbra,"/",a$b[match(area,a$a)],"_dia.csv",sep=""),sep=";",header=T,stringsAsFactors=F,dec=",") # extract date and generation in GWh prod <- data.frame(date=as.POSIXct(paste(substr(prod[,1],7,10),substr(prod[,1],4,5),substr(prod[,1],1,2),sep="-")[-1],tz="UTC"),prod_GWh=prod[-1,8]) return(prod) } # function for loading measured wind power for states getSTATEproddaily <- function(state){ states <- gsub(".csv","",list.files(path=dirwindproddaily)) if(!is.na(match(state,states))){ STATEprod <- read.table(paste(dirwindproddaily,"/",states[match(state,states)],".csv",sep=""),sep=";",header=T,stringsAsFactors=F,dec=',') # first row is useless STATEprod <- STATEprod[2:length(STATEprod[,1]),] # extract yearmonth and generation in GWh STATEprod <- data.frame(date=as.POSIXct(paste(substr(STATEprod[,1],7,10),substr(STATEprod[,1],4,5),substr(STATEprod[,1],1,2),sep="-"),tz="UTC"),prod_GWh=STATEprod[,8]) return(STATEprod) }else{ return(NULL) } } getstatproddaily <- function(state){ files <- list.files(path=dirwindparks_sel,".csv") states <- substr(files,1,2) if(!is.na(match(state,states))){ STATEprod <- 
read.table(paste(dirwindparks_sel,"/",files[match(state,states)],sep=""),sep=";",header=T,stringsAsFactors=F,dec=',') # sometimes first row is useless if(STATEprod[1,1]==""){ STATEprod <- STATEprod[2:length(STATEprod[,1]),] } # extract yearmonth and generation in GWh STATEprod <- data.frame(date=as.POSIXct(paste(substr(STATEprod[,1],7,10),substr(STATEprod[,1],4,5),substr(STATEprod[,1],1,2),sep="-"),tz="UTC"),prod_GWh=STATEprod[,8]) return(STATEprod) }else{ return(NULL) } } # function that cuts two data frames to same length # data frames with two columns, first column has dates, second has data csl <- function(df1,df2){ cut1 <- max(df1[1,1],df2[1,1]) cut2 <- min(df1[nrow(df1),1],df2[nrow(df2),1]) df1.1 <- df1[which(df1[,1]==cut1):which(df1[,1]==cut2),] df2.1 <- df2[which(df2[,1]==cut1):which(df2[,1]==cut2),] df <- data.frame(df1.1[,1],df1.1[,2],df2.1[,2]) # in beginning there may be time where there is no production yet (leading 0s) -> remove it df <- df[which((df[,2]>0)&(df[,3]>0))[1]:length(df[,1]),] return(df) } # statn is number of station # function reads INMET data into a dataframe # long and lat are saved too readINMET <- function(statn,startdate){ stations <- read.table(paste(dirinmetmeta,"/stations_meta_data.csv",sep=""),sep=";",header=T,stringsAsFactors=F) # remove first and last two because they are not in Brazil stations <- stations[2:(length(stations[,1])-2),] final1 <- read.csv2(paste(dirinmet,"/",stations$name[statn],".csv",sep="")) # shift by 12 hours and convert to same time zone as MERRA data (UTC) dates.num <- format(final1[,1],tz="UTC") dates<-as.POSIXct(as.numeric(dates.num)-12*3600,tz="UTC",origin="1970-01-01") dates1<-dates[which(as.POSIXct(dates, tz="UTC")>=startdate)] wind<-final1[,9] wind1<-wind[which(as.POSIXct(dates, tz="UTC")>=startdate)] suppressWarnings(final_data<-data.frame(dates1,as.numeric(paste(wind1)))) # remove last row because from next day final_data<-final_data[1:(length(final_data$dates1)-1),1:2] names(final_data) <- 
c("dates1","wind1") long<<-stations$lon[statn] lat<<-stations$lat[statn] return(final_data) } # function which removes rows of at least len same entries occuring in col column in dataframe x # used to prepare INMET data because there are erroneous sequences rmrows <- function(x,len,col){ lengths <- data.frame(num=rle(x[,col])$lengths,cum=cumsum(rle(x[,col])$lengths)) lengths <- rbind(c(0,0),lengths) whichs <- which(lengths$num>=len) if(length(whichs)>0){ rowrm <- NULL for(i in c(1:length(whichs))){ rowrm <- c(rowrm,c((lengths[whichs[i]-1,2]+1):(lengths[whichs[i],2]))) } y <- x[-c(rowrm),] }else{ y=x } return(y) } # function to calculate power generated in one location with mean wind speed approximation # method: interpolation method to use (1:NN,2:BLI,4:IDW, no BCI because useless) # wscdata: which data to use for wind speed correction? ("INMET" or "WINDATLAS") # INMAXDIST: maximum distance to INMET station allowed for using it for correction # applylim: determines whether limit of INMAXDIST shall be applied calcstatpower_meanAPT <- function(method=1,wscdata="INMET",INmaxdist=0,applylim=1){ # get data of windparks: capacities and start dates and sort by start dates for each location load(paste(dirwindparks,"/windparks_complete.RData",sep="")) load(paste0(dirwindparks_sel,"/selected_windparks.RData")) windparks <- data.frame(windparks,comdate=as.POSIXct(paste(windparks$year,"-",windparks$month,"-",windparks$day," 00:00:00",sep=""),tz="UTC")) windparks <- windparks[which(windparks$comdate < as.POSIXct("2017-08-31 00:00:00",tz="UTC")),] windparks$comdate[which(windparks$comdate<date.start)] <- date.start # extract information on wind turbine type, rotor diameter, capacity of wind turbines, and number of installed turbines # turbine type ind_type <- which(!is.na(windparks$turbines)) windparks$type <- NA windparks$type[ind_type] <- sapply(strsplit(sapply(strsplit(windparks$turbines[ind_type],":"),"[[",2),"[(]"),"[[",1) # rotor diameter ind_diam <- ind_cap <- 
which(unlist(lapply(strsplit(windparks$turbines,"power"),length))==2) windparks$diam <- NA windparks$diam[ind_diam] <- as.numeric(sapply(strsplit(sapply(strsplit(windparks$turbines[ind_diam],"diameter"),"[[",2),"m\\)"),"[[",1)) # capacity windparks$tcap <- NA windparks$tcap[ind_diam] <- as.numeric(sapply(strsplit(sapply(strsplit(windparks$turbines[ind_diam],"power"),"[[",2),"kW"),"[[",1)) # number of turbines nturb <- gsub("turbines","",sapply(strsplit(windparks$turbines,":"),"[[",1)) nturb <- gsub("turbine","",nturb) nturb <- as.numeric(gsub("Turbine\\(s\\)","",nturb)) windparks$n <- nturb # fill in missing information with mean number of turbines windparks$n[is.na(windparks$n)] <- mean(as.numeric(windparks$n),na.rm=TRUE) # add specific turbine power (in W, for using Ryberg power curve model) # see https://doi.org/10.1016/j.energy.2019.06.052 windparks$sp <- windparks$tcap*1000/(windparks$diam^2/4*pi) # fill in missing information with mean specific power weighted by number of turbines and year ind_sp <- which(!is.na(windparks$sp)) yearly_sp <- aggregate((windparks$sp*windparks$n)[ind_sp],by=list(year(windparks$comdate)[ind_sp]),mean) yearly_sp[,2] <- yearly_sp[,2]/aggregate(windparks$n[ind_sp],by=list(year(windparks$comdate)[ind_sp]),mean)[,2] windparks$sp[is.na(windparks$sp)] <- yearly_sp[match(year(windparks$comdate),yearly_sp[,1])[is.na(windparks$sp)],2] # add hypothetical hubheight (is not included in dataset but linear function to estimate it from rotor diamter was fitted from US wind turbine database) windparks$hh <- 0.4761*windparks$diam + 36.5295 # fill in missing values with mean hh weighted by number of turbines and year ind_hh <- which(!is.na(windparks$hh)) yearly_hh <- aggregate((windparks$hh*windparks$n)[ind_hh],by=list(year(windparks$comdate)[ind_hh]),mean) yearly_hh[,2] <- yearly_hh[,2]/aggregate(windparks$n[ind_hh],by=list(year(windparks$comdate)[ind_hh]),mean)[,2] windparks$hh[is.na(windparks$hh)] <- 
yearly_hh[match(year(windparks$comdate),yearly_hh[,1])[is.na(windparks$hh)],2] statpowlist <- list() # list for saving correction factors cfs_mean <<-list() for(ind in c(1:length(windparks[,1]))){ print(ind) pplon <- windparks$long[ind] pplat <- windparks$lat[ind] # find nearest neightbour MERRA and extrapolate to hubheight long <<- pplon lat <<- pplat date.start <- as.POSIXct("2006-01-01", tz="UTC") lldo <<- distanceorder() NNmer <- NNdf(method,windparks$hh[ind]) # MEAN WIND SPEED CORRECTION if(wscdata=="INMET"){ ########################################################## ##### INMET wind speed correction ######################## ########################################################## stations<-read.table(paste(dirinmetmeta,"/stations_meta_data.csv",sep=""),sep=";",header=T,stringsAsFactors=F) # remove first and last two because they are not in Brasil stations <- stations[2:(length(stations[,1])-2),] # extract longitudes and latitudes statlons <- stations$lon statlats <- stations$lat rm(stations) ppINdistance <- 6378.388*acos(sin(rad*pplat) * sin(rad*statlats) + cos(rad*pplat) * cos(rad*statlats) * cos(rad*statlons-rad*pplon)) # only use if within maximum distance if((min(ppINdistance)<INmaxdist)|((applylim==0)&(windparks[ind,]$name %in% sel_windparks$name))){ # find data of nearest station statn <- which(ppINdistance==min(ppINdistance)) final_data <- readINMET(statn,date.start) # get wind speed data of nearest MERRA point (global long and lat variables have changed in readINMET function) date.start <- as.POSIXct("1999-01-01",tz="UTC") lldo <<- distanceorder() windMER10m <- NNdf(method,10) wind_df <- data.frame(final_data$dates1,final_data$wind1,windMER10m[1:length(final_data$wind1),2]) wind_df <- na.omit(wind_df) wind_df_r <- rmrows(wind_df,120,2) cf <- mean(wind_df_r[,2])/mean(wind_df_r[,3]) cfs_mean[[ind]] <<- cf }else{ cf <- 1 cfs_mean[[ind]] <<- 1 } # adapt mean wind speed NNmer[,2] <- NNmer[,2]*cf }else{ 
########################################################## ##### WIND ATLAS wind speed correction ################### ########################################################## load(paste(dirwindatlas,"/wind_atlas.RData",sep="")) ppWAdistance <- 6378.388*acos(sin(rad*pplat) * sin(rad*windatlas[,2]) + cos(rad*pplat) * cos(rad*windatlas[,2]) * cos(rad*windatlas[,1]-rad*pplon)) # find data of nearest station pointn <- which(ppWAdistance==min(ppWAdistance)) long <<- windatlas[pointn,1] lat <<- windatlas[pointn,2] # get wind speed data of nearest MERRA point (at 50m height! as wind atlas data) lldo <<- distanceorder() windMER50m <- NNdf(method,50) cf <- as.numeric(windatlas[pointn,3])/mean(windMER50m[,2]) cfs_mean[[ind]] <<- cf # adapt mean wind speed NNmer[,2] <- NNmer[,2]*cf } # cut-out wind speed: 25 m/s (-> set everything above to 0) NNmer[which(NNmer[,2]>25),2] <- 0 # calculate power output for all hours from power curve in kWh # values are interpolated linearly betweer points of power curve # Ryberg power curve model RybCoeff <- read.csv(paste0(ryberg_path,"/ryberg_coeff.csv"),sep=",") names(RybCoeff) <- c("CF","A","B") v <- mapply(function(A,B) exp(A+B*log(windparks$sp[ind])), RybCoeff$A, RybCoeff$B) # calculate power output statpower <- as.data.frame(approx(x=c(0,v,100),y=c(0,RybCoeff$CF/100,1)*windparks$cap[ind],xout=NNmer$vext)) # set production before commissioning 0 statpower$y[which(NNmer$date<windparks$comdate[ind])] <- 0 # add to results statpowlist[[ind]] <- data.frame(NNmer[,1],statpower$y) } return(statpowlist) } # monthly correction # function is given data frame with dates, INMET and MERRA wind speeds # monthly sums of INMET and MERRA wind speeds are calculated # missing data are removed in both datasets # 12 monthly correction factors are calculated # function returns list of [1] correlation of corrected MERRa data with INMET data # and [2] correction factors with columns (1) month (2) cf # in case wind speeds for one month are missing in the 
# dataset, the correction factor results in 1

# Monthly correction.
# wind_df: data frame with columns (1) dates, (2) INMET wind speeds, (3) MERRA wind speeds.
# Monthly sums of INMET and MERRA wind speeds are calculated and their ratio
# gives one correction factor per calendar month.
# Returns a list of [1] correlation of the corrected MERRA data with the INMET data
# and [2] the correction factors with columns (1) month (2) cf.
# In case wind speeds for a month are missing in the dataset, the correction
# factor for that month stays 1 (the data.frame default below).
corrm <- function(wind_df){
  # extract month from date (month() from lubridate, loaded elsewhere in this file)
  listmon <- month(wind_df[,1])
  wind_df <- data.frame(listmon, wind_df[,2:3])
  names(wind_df) <- c("month","windIN","windMER")
  # monthly sums of observed (INMET) and reanalysis (MERRA) wind speeds
  agINm <- aggregate(wind_df$windIN, by=list(wind_df$month), sum)
  agMERm <- aggregate(wind_df$windMER, by=list(wind_df$month), sum)
  # default correction factor 1 for months without data
  cfm <- data.frame(c(1:12), 1)
  cfm[match(agINm[,1], cfm[,1]), 2] <- as.vector(unlist(agINm[2]/agMERm[2]))
  names(cfm) <- c("month","cf")
  # monthly corrected wind MERRA
  cwindMERm <- wind_df$windMER*cfm[wind_df$month,2]
  corm <- cor(wind_df$windIN, cwindMERm)
  return(list(corm, cfm))
}

# hourly and monthly correction
# function is given data frame with dates, INMET and MERRA wind speeds
# monthly and hourly sums of INMET and MERRA wind speeds are calculated
# missing data are removed in both datasets (by the caller, see calccfs_r)
# 12*24 monthly and hourly correction factors are calculated
# function returns list of [1] correlation of corrected MERRA data with INMET data
# and [2] correction factors as a 24 (hours) x 12 (months) data frame
# in case wind speeds for one hour or month are missing in the dataset, the correction factor results in 1
corrhm <- function(wind_df){
  gc()
  listh <- hour(wind_df[,1])
  listmon <- month(wind_df[,1])
  # "%m%H" key, e.g. "0100" = January, hour 00
  listhm <- format(wind_df[,1], "%m%H")
  wind_df <- data.frame(listh, listmon, listhm, wind_df[,2:3])
  names(wind_df) <- c("hour","month","monthhour","windIN","windMER")
  agINmh <- aggregate(wind_df$windIN, by=list(wind_df$monthhour), sum)[,2]
  agMERmh <- aggregate(wind_df$windMER, by=list(wind_df$monthhour), sum)[,2]
  mh <- aggregate(wind_df$windIN, by=list(wind_df$monthhour), sum)[,1]
  listcfmh <- agINmh/agMERmh
  # all 288 possible month-hour keys, default correction factor 1
  m <- rep(c("01","02","03","04","05","06","07","08","09","10","11","12"), each=24)
  h <- rep(c("00","01","02","03","04","05","06","07","08","09","10","11","12","13","14","15","16","17","18","19","20","21","22","23"), 12)
  # paste() is vectorized; replaces the original element-wise loop that grew mh1
  mh1 <- paste(m, h, sep="")
  dfcfmh <- data.frame(mh1, 1)
  dfcfmh[match(mh, dfcfmh$mh1), 2] <- listcfmh
  # 24 x 12 matrix of correction factors (rows = hours, columns = months)
  cfmh <- as.data.frame(matrix(dfcfmh[,2], nrow=24, ncol=12))
  names(cfmh) <- c(1:12)
  # matrix indexing instead of a loop (faster): row = hour+1, column = month
  listhm <- data.frame(listh+1, listmon)
  listcf <- cfmh[as.matrix(listhm)]
  cwindMERmh <- wind_df$windMER*listcf
  cormh <- cor(wind_df$windIN, cwindMERmh)
  return(list(cormh, cfmh))
}

# function for calculating correction factors for all stations
# mean correction and removal of long time series of same values added
# int.method determines the method for interpolation
# 1 ... Nearest Neighbour
# 2 ... Bilinear Interpolation
# 3 ... Bicubic Interpolation
# 4 ... Inverse Distance Weighting
# Side effects: fills the global lists corm_r/corhm_r (correlations) and
# cfm_r/cfhm_r (correction factors), and the globals stations/statn/lldo used
# by the helper functions readINMET/distanceorder/NNdf (defined elsewhere in this file).
# Depends on the globals dirinmetmeta, dirresults, mindaynum, minmonth,
# monthlim, shortmonths -- assumed to be set by the surrounding script; TODO confirm.
calccfs_r <- function(int.method=1){
  date.start <- as.POSIXct("1999-01-01", tz="UTC")
  setwd(dirinmetmeta)
  stations1 <- read.table("stations_meta_data.csv", sep=";", header=TRUE, stringsAsFactors=FALSE)
  # remove first and last two because they are not in Brasil
  stations1 <- stations1[2:(length(stations1[,1])-2),]
  # remove stations that have time series with poor quality (rmdf is loaded from file)
  load(paste(dirresults,"/rmdf_",mindaynum,"_",minmonth,"_",monthlim,".RData",sep=""), envir=.GlobalEnv)
  stations_r <- stations1[rmdf$statn[which(rmdf$rr<=monthlim)],]
  # correlation lists
  corm_r <<- list()
  corhm_r <<- list()
  # correction factor lists
  cfm_r <<- list()
  cfhm_r <<- list()
  # read INMET data for all stations and calculate correction factors
  stations <<- stations_r
  for(i in seq_len(nrow(stations))){
    if(i%%10 == 0){print(i)}
    statn <<- i
    final_data <- readINMET(statn, date.start)
    lldo <<- distanceorder()
    # interpolation to be used for further calculations
    MWH1 <- NNdf(int.method)
    vwM <- MWH1[,2]
    # calculation of correlation without correction
    wind_df <- data.frame(final_data$dates1, final_data$wind1, vwM[1:length(final_data$wind1)])
    wind_df <- na.omit(wind_df)
    # remove runs of 120+ identical wind speeds (considered sensor errors)
    wind_df_r <- rmrows(wind_df, 120, 2)
    # remove short months (less than shortmonths days)
    ym_r <- as.numeric(format(wind_df_r[,1], "%Y%m"))
    wind_df_r_mrm <- rmrows_small(data.frame(ym_r, wind_df_r), shortmonths*24, 1)
    wind_df_r_mrm <- wind_df_r_mrm[,2:4]
    # calculation of monthly correlation and correction factors
    cm <- corrm(wind_df_r_mrm)
    cfm_r[[i]] <<- cm[[2]]
    corm_r[[i]] <<- cm[[1]]
    # calculation of hourly and monthly correlation and correction factors
    chm <- corrhm(wind_df_r_mrm)
    cfhm_r[[i]] <<- chm[[2]]
    corhm_r[[i]] <<- chm[[1]]
    rm(MWH1, vwM)
  }
}

# function which removes rows belonging to runs of fewer than len identical
# entries occurring in column col of data frame x.
# Side effect: records the number of removed runs in the global rmdf[statn,2]
# (statn is a global set by the caller).
rmrows_small <- function(x, len, col){
  # run lengths and their cumulative end positions
  # (named runlen instead of "lengths" to avoid masking base::lengths)
  runlen <- data.frame(num=rle(x[,col])$lengths, cum=cumsum(rle(x[,col])$lengths))
  whichs <- which(runlen$num<len)
  rmdf[statn,2] <<- length(whichs)
  # prepend a zero row so that (cum of previous run)+1 indexes the run start
  runlen <- rbind(c(0,0), runlen)
  if(length(whichs)>0){
    # collect all row indices covered by the too-short runs
    # (vectorized instead of growing rowrm with c() in a loop)
    rowrm <- unlist(lapply(whichs, function(w) (runlen[w,2]+1):(runlen[w+1,2])))
    y <- x[-c(rowrm),]
  }else{
    y <- x
  }
  return(y)
}

# function for cleaning INMET data
# minmonth defines at least how many months need to be "complete"
# mindaynum defines the number of days which is sufficient for a month to be complete
# monthlim defines how many of the 12 months are allowed to have less than minmonth months with mindaynum days (1 for february)
# shortmonths defines the minimum number of days that a month must contain in order not to be removed from the data
# rmrows defines whether long runs (5 or more days) of same wind speeds shall be removed as they are considered error in the data
# (NOTE: the numeric parameter rmrows shadows the function rmrows; the call
#  rmrows(wind_df,120,2) still resolves to the function because R skips
#  non-function bindings when looking up a call target)
# Side effect: builds the global rmdf data frame and saves it to dirresults.
remove_months <- function(minmonth, mindaynum, monthlim, shortmonths, rmrows){
  date.start <- as.POSIXct("1999-01-01 00:00:00", tz="UTC")
  setwd(dirinmetmeta)
  stations <<- read.table("stations_meta_data.csv", sep=";", header=TRUE, stringsAsFactors=FALSE)
  # remove first and last two because they are not in Brazil
  stations <<- stations[2:(length(stations[,1])-2),]
  # r: how many months are removed because they are too short? (less than 5 days)
  # rr: how many months have less than minmonth full months?
  rmdf <<- data.frame(statn=c(1:length(stations[,2])), r=rep(0,length(stations[,1])), rr=rep(0,length(stations[,1])))
  for(statn in seq_len(nrow(stations))){
    statn <<- statn
    final_data <- readINMET(statn, date.start)
    wind_df <- na.omit(final_data)
    if(rmrows>0){wind_df_r <- rmrows(wind_df, 120, 2)}else{wind_df_r <- wind_df}
    ym <- as.numeric(format(wind_df_r[,1], "%Y%m"))
    wind_df_rr <- rmrows_small(data.frame(ym, wind_df_r), shortmonths*24, 1)
    wind_df_rr <- wind_df_rr[,-c(1)]
    ym <- as.numeric(format(wind_df_rr[,1], "%Y%m"))
    df <- data.frame(rle(ym)$lengths, rle(ym)$values)
    # only select full months
    df <- df[which(df[,1]>=mindaynum*24),]
    # get full months
    m <- (df[,2])%%100
    m <- m[order(m)]
    genugmon <- which(rle(m)$lengths>=minmonth)
    rmdf[statn,3] <- 12-length(genugmon)
  }
  setwd(dirresults)
  save(rmdf, file=paste("rmdf_",mindaynum,"_",minmonth,"_",monthlim,if(rmrows==0){"normr"},".RData",sep=""))
}

# function to calculate power generated in one location with mean wind speed approximation
# limits for use of INMET data: distance limit (INmaxdist) - maximum allowed distance to closest INMET station
# and correlation limit (corrlimit) - minimum correlation after correction required
# method: interpolation method to use (1:NN, 2:BLI, 4:IDW, no BCI because useless)
# mhm: parameter which defines the type of correction to apply (hourly and monthly "hm" or only monthly "m")
# applylim: optionally leave out applying the limits (distance and correlation) - relevant for comparison of particular wind parks
# Returns a list with one data frame per wind park (timestamps, hourly power output).
# Side effect: saves corlist (flags of which parks were corrected) to dirresults.
calcstatpower_windcor <- function(INmaxdist=0, corrlimit=0, method=1, mhm=0, applylim=1){
  # load locations of wind speed measurement stations
  stations <- read.table(paste(dirinmetmeta,"/stations_meta_data.csv",sep=""), sep=";", header=TRUE, stringsAsFactors=FALSE)
  # remove first and last two because they are not in Brasil
  stations <- stations[2:(length(stations[,1])-2),]
  # remove stations that have time series with poor quality
  load(paste(dirresults,"/rmdf_",mindaynum,"_",minmonth,"_",monthlim,".RData",sep=""))
  stations <- stations[rmdf$statn[which(rmdf$rr<=monthlim)],]
  # extract longitudes and latitudes
  statlons <- stations$lon
  statlats <- stations$lat
  rm(stations)
  # load correction factors (corm_r/corhm_r/cfm_r/cfhm_r)
  load(paste(dirresults,"/cfscors_r.RData",sep=""))
  # load wind atlas correction factors (cfs_mean)
  load(paste0(dirresults,"/cfs_WA.RData"))
  load(paste(dirwindparks,"/windparks_complete.RData",sep=""))
  windparks <- data.frame(windparks, comdate=as.POSIXct(paste(windparks$year,"-",windparks$month,"-",windparks$day," 00:00:00",sep=""), tz="UTC"))
  windparks <- windparks[which(windparks$comdate < as.POSIXct("2017-08-31 00:00:00", tz="UTC")),]
  # NOTE(review): date.start here is read from the global environment before
  # being reassigned locally further below -- confirm this is intended
  windparks$comdate[which(windparks$comdate<date.start)] <- date.start
  # extract information on wind turbine type, rotor diameter, capacity of wind turbines, and number of installed turbines
  # turbine type
  ind_type <- which(!is.na(windparks$turbines))
  windparks$type <- NA
  windparks$type[ind_type] <- sapply(strsplit(sapply(strsplit(windparks$turbines[ind_type],":"),"[[",2),"[(]"),"[[",1)
  # rotor diameter
  ind_diam <- ind_cap <- which(unlist(lapply(strsplit(windparks$turbines,"power"),length))==2)
  windparks$diam <- NA
  windparks$diam[ind_diam] <- as.numeric(sapply(strsplit(sapply(strsplit(windparks$turbines[ind_diam],"diameter"),"[[",2),"m\\)"),"[[",1))
  # capacity
  windparks$tcap <- NA
  windparks$tcap[ind_diam] <- as.numeric(sapply(strsplit(sapply(strsplit(windparks$turbines[ind_diam],"power"),"[[",2),"kW"),"[[",1))
  # number of turbines
  nturb <- gsub("turbines","",sapply(strsplit(windparks$turbines,":"),"[[",1))
  nturb <- gsub("turbine","",nturb)
  nturb <- as.numeric(gsub("Turbine\\(s\\)","",nturb))
  windparks$n <- nturb
  # fill in missing information with mean number of turbines
  windparks$n[is.na(windparks$n)] <- mean(as.numeric(windparks$n), na.rm=TRUE)
  # add specific turbine power (in W, for using Ryberg power curve model)
  # see https://doi.org/10.1016/j.energy.2019.06.052
  windparks$sp <- windparks$tcap*1000/(windparks$diam^2/4*pi)
  # fill in missing information with mean specific power weighted by number of turbines and year
  ind_sp <- which(!is.na(windparks$sp))
  yearly_sp <- aggregate((windparks$sp*windparks$n)[ind_sp], by=list(year(windparks$comdate)[ind_sp]), mean)
  yearly_sp[,2] <- yearly_sp[,2]/aggregate(windparks$n[ind_sp], by=list(year(windparks$comdate)[ind_sp]), mean)[,2]
  windparks$sp[is.na(windparks$sp)] <- yearly_sp[match(year(windparks$comdate), yearly_sp[,1])[is.na(windparks$sp)],2]
  # add hypothetical hubheight (is not included in dataset but linear function to estimate it from rotor diameter was fitted from US wind turbine database)
  windparks$hh <- 0.4761*windparks$diam + 36.5295
  # fill in missing values with mean hh weighted by number of turbines and year
  ind_hh <- which(!is.na(windparks$hh))
  yearly_hh <- aggregate((windparks$hh*windparks$n)[ind_hh], by=list(year(windparks$comdate)[ind_hh]), mean)
  yearly_hh[,2] <- yearly_hh[,2]/aggregate(windparks$n[ind_hh], by=list(year(windparks$comdate)[ind_hh]), mean)[,2]
  windparks$hh[is.na(windparks$hh)] <- yearly_hh[match(year(windparks$comdate), yearly_hh[,1])[is.na(windparks$hh)],2]
  statpowlist <- list()
  # list to see which ones were corrected
  corlist <- rep(0, length(windparks[,1]))
  for(ind in seq_len(nrow(windparks))){
    if(ind%%10==0){print(ind)}
    pplon <- windparks$long[ind]
    pplat <- windparks$lat[ind]
    # find nearest neighbour MERRA and extrapolate to hubheight
    long <<- pplon
    lat <<- pplat
    date.start <- as.POSIXct("2006-01-01", tz="UTC")
    lldo <<- distanceorder()
    NNmer <- NNdf(method, windparks$hh[ind])
    # great-circle distance (km) to all wind measurement stations
    # (rad is a global degree-to-radian factor defined elsewhere in this file)
    ppINdistance <- 6378.388*acos(sin(rad*pplat) * sin(rad*statlats) + cos(rad*pplat) * cos(rad*statlats) * cos(rad*statlons-rad*pplon))
    # only use if within maximum distance
    if((min(ppINdistance)<INmaxdist) || applylim==0){
      # find data of nearest station
      # which.min returns the FIRST index of the minimum; the original
      # which(x==min(x)) returned a vector on ties, which broke the
      # [[ ]] lookups below via recursive indexing
      statn <- which.min(ppINdistance)
      # correct wind speeds
      if((mhm=="hm") && ((corhm_r[[statn]]>=corrlimit) || applylim==0)){
        corlist[ind] <- 1
        h <- hour(NNmer[,1])
        m <- month(NNmer[,1])
        listhm <- data.frame(h+1, m)
        listcf <- cfhm_r[[statn]][as.matrix(listhm)]
        cwindMER <- NNmer[,2]*listcf
        wsc <- data.frame(time=NNmer[,1], ws=cwindMER)
      }else if((mhm=="m") && ((corm_r[[statn]]>=corrlimit) || applylim==0)){
        corlist[ind] <- 1
        m <- month(NNmer[,1])
        listcf <- cfm_r[[statn]][m,2]
        cwindMER <- NNmer[,2]*listcf
        wsc <- data.frame(time=NNmer[,1], ws=cwindMER)
      }else{
        wsc <- NNmer
      }
      # then apply wind atlas mean wind speed correction
      wsc[,2] <- wsc[,2]*cfs_mean[[ind]]
      names(wsc) <- c("time","ws")
    }else{
      # only apply wind atlas wind speed correction
      wsc <- NNmer
      wsc[,2] <- wsc[,2]*cfs_mean[[ind]]
      names(wsc) <- c("time","ws")
    }
    # cut-out wind speed: 25 m/s (-> set everything above to 0)
    wsc[which(wsc[,2]>25),2] <- 0
    # calculate power output for all hours from power curve in kWh
    # values are interpolated linearly between points of power curve
    # Ryberg power curve model
    RybCoeff <- read.csv(paste0(ryberg_path,"/ryberg_coeff.csv"), sep=",")
    names(RybCoeff) <- c("CF","A","B")
    v <- mapply(function(A,B) exp(A+B*log(windparks$sp[ind])), RybCoeff$A, RybCoeff$B)
    # calculate power output
    statpower <- as.data.frame(approx(x=c(0,v,100), y=c(0,RybCoeff$CF/100,1)*windparks$cap[ind], xout=wsc[,2]))
    # set production before commissioning 0
    statpower$y[which(NNmer$date<windparks$comdate[ind])] <- 0
    # add to results
    statpowlist[[ind]] <- data.frame(NNmer[,1], statpower$y)
  }
  save(corlist, file=paste0(dirresults,"/corlist_wsc_",mhm,".RData"))
  return(statpowlist)
}

# function for writing data frames into csv
# adapted from the function write_dataframes_to_csv from the package sheetr
# df_list is list of dataframes that shall be written to file
# file is filename with path
# Returns the file name invisibly usable by the caller.
write.list2 <- function (df_list, file) {
  headings <- names(df_list)
  separator <- paste(replicate(50, "_"), collapse = "")
  sink(file)
  for (df.i in seq_along(df_list)) {
    # write the list element's name as a section heading, if it has one
    if (headings[df.i] != "") {
      cat(headings[df.i])
      cat("\n")
    }
    write.table(df_list[[df.i]], sep=";")
    cat(separator)
    cat("\n\n")
  }
  sink()
  return(file)
}
48f69697baee385eb87b1b142e34fe829339568e
bbe5ae041a1f8bde6fe00fa6c53b02d9f329983d
/R-Deep-Task1-CLEAN.R
64cf84fb107ec8a2fa73c77622f5fcc35f3c9b75
[]
no_license
ChristianTorrens/DeepAnalysis1
bac8c0a06f691de1b080e202c6680ff5112b3b75
88f8382aa61ebac7ef1213b34353cd3c226ddbdb
refs/heads/master
2020-03-29T21:42:39.568032
2018-10-03T08:00:51
2018-10-03T08:00:51
150,382,644
0
0
null
null
null
null
UTF-8
R
false
false
27,677
r
R-Deep-Task1-CLEAN.R
#https://en.selectra.info/energy-france/guides/electricity-cost#
#Mutate etc#
#https://jules32.github.io/2016-07-12-Oxford/dplyr_tidyr/#

#### Installing and Calling libraries####
# NOTE(review): install.packages()/install_github() calls normally should not
# live inside an analysis script (they re-install on every run); kept as-is.
install.packages("chron")
source("https://raw.githubusercontent.com/iascchen/VisHealth/master/R/calendarHeat.R")
devtools::install_github("hrbrmstr/taucharts")
source("calendarHeat.R")
install.packages("chron")
install.packages("tidyr")
install.packages("lubridate")
install.packages("hydroTSM")
install.packages("ggplot2")
install.packages("bdvis")
install.packages("magrittr")
install.packages("RColorBrewer")
install.packages("grid")
install.packages("zoo")
install.packages("padr")
install.packages("ggalt")
install.packages("taucharts")
install.packages("doBy")
install.packages("highcharter")
library(highcharter)
library(doBy)
library(taucharts)
library(ggalt)
library(padr)
library(zoo)
library(caret)
library(tidyr)
library(dplyr)
library(timeDate)
library(lubridate)
library(readr)
library(hydroTSM)
library(ggplot2)
library(caret)
library(magrittr)
library(RColorBrewer)
library(chron)
library(bdvis)
library(grid)

#### Importing DataSet####
options(digits=5)
setwd("~/Dropbox/Ubiqum Master/Deep Analytics and Visualization/Task1_DefineDataScienceProject")
# "?" marks missing values in the UCI household power consumption file
household <- read.csv("~/Dropbox/Ubiqum Master/Deep Analytics and Visualization/Task1_DefineDataScienceProject/household_power_consumption.txt", header=TRUE, sep=";", na.strings = c("?"))
View(household)

####Dplyr DateTime####
# build one POSIXct timestamp from the separate Date and Time columns
household$Date <- as.character(household$Date)
household$Time <- as.character(household$Time)
household$DateTime <- paste(household$Date, household$Time)
household$DateTime <- dmy_hms(household$DateTime)
household$DateTime <- with_tz(household$DateTime, "Europe/Paris")
#household$DateTime <- strptime(household$DateTime, "%d/%m/%Y %H:%M:%S", tz= "Europe/Paris")
#household$Date<-as.POSIXct(household$DateTime,tz= "Europe/Paris" )
str(household)

####APPLY DAYLIGHT SAVINGS####
#https://cran.r-project.org/web/packages/padr/vignettes/padr_implementation.html
#Define a time Period#
SET_2007_SummerTime <- interval(ymd_hms('2007-03-25 2:00:00'), ymd_hms('2007-10-28 2:59:00'))
SET_2008_SummerTime <- interval(ymd_hms('2008-03-25 2:00:00'), ymd_hms('2008-10-28 2:59:00'))
SET_2009_SummerTime <- interval(ymd_hms('2009-03-25 2:00:00'), ymd_hms('2009-10-28 2:59:00'))
SET_2010_SummerTime <- interval(ymd_hms('2010-03-25 2:00:00'), ymd_hms('2010-10-28 2:59:00'))
# NOTE(review): 2011 is defined but not used in the correction below
# (presumably because the data ends in November 2010 -- confirm)
SET_2011_SummerTime <- interval(ymd_hms('2011-03-25 2:00:00'), ymd_hms('2011-10-28 2:59:00'))

####THE NEW FUNCTION WAS TAKING WAS TOO LONG TO RUN####
#Daylight_func<- function(dia){
#if( dia %within% SET_2007_SummerTime) { household$DateTime <- with_tz(household$DateTime,tz= "CEST")}
#if( dia %within% SET_2007_WinterTime) {household$DateTime <- with_tz(household$DateTime,tz= "CET")}
#if ( dia %within% SET_2008_SummerTime) { household$DateTime <- with_tz(household$DateTime,tz= "CEST")}
#if( dia %within% SET_2008_WinterTime) {household$DateTime <- with_tz(household$DateTime,tz= "CET")}
#if ( dia %within% SET_2009_SummerTime) { household$DateTime <- with_tz(household$DateTime,tz= "CEST")}
#if( dia %within% SET_2009_WinterTime) {household$DateTime <- with_tz(household$DateTime,tz= "CET")}
#if ( dia %within% SET_2010_SummerTime) { household$DateTime <- with_tz(household$DateTime,tz= "CEST")}
#if( dia %within% SET_2010_WinterTime) {household$DateTime <- with_tz(household$DateTime,tz= "CET")}
#if ( dia %within% SET_2011_SummerTime) { household$DateTime <- with_tz(household$DateTime,tz= "CEST")}
#}
#household$DateTime <- sapply(household$DateTime, Daylight_func)

# FIX(review): the original assignment used `%in%` against lubridate Interval
# objects (which is not an interval-membership test) inside a scalar `if` over
# a whole vector (errors on R >= 4.2, and before that silently used only the
# first element). Use `%within%` -- as the commented-out Daylight_func above
# intended -- and fully vectorized arithmetic instead.
in_summer_time <- (household$DateTime %within% SET_2007_SummerTime) |
  (household$DateTime %within% SET_2008_SummerTime) |
  (household$DateTime %within% SET_2009_SummerTime) |
  (household$DateTime %within% SET_2010_SummerTime)
# shift summer-time observations forward by one hour
household$SDateTime <- household$DateTime + dhours(ifelse(in_summer_time, 1, 0))

####CHANGING BACK THE FORMAT OF DATE TIME####
household$DateTime <- as.POSIXct(household$DateTime, origin = "1970-01-01")

####Checking that Changes summer to winter Hours and winter to Summer were properly made####
household[141577,]$DateTime #Ok
household[141576,]$DateTime
as.POSIXct(1193536800, origin = "1970-01-01", tz = "Europe/Paris") #To see what the Seconds strip of time means in Date#
as.POSIXct(1193536860, origin = "1970-01-01", tz = "Europe/Paris")
#https://stat.ethz.ch/R-manual/R-devel/library/base/html/as.POSIXlt.html

####DAYLIGHT SAVINGS ONLY WORKING ON THE FIRST CHANFE
which(household$DateTime == "2008-03-25 2:00:00")
household[668617,]$DateTime
household[668618,]$DateTime
which(household$DateTime == "2008-10-28 3:00:00")
household[981157,]$DateTime
household[,]$DateTime
which(household$DateTime == "2009-03-25 2:00:00")
household[1194217,]$DateTime
household[1194240,]$DateTime
as.POSIXct(1237944180, origin = "1970-01-01", tz = "Europe/Paris")
which(household$DateTime == "2009-10-28 3:00:00")
household[1506757,]$DateTime
household[,]$DateTime
which(household$DateTime == "2010-03-25 2:00:00")
household[1719817,]$DateTime
household[,]$DateTime
which(household$DateTime == "2010-10-28 3:00:00")
household[2032297,]$DateTime
household[,]$DateTime

####Finding NAs####
MatrixOfNAs <- filter(household, is.na(Global_active_power))
RowsOfNAs <- which(is.na(household$Global_active_power))
write.csv(RowsOfNAs, "typeofna.csv")
sum(is.na(household))
####Replacing NA´s of less than 3 hours.
THOSE THAT ARE JUST ENERGY CUTS#### household$Global_active_power<- na.locf(household$Global_active_power, na.rm = FALSE, fromLast = FALSE, maxgap = 180) sum(is.na(household$Global_active_power))#to see how many NA household$Global_reactive_power<- na.locf(household$Global_reactive_power, na.rm = FALSE, fromLast = FALSE, maxgap = 180) household$Voltage<- na.locf(household$Voltage, na.rm = FALSE, fromLast = FALSE, maxgap = 180) household$Global_intensity<- na.locf(household$Global_intensity, na.rm = FALSE, fromLast = FALSE, maxgap = 180) household$Sub_metering_1<- na.locf(household$Sub_metering_1, na.rm = FALSE, fromLast = FALSE, maxgap = 180) household$Sub_metering_2<- na.locf(household$Sub_metering_2, na.rm = FALSE, fromLast = FALSE, maxgap = 180) household$Sub_metering_3<- na.locf(household$Sub_metering_3, na.rm = FALSE, fromLast = FALSE, maxgap = 180) ####Replacing rest of NA´s of less than 3 hours. THOSE THAT ARE JUST ENERGY CUTS#### household$Global_active_power[is.na(household$Global_active_power)]<-0 sum(is.na(household$Global_active_power)) household$Global_reactive_power[is.na(household$Global_reactive_power)]<-0 household$Global_intensity[is.na(household$Global_intensity)]<-0 household$Voltage[is.na(household$Voltage)]<-0 household$Sub_metering_1[is.na(household$Sub_metering_1)]<-0 household$Sub_metering_2[is.na(household$Sub_metering_2)]<-0 household$Sub_metering_3[is.na(household$Sub_metering_3)]<-0 sum(is.na(household)) ####Columns with SAME ENERGY MEASURING METRICS#### household<-household %>% mutate(Global_ConsumptionKWh=((household$Global_active_power)/60)) household<-household %>% mutate(Global_Consumption_reactiveKWh=((household$Global_reactive_power)/60)) household<-household %>% mutate(Submetter1_kwh=(household$Sub_metering_1/1000)) household<-household %>% mutate(Submetter2_kwh=(household$Sub_metering_2/1000)) household<-household %>% mutate(Submetter3_kwh=(household$Sub_metering_3/1000)) ####Create Month,Week, Day, WeekDay,Season column#### 
household$Hora <- hour(household$DateTime) sum(is.na(household$Hora)) household$Mes <- month(household$Date) sum(is.na(household$Mes)) household$Semana <- week(household$DateTime) sum(is.na(household$Semana)) household$Dia <- day(household$DateTime) sum(is.na(household$Dia)) household$DiaSemana <- wday(household$DateTime, label = TRUE, abbr = FALSE) sum(is.na(household$DiaSemana)) household$Season<- quarter(household$DateTime) sum(is.na(household$Season)) ###Creating a Column with the Names of the Season### household$SeasonWNames <-"" household$SeasonWNames[household$Season == "1"] <- "Winter" household$SeasonWNames[household$Season == "2"] <- "Spring" household$SeasonWNames[household$Season == "3"] <- "Summer" household$SeasonWNames[household$Season == "4"] <- "Fall" View(household) sum(is.na(household$SeasonWNames)) ####Creating a Year columne### household$Any<- year(household$DateTime) sum(is.na(household$Any)) ####LINEAR CORRELATION BETWEEN ATTRIBUTES TO DELETE SOME ALREADY AND SIMPLIFY ALL COMPUTING PROCESSES#### #TO DO HERE: Correlation Matrix# ####CREATING A HOUSEHOLD 2 DATASET#### #1s Merge the hours to reduce input# household2<- household #creating a new exact subset to be able to step backwards easily if needed household2$Global_active_power<- NULL household2$Global_reactive_power<- NULL household2$Global_intensity<- NULL household2$Voltage<- NULL household2$Sub_metering_1<- NULL household2$Sub_metering_2<- NULL household2$Sub_metering_3<- NULL View(household2) str(household2) ####HOUSEHOLD HISTOGRAM PER SEASON #### householdSeason<- household2 householdSeason<- householdSeason%>% group_by(SeasonWNames)%>% mutate(SumaSeasonKW = sum(Global_ConsumptionKWh)) householdSeason<- householdSeason%>% group_by(SeasonWNames)%>%mutate(SumaSeasonReactiveKW = sum(Global_Consumption_reactiveKWh)) householdSeason<- householdSeason%>% group_by(SeasonWNames)%>%mutate(SumaSeasonKitchenKW = sum(Submetter1_kwh)) householdSeason<- householdSeason%>% 
group_by(SeasonWNames)%>%mutate(SumaSeasonLaundryKW = sum(Submetter2_kwh)) householdSeason<- householdSeason%>% group_by(SeasonWNames)%>%mutate(SumaSeasonHeaterKW = sum(Submetter3_kwh)) householdSeasonGood<- householdSeason householdSeasonGood<- select(householdSeasonGood, Any, SeasonWNames,SumaSeasonKW,SumaSeasonReactiveKW,SumaSeasonLaundryKW,SumaSeasonHeaterKW ) householdSeasonGood<- distinct(householdSeasonGood) View(householdSeasonGood) ####QUICK PLOTTING OF TOTAL CONSUMPTION PER SEASON### #http://www.cookbook-r.com ggplot(data=householdSeasonGood, aes(x=SeasonWNames, y=SumaSeasonKW, fill=Any)) + facet_wrap( ~ Any)+ geom_bar(stat="identity", position=position_dodge()) ####HOUSEHOLD HISTOGRAM PER MONTH #### householdMonth<- household2 householdMonth<- householdMonth%>% group_by(Mes)%>% mutate(SumaMonthKW = sum(Global_ConsumptionKWh)) householdMonth<- householdMonth%>% group_by(Mes)%>%mutate(SumaMonthReactiveKW = sum(Global_Consumption_reactiveKWh)) householdMonth<- householdMonth%>% group_by(Mes)%>%mutate(SumaMonthKitchenKW = sum(Submetter1_kwh)) householdMonth<- householdMonth%>% group_by(Mes)%>%mutate(SumaMonthLaundryKW = sum(Submetter2_kwh)) householdMonth<- householdMonth%>% group_by(Mes)%>%mutate(SumaMonthHeaterKW = sum(Submetter3_kwh)) householdMonthGood<- householdMonth householdMonthGood$MonthWNames <-"" householdMonthGood$MonthWNames[household$Mes == "1"] <- "Jan" householdMonthGood$MonthWNames[household$Mes == "2"] <- "Feb" householdMonthGood$MonthWNames[household$Mes == "3"] <- "Mar" householdMonthGood$MonthWNames[household$Mes == "4"] <- "Apr" householdMonthGood$MonthWNames[household$Mes == "5"] <- "May" householdMonthGood$MonthWNames[household$Mes == "6"] <- "Jun" householdMonthGood$MonthWNames[household$Mes == "7"] <- "Jul" householdMonthGood$MonthWNames[household$Mes == "8"] <- "Aug" householdMonthGood$MonthWNames[household$Mes == "9"] <- "Sep" householdMonthGood$MonthWNames[household$Mes == "10"] <- "Oct" 
householdMonthGood$MonthWNames[household$Mes == "11"] <- "Nov" householdMonthGood$MonthWNames[household$Mes == "12"] <- "Dec" householdMonthGood<- select(householdMonthGood, Any, MonthWNames, Mes, SumaMonthKW,SumaMonthReactiveKW,SumaMonthLaundryKW,SumaMonthHeaterKW ) householdMonthGood<- distinct(householdMonthGood) View(householdMonthGood) ####QUICK PLOTTING OF TOTAL CONSUMPTION PER SEASON### #http://www.cookbook-r.com ggplot(data=householdMonthGood, aes(x=MonthWNames, y=SumaMonthKW, fill=Any)) + facet_wrap( ~ Any)+ geom_bar(stat="identity", position=position_dodge()) ggplot(data=householdMonthGood, aes(x=MonthWNames , y=SumaMonthKW, group=Any, colour=Any)) + geom_line()+theme_bw()+ geom_point()+facet_wrap(facets = Any ~ .)#, margins = FALSE) ####HOUSEHOLD HISTOGRAM PER WEEK #### householdWeek<- household2 householdWeek<- householdWeek%>% group_by(Semana)%>% mutate(SumaWeekKW = sum(Global_ConsumptionKWh)) householdWeek<- householdWeek%>% group_by(Semana)%>%mutate(SumaWeekReKW = sum(Global_Consumption_reactiveKWh)) householdWeek<- householdWeek%>% group_by(Semana)%>%mutate(SumaWeekKitchenKW = sum(Submetter1_kwh)) householdWeek<- householdWeek%>% group_by(Semana)%>%mutate(SumaWeekLaundryKW = sum(Submetter2_kwh)) householdWeek<- householdWeek%>% group_by(Semana)%>%mutate(SumaWeekHeaterKW = sum(Submetter3_kwh)) householdWeekGood<- householdWeek householdWeekGood<- select(householdWeekGood, Any, Semana, Mes, SumaWeekKW,SumaWeekReKW,SumaWeekLaundryKW,SumaWeekHeaterKW ) householdWeekGood<- distinct(householdWeekGood) View(householdWeekGood) unique(householdWeekGood$Semana) ####QUICK PLOTTING OF TOTAL CONSUMPTION PER SEASON### #http://www.cookbook-r.com ggplot(data=householdWeekGood, aes(x=Semana, y=SumaWeekKW, fill=Any)) + facet_wrap( ~ Any)+ geom_bar(stat="identity", position=position_dodge()) ggplot(data=householdWeekGood, aes(x= Semana, y=SumaWeekKW, group=Any, colour=Any)) + geom_line()+theme_bw()+ geom_point()+facet_wrap(facets = Any ~ .)#, margins = FALSE) 
####HOUSEHOLD HISTOGRAM WEEKDAY/PER MONTH #### householdWDayMonth<- household2 householdWDayMonth<- householdWDayMonth%>% group_by(DiaSemana)%>% mutate(SumaWeekDayKW = sum(Global_ConsumptionKWh)) householdWDayMonth<- householdWDayMonth%>% group_by(DiaSemana)%>%mutate(SumaWeekDayReKW = sum(Global_Consumption_reactiveKWh)) householdWDayMonth<- householdWDayMonth%>% group_by(DiaSemana)%>%mutate(SumaWeekDayKitchenKW = sum(Submetter1_kwh)) householdWDayMonth<- householdWDayMonth%>% group_by(DiaSemana)%>%mutate(SumaWeekDayLaundryKW = sum(Submetter2_kwh)) householdWDayMonth<- householdWDayMonth%>% group_by(DiaSemana)%>%mutate(SumaWeekDayHeaterKW = sum(Submetter3_kwh)) householdWDayMonthGood<- householdWDayMonth householdWDayMonthGood<- select(householdWDayMonthGood, Any, DiaSemana, Mes, SumaWeekDayKW,SumaWeekDayReKW,SumaWeekDayKitchenKW,SumaWeekDayLaundryKW,SumaWeekDayHeaterKW ) householdWDayMonthGood<- distinct(householdWDayMonthGood) View(householdWDayMonthGood) unique(householdWDayMonthGood$DiaSemana) ####QUICK PLOTTING OF TOTAL CONSUMPTION PER SEASON### #http://www.cookbook-r.com ggplot(data=householdWDayMonthGood, aes(x= DiaSemana, y=SumaWeekDayKW, group= Mes, colour=Mes)) + geom_line()+theme_bw()+ geom_point()+facet_wrap(facets = Mes ~ .)#, margins = FALSE) ####Prep: Collapsing Consumptions into Hour. 
(Suma horas here: Called HouseholdHOURComplete) #### householdHOUR<- household2 householdHOUR<- householdHOUR%>% group_by(Any,Mes,Dia,Hora)%>% mutate(SumaHoraKW = sum(Global_ConsumptionKWh)) householdHOUR<- householdHOUR%>% group_by(Any,Mes,Dia,Hora)%>%mutate(SumaHoraReKW = sum(Global_Consumption_reactiveKWh)) householdHOUR<- householdHOUR%>% group_by(Any,Mes,Dia,Hora)%>%mutate(SumaKitchenKW = sum(Submetter1_kwh)) householdHOUR<- householdHOUR%>% group_by(Any,Mes,Dia,Hora)%>%mutate(SumaLaundryKW = sum(Submetter2_kwh)) householdHOUR<- householdHOUR%>% group_by(Any,Mes,Dia,Hora)%>%mutate(SumaHeaterKW = sum(Submetter3_kwh)) unique(householdHOUR$SumaHoraKW) unique(householdHOUR$SumaHoraReKW) unique(householdHOUR$SumaKitchenKW) unique(householdHOUR$SumaLaundryKW) unique(householdHOUR$SumaHeaterKW) householdHOURComplete<- householdHOUR str(householdHOUR) View(householdHOUR) householdHOUR<- select(householdHOUR, Any, Hora, Mes, Dia,DiaSemana,SumaHoraKW,SumaHoraReKW,SumaKitchenKW,SumaLaundryKW,SumaHeaterKW) householdHOUR<-distinct(householdHOUR) str(householdHOUR) View(householdHOUR) ####CHECKING PER HOUR WITHIN DAY OF WEEK#### #warning. Solve this! 
DIAMESHORA<- householdHOUR DIAMESHORA$DateTime<- format(as.POSIXct(DIAMESHORA$DateTime,format='%m/%d/%Y %H'),format='%m/%d/%Y') #To give DateTime a time format again DIAMESHORA<- select(DIAMESHORA,DateTime, Any, Hora, Mes, Dia,DiaSemana,SumaHoraKW,SumaHoraReKW,SumaKitchenKW,SumaLaundryKW,SumaHeaterKW) DIAMESHORA<- distinct(DIAMESHORA) DIAMESHORAGOOD<-DIAMESHORA3 DIAMESHORAGOOD<- DIAMESHORAGOOD%>% group_by(Any,Mes,Dia)%>% mutate(SumaDiaKW = sum(SumaHoraKW)) DIAMESHORAGOOD<- DIAMESHORAGOOD%>% group_by(Any,Mes,Dia)%>% mutate(SumaDiaReKW = sum(SumaHoraReKW)) DIAMESHORAGOOD<- DIAMESHORAGOOD%>% group_by(Any,Mes,Dia)%>% mutate(SumaDiaKitchen = sum(SumaKitchenKW)) DIAMESHORAGOOD<- DIAMESHORAGOOD%>% group_by(Any,Mes,Dia)%>% mutate(SumaDiaLaundry = sum(SumaLaundryKW)) DIAMESHORAGOOD<- DIAMESHORAGOOD%>% group_by(Any,Mes,Dia)%>% mutate(SumaDiaHeater = sum(SumaHeaterKW)) ####GRAPHS CONSUMPTION PER HOUR ON EACH DAY OF THE WEEK#### DIASEMANACTIVE<-household2%>% group_by(DiaSemana) %>% summarise(NewGlobalActive= sum(Global_ConsumptionKWh)) DIASEMANACTIVE DIASEMANHORAACTIVE<-household2%>% group_by(DiaSemana, Hora) %>% summarise(NewGlobalActive= sum(Global_ConsumptionKWh)) DIASEMANAHORACTIVE HORAACTIVE<-household2%>% group_by(Hora) %>% summarise(NewGlobalActive= sum(Global_ConsumptionKWh)) HORAACTIVE household2$HourKWSum<-household2%>% group_by(Hora) %>% mutate(ActivePerHour= sum(Global_ConsumptionKWh)) HORAACTIVE #2 Variables Bar plot ggplot(data=DIASEMANAHORAACTIVE, aes(x=DiaSemana, y=NewGlobalActive, fill = DiaSemana)) + geom_bar(stat="identity") #3 Variables Bar plot ggplot(data=DIASEMANHORAACTIVE, aes(x=Hora, y=NewGlobalActive, fill=DiaSemana)) + geom_bar(stat="identity", position=position_dodge()) ##ultiple Graphs #Line Graph WeekDay, Hour, Active Consumption# p1 <- ggplot(DIASEMANHORAACTIVE, aes(x=Hora, y=NewGlobalActive, colour=DiaSemana, group=DiaSemana)) + geom_line() p1 #Line Graph WeekDay, Hour, Active Consumption with trends# p2 <- ggplot(DIASEMANHORAACTIVE, aes(x=Hora, 
y=NewGlobalActive, colour=DiaSemana)) + geom_point(alpha=.3) + geom_smooth(alpha=.2, size=1) p2 ####365 Days per Year#### DIAMES<- householdHOURComplete DIAMES$DiaDelAny<-"" DIAMES$DiaDelAny<- strftime(DIAMES$DateTime, format = "%j", tz = "Europe/Paris")####to see which Day of the year it is DIAMES$DiaDelAny<-as.numeric(DIAMES$DiaDelAny) DIAMES<- DIAMES%>% group_by(DiaDelAny)%>% mutate(SumaDayKW = sum(SumaHoraKW)) DIAMES<- DIAMES%>% group_by(DiaDelAny)%>%mutate(SumaDayReKW = sum(SumaHoraReKW)) DIAMES<- DIAMES%>% group_by(DiaDelAny)%>%mutate(SumaDayKitchenKW = sum(SumaKitchenKW)) DIAMES<- DIAMES%>% group_by(DiaDelAny)%>%mutate(SumaDayLaundryKW = sum(SumaLaundryKW)) DIAMES<- DIAMES%>% group_by(DiaDelAny)%>%mutate(SumaDayHeaterKW = sum(SumaHeaterKW)) View(DIAMES) unique(DIAMES$SumaDayKW) unique(DIAMES$SumaDayReKW) unique(DIAMES$SumaDayKitchenKW) unique(DIAMES$SumaDayLaundryKW) unique(DIAMES$SumaDayHeaterKW) unique(DIAMES$DiaDelAny) DIAMES$SumaHoraKW<- NULL DIAMES$SumaHoraReKW<- NULL DIAMES$SumaKitchenKW<- NULL DIAMES$SumaLaundryKW<- NULL DIAMES$SumaHeaterKW<- NULL DIAMES$Hora<- NULL DIAMES$Time<- NULL DIAMES$DateTime<- NULL DIAMES$Global_ConsumptionKWh<- NULL DIAMES$Global_Consumption_reactiveKWh<- NULL DIAMES$Submetter1_kwh<- NULL DIAMES$Submetter2_kwh<- NULL DIAMES$Submetter3_kwh<- NULL DIAMES<- distinct(DIAMES) str(DIAMES) View(DIAMES) ggplot(data=DIAMES, aes(x=DiaDelAny , y=SumaDayKW, group=Any, colour=Any)) + geom_line()+theme_bw()+ geom_point()+facet_wrap(facets = Any ~ .)#, margins = FALSE) p1 <- ggplot(DIAMES, aes(x=DiaDelAny, y=SumaDayKW, colour=Any, group=Any)) + geom_line() p1 ####SET RULES OF OUTLIERS PER DAY#### boxplot(DIAMES$SumaDayKW) #Consumption Outlier Rule boxplot(DIAMES$SumaDayReKW) #Reactive Outlier Rule boxplot(DIAMES$SumaDayKitchenKW) #Kitchen Outlier Rule boxplot(DIAMES$SumaDayLaundryKW) #Laundry Outlier Rule boxplot(DIAMES$SumaDayHeaterKW) #Heater Outlier Rule ####DETECTING WHICH ARE THE ROWS OF THE OUTLIERS### #For Rective Power 
unique(DIAMES$SumaDayReKW) OutliersRE <- which(DIAMES$SumaDayReKW >= 1000) OutliersRE #Checking one of those days DIAMES[384,]$Date DIAMES[384,] #WANT TO CREATE A SUBSET WITH ALL THE INFO THOSE.. ####REACTIVE vs ACTIVE ENERGY#### ####ENERGY CONSUMPTION PER MONTH#### ggplot(data=household_MONTHYEAR, aes(household_MONTHYEAR$MonthAbb, group=1))+ #geom_line(aes(y = HPC_My2006$Submetter1_kwh, color="Kitchen")) + #geom_line(aes(y = HPC_My2006$Submetter2_kwh, color="Laundry Room")) + #geom_line(aes(y = HPC_My2006$Submetter3_kwh, color="Heater")) + geom_line(aes(y = household_MONTHYEAR$Global_ConsumptionKWh, color="Active_Power"))+ geom_line(aes(y = household_MONTHYEAR$Global_Consumption_reactiveKWh, color="Reactive_Power"))+ xlab("Year")+ ylab("KWh")+ ggtitle("Energy Consumption by Month")+ scale_y_continuous(labels = function(x) format(x, scientific =FALSE))+ facet_wrap( ~ Any) ####To make two Columns of graphs### http://www.cookbook-r.com/Graphs/Multiple_graphs_on_one_page_(ggplot2)/ #multiplot(p1, p2, p3, p4, cols=2) #> `geom_smooth()` using method = 'loess' #Submetering Graph# #plot(data_sub$DateTime, data_sub$Sub_metering_1, # "n", # xlab = "", #ylab = "Energy sub metering") #points(data_sub$DateTime, data_sub$Sub_metering_1, type = "line") #points(data_sub$DateTime, data_sub$Sub_metering_2, type = "line", col = "red") #points(data_sub$DateTime, data_sub$Sub_metering_3, type = "line", col = "blue") #legend("topright", #legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), #col = c("black", "red", "blue"), #lty = c(1, 1, 1)) #Line Graph WeekDay, Hour, Active Consumption with trends# # This must go after theme_b #####CONSUMPTION REACTIVE PER SUM### household_MONTHYEAR<-household2 %>% select(Any,Mes,Global_active_power, Global_reactive_power,Global_ConsumptionKWh ,Global_Consumption_reactiveKWh,Submetter3_kwh, Submetter2_kwh,Submetter1_kwh) %>% group_by(Any,Mes)%>% #Global_Consumption_reactive=sum(Global_Consumption_reactive)) 
summarise_at(vars(Global_ConsumptionKWh,Global_Consumption_reactiveKWh,Submetter3_kwh, Submetter2_kwh,Submetter1_kwh), funs(sum)) ####June per hour each Year#### DIAMESHORA3$SumaHoraKW<- NULL DIAMESHORA3$SumaHoraReKW<- NULL DIAMESHORA3$SumaKitchenKW<- NULL DIAMESHORA3$SumaLaundryKW<- NULL DIAMESHORA3$SumaHeaterKW<- NULL DIAMESHORA3$Hora<- NULL DIAMESHORA3$DateTime<- NULL DIAMESHORA3<- distinct(DIAMESHORA3) str(DIAMESHORA3) View(DIAMESHORA3) household3_hour<-household2 %>% select(Any,Mes,Hora,Global_ConsumptionKWh,Global_Consumption_reactiveKWh, Sub_metering_3,Submetter3_kwh, Submetter2_kwh,Submetter1_kwh) %>% filter(Mes==6 & Any!=2006) %>% group_by(Any,Mes,Hora)%>% #Global_Consumption_reactive=sum(Global_Consumption_reactive) summarise_at(vars(Global_ConsumptionKWh, Global_Consumption_reactiveKWh ,Submetter3_kwh,Submetter2_kwh,Submetter1_kwh),funs(sum)) ####Gives you the name of the month in the graphs as a factor#### household_MONTHYEAR<- transform(household_MONTHYEAR, MonthAbb = month.abb[Mes]) ####Puts them in order#### household_MONTHYEAR$MonthAbb <-factor(household_MONTHYEAR$MonthAbb, levels = c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")) ####plots#### hist(household$Global_ConsumptionKWh, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts/h)") ggplot(data=household_MONTHYEAR, aes(x= Mes, y=Global_ConsumptionKWh, group=Any, colour=Any)) + geom_line()+theme_bw()+ geom_point()+facet_wrap(facets = Any ~ .)#, margins = FALSE) ####ENERGY CONSUMPTION PER MONTH#### ggplot(data=household_MONTHYEAR, aes(household_MONTHYEAR$MonthAbb, group=1))+ #geom_line(aes(y = HPC_My2006$Submetter1_kwh, color="Kitchen")) + #geom_line(aes(y = HPC_My2006$Submetter2_kwh, color="Laundry Room")) + #geom_line(aes(y = HPC_My2006$Submetter3_kwh, color="Heater")) + geom_line(aes(y = household_MONTHYEAR$Global_ConsumptionKWh, color="Active_Power"))+ geom_line(aes(y = household_MONTHYEAR$Global_Consumption_reactiveKWh, 
color="Reactive_Power"))+ xlab("Year")+ ylab("KWh")+ ggtitle("Energy Consumption by Month")+ scale_y_continuous(labels = function(x) format(x, scientific =FALSE))+ facet_wrap( ~ Any) #facet_grid(facets = Any ~ ., margins = FALSE) ####Calendar Heat#### library("chron") calendarHeat(household$Date, household$Global_Consumption_reactiveKWh, varname="Global_reActive_Power") summary(household) ####9#### ggplot(data=household3_My2006, aes(household3_My2006$MonthAbb,group=1))+ #geom_line(aes(y = HPC_My2006$Submetter1_kwh, color="Kitchen")) + #geom_line(aes(y = HPC_My2006$Submetter2_kwh, color="Laundry Room")) + #geom_line(aes(y = HPC_My2006$Submetter3_kwh, color="Heater")) + geom_line(aes(y = household3_My2006$Global_ConsumptionKWh, color="Active_Power"))+ geom_line(aes(y = household3_My2006$Global_reactiveKWh, color="Reactive_Power"))+ xlab("Year")+ ylab("KWh")+ ggtitle("Energy Consumption by Month")+ #scale_x_discrete(labels= month.abb) + #scale_x_date(labels = date_format("%b"))+ #theme(panel.background = element_rect(fill = rgb(248, 236, 212, maxColorValue = 255)))+ #theme_bw()+ scale_y_continuous(labels = function(x) format(x, scientific =FALSE))+ #scale_colour_manual(name='', # values=c('Active_Power'="#CC6666"), # Kitchen', #'Reactive_Power'="blue"), #Laundry Room'="blue", #'Heater'="darkgreen"), #guide='legend') + facet_wrap( ~ Any ) #facet_grid(facets = Year ~ ., margins = FALSE) ####DO NOT USE THE CODE BELOW YET### ####SEASON SUBSETS# household %>% group_by(SeasonWNames) %>% summarise(mean(Global_active_power)) NA2<- which(is.na(household$Global_active_power)) NA2 ####AHORA GRÁFICOS#### ggplot(data=Data_Month, aes(Month)) + facet_wrap( ~ Year) + geom_line(aes(y = Laundry, color="red")) + geom_line(aes(y = Kitchen, color="green")) ####to show graph of the 3 submeterings#### library(ggplot2) library(lubridate) theme_set(theme_bw()) df <- economics_long[economics_long$variable %in% c("psavert", "uempmed"), ] df <- df[lubridate::year(df$date) %in% c(1967:1981), ] # 
labels and breaks for X axis text brks <- df$date[seq(1, length(df$date), 12)] lbls <- lubridate::year(brks) # plot ggplot(df, aes(x=date)) + geom_line(aes(y=value, col=variable)) + labs(title="Time Series of Returns Percentage", subtitle="Drawn from Long Data format", caption="Source: Economics", y="Returns %", color=NULL) + # title and caption scale_x_date(labels = lbls, breaks = brks) + # change to monthly ticks and labels scale_color_manual(labels = c("psavert", "uempmed"), values = c("psavert"="#00ba38", "uempmed"="#f8766d")) + # line color theme(axis.text.x = element_text(angle = 90, vjust=0.5, size = 8), # rotate x axis text panel.grid.minor = element_blank()) ####CREATING SUBSETS#### ### 1. If it is Day of Week and Season### ####WORK DAYS VS HOLIDAYS#### #Based on NAs# ####Filtering Dates#### #https://blog.exploratory.io/filter-with-date-function-ce8e84be680# WeekendsOut<-which(household$DiaSemana == 1 | household$xDiaSemana == 2) household<-household[-WeekendsOut,]
9595537b9aee8686236eed47932e68217d9db984
0d811a56cdbb88d97f48438e2715d62c31400e8b
/1.7 Loops.R
4175a916843f9cda16b7cd2782142b440ef485ab
[]
no_license
cbibinski/Exercises
2bd473d76cfed40bfb4b30ebd873ece9f00b8445
06118f2db2a7519e4c1c2d777d5c3d86c7132476
refs/heads/main
2023-01-06T07:55:00.679127
2020-11-09T05:57:17
2020-11-09T05:57:17
309,395,139
0
0
null
null
null
null
UTF-8
R
false
false
487
r
1.7 Loops.R
library(TurtleGraphics) turtle_init() ## turtle makes a square for (side in 1:4){ turtle_forward(distance = 10) turtle_left(angle = 90) } ## turtle makes a triangle for (side in 1:3){ turtle_forward(distance = 10) turtle_left(angle = 120) } ## print the telegram message until "STOP" appears, using a while loop telegram <- c("All","is","well","here","STOP","This","is","fine") word <- "" ind <- 0 while (word != "STOP"){ ind <- ind + 1 word <- telegram[ind] print(word) }
3176dbf434b2942b1e7137b80d4a3c20fe79a809
514864ae56784472819818e1cb58f998c344dfa1
/R/CDA_function.R
082d49b02ec554a7487d20130779abdb4c42fee1
[]
no_license
cran/factas
ce20bf1f6cfc3bfac1b292557b3652e6338a08c8
6294d79925874da143a3ff1719c88da182f16d8e
refs/heads/master
2016-09-06T17:50:42.099668
2014-01-23T00:00:00
2014-01-23T00:00:00
null
0
0
null
null
null
null
UTF-8
R
false
false
6,602
r
CDA_function.R
CDA<-function(data,groups,stream=TRUE,nb_fact,principal_factors=TRUE,principal_axes=FALSE,eigenvalues=FALSE,corr=FALSE,graphics=FALSE,data_init,exec_time,print_step) { data<-preprocess_CDA(data) p<-dim_y <- ncol(data) dim_r<-groups[1] dim_s<-groups[2] X <- A <- Y <- list() for (i in 1:nb_fact) {X[[i]]<-A[[i]]<-Y[[i]]<-c()} L1 <- vector(length=nb_fact) if (stream==TRUE) { init <- CDA_iter(data[1:data_init,],groups,stream,nb_fact,principal_factors,principal_axes,eigenvalues,corr,exec_time) X <- Y <- init$X L1 <- init$L1 #C <- init$Cov_Y_calc #M <- init$M_calc Nn <- init$N_calc #G <- init$K_calc%*%init$A2_calc A2n <- init$A2_calc A <- init$A Gn <- init$M_calc%*%init$K_calc Bn <- init$B_calc Corr <- init$Corr rbar <- init$rbar sbar <- init$sbar G1n<-rbind(Gn,t(sbar)-t(rbar)%*%Gn) n <- data_init+1 table_coord<-matrix(ncol=nb_fact) table_coord<-NULL x<-c() debut_chrono<-proc.time() pas_print<-print_step eps<-0.9 eps2<-0.6 while ( (n<nrow(data)) && ( (proc.time()-debut_chrono)<exec_time ) ) { an <- (1/(n+1000000000))^(eps) an_vp<-(1/(n+1000000000000))^(eps) an2<-(1/(n+100000000))^(eps2) y <- t(t(data[n,])) r<-t(t(y[1:dim_r])) s<-t(t(y[(dim_r+1):dim_y])) k<-0 l<-1 while (k==0){if (s[l]==1){k=l} else {l<-l+1}} r1<-rbind(r,1) s1<-rbind(s,1) rcentre <- r-t(t(rbar)) if (!identical(Y,NULL)) { X <- orth_norm_Gram_Schmidt(Nn,Y) } if (n%%pas_print==0){writeLines(c(paste("n=",n),"\n"))} if (( principal_factors==TRUE)&&(n%%pas_print==0)) { for (i in 1:nb_fact) {writeLines(c(paste("factor ",i,"\n"),X[[i]],"\n"))} } for (i in 1:nb_fact) { Bn<-Gn%*%A2n alpha<-Bn%*%X[[i]] dzeta<-Nn%*%X[[i]] FX <- (t(alpha)%*%dzeta)/(t(X[[i]])%*%dzeta) Y[[i]] <- X[[i]] + an * (alpha - as.numeric(FX)*X[[i]]) if ((principal_axes==TRUE)||(corr==TRUE)) { A[[i]]<-Nn%*%X[[i]] if (n%%pas_print==0) {writeLines(c(paste("axis ",i,"\n"),as.vector(A[[i]]),"\n"))} } if ((eigenvalues==TRUE)||(corr==TRUE)){ L1[i] <- L1[i] - an*(L1[i] - as.numeric(FX)) if (n%%pas_print==0) {writeLines(c(paste("eigenvalue 
",i,"\n"),L1[i],"\n"))} } if (corr==TRUE) {Corr[[i]]<-sqrt(abs(L1[i]))*(as.vector(A[[i]])/sqrt(abs(sum(as.vector(A[[i]])^2*solve(Nn)))))/sqrt(diag(Nn)) if (n%%pas_print==0) {writeLines(c(paste("correlation coefficient ",i,"\n"),Corr[[i]],"\n"))} } if (graphics==TRUE){ x[i]<-t(rcentre)%*%X[[i]] } } if (graphics==TRUE) {table_coord<-rbind(table_coord,x)} Nn <- (Nn+rbar%*%t(rbar))*(1-1/n)+r%*%t(r)/n rbar<- rbar-(rbar-r)/n sbar<- sbar-(sbar-s)/n Nn<-Nn - rbar%*%t(rbar) A2n[k,]<-A2n[k,]-an2*(A2n[k,]-t(r)) gamma<-r1%*%t(r1) G1n<-G1n-an2*gamma%*%G1n G1n[,k]<-G1n[,k]+r1 Gn<-G1n[1:dim_r,] n <- n+1 } if (graphics==TRUE) { palette2<-c() palette1<-(palette(gray(seq(0,.9,len=255)))) for (i in 1:length(table_coord[,1])){ palette2[i]<-palette1[floor(i/(floor(nrow(data)/255)+1))+1]} for (i in 1:(nb_fact-1)){ for (j in (i+1):nb_fact){ x<-table_coord[,i] y<-table_coord[,j] x_max<-max(max(abs(x)),1) y_max<-max(max(abs(y)),1) x<-x/x_max y<-y/y_max dev.new() plot(y,x,xlim=c(-1,1),ylim=c(-1,1),col=palette2,xlab= paste("Axe",i),ylab= paste("Axe",j), main=paste("Representation des invidus dans le plan factoriel",i,j)) axis(2,pos=0) axis(1,pos=0) }} if (corr==TRUE){ for (i in 1:(nb_fact-1)){ for (j in (i+1):nb_fact){ dev.new() par(pty="s") plot(as.vector(Corr[[i]]),as.vector(Corr[[j]]), xlim=c(-1,1), ylim=c(-1,1),pch="+",xlab=paste("Axe",i),ylab=paste("Axe",j),main=paste("Cercle des correlations des variables avec les facteurs",i,"et",j)) text(as.vector(Corr[[i]])+0.06,as.vector(Corr[[j]]), labels=1:ncol(data)) symbols(0,0,circles=1, inches=FALSE, add=TRUE) axis(2,pos=0) axis(1,pos=0) } } } } } else { resultat<-CDA_iter(data,groups,stream,nb_fact,principal_factors,principal_axes,eigenvalues,corr,exec_time) X <- resultat$X L1 <- resultat$L1 A <- resultat$A Corr <- resultat$Corr rbar<-resultat$rbar sbar<-resultat$sbar if (principal_axes==TRUE) { for (i in 1:nb_fact){writeLines(c(paste("axis ",i,"\n"),as.vector(A[[i]]),"\n"))} } if (principal_factors==TRUE) { for (i in 
1:nb_fact){writeLines(c(paste("factor ",i,"\n"),X[[i]],"\n"))} } if (eigenvalues==TRUE) { for (i in 1:nb_fact){writeLines(c(paste("eigenvalue ",i,"\n"),L1[i],"\n"))} } if (corr==TRUE) { for (i in 1:nb_fact){writeLines(c(paste("correlation coefficient ",i,"\n"),Corr[[i]],"\n"))} } if (graphics==TRUE){ palette2<-c() palette1<-(palette(gray(seq(0,1,len=255)))) for (i in 1:nrow(data)){ palette2[i]<-palette1[floor(i/(floor(nrow(data)/255)+1))+1]} for (i in 1:(nb_fact-1)){ for (j in (i+1):nb_fact){ x<-t(t(data[,1:dim_r])-rbar)%*%X[[i]] y<-t(t(data[,1:dim_r])-rbar)%*%X[[j]] x_max<-max(max(abs(x)),1) y_max<-max(max(abs(y)),1) x<-x/x_max y<-y/y_max dev.new() plot(y,x,xlim=c(-1,1),ylim=c(-1,1),col=palette2,xlab= paste("Axe",i),ylab= paste("Axe",j), main=paste("Representation des invidus dans le plan factoriel",i,j)) axis(2,pos=0) axis(1,pos=0) }} if(corr==TRUE){ for (i in 1:(nb_fact-1)){ for (j in (i+1):nb_fact){ dev.new() par(pty="s") plot(as.vector(Corr[[i]]),as.vector(Corr[[j]]), xlim=c(-1,1), ylim=c(-1,1),pch="+",xlab=paste("Axe",i),ylab=paste("Axe",j),main=paste("Cercle des correlations des variables avec les facteurs",i,"et",j)) text(as.vector(Corr[[i]])+0.06,as.vector(Corr[[j]]), labels=1:ncol(data)) symbols(0,0,circles=1, inches=FALSE, add=TRUE) axis(2,pos=0) axis(1,pos=0) }} } } } }
144f74195c81c8548e344d92d88ba1f7462b292f
1f33d96b17045eb81c4e509aa98bf946f390bef4
/histo.R
56c36fefa689aeb22b4cf7928b1a7658fc215fa3
[]
no_license
georgeredinger/DressageHorsePriceDistribution
392eaad5da8e98af61b160d1d8f6cd0c5394373b
ce587d1116d688575d27a477fec06e4fffdff00d
refs/heads/master
2020-12-24T14:17:46.430771
2013-11-12T19:57:37
2013-11-12T19:57:37
null
0
0
null
null
null
null
UTF-8
R
false
false
109
r
histo.R
data <- read.csv("prices.dat") print(data[,1]) png(filename="histo.png") hist(data[,1],breaks=20) dev.off()
02a3eeb29ec13e1821df1a1d63c0d275daa9c092
c0995410addadff0b5ec94ff06c61a2d33568ce4
/Code/table1.R
6737979102ce05ed458fa7ba15b04fbd62d13d08
[ "CC-BY-4.0", "LicenseRef-scancode-unknown-license-reference" ]
permissive
kotrinak/sociality
4124fc617b2957393bd071fd6523500236413431
244ec15bef50114e7dafe038ca9f767d3e85571c
refs/heads/master
2023-04-12T17:59:29.068654
2022-04-11T22:12:33
2022-04-11T22:12:33
281,337,915
0
0
null
null
null
null
UTF-8
R
false
false
1,260
r
table1.R
# Kotrina Kajokaite
# Builds Table 1 (sample sizes per dataset) for Kajokaite et al. 2022.

library(xtable)
library(rethinking)

# Load the three survival datasets: all group members, adult females (ff),
# and adult males (fm).
sla <- read.csv("Data/survival_dataset_all.csv", stringsAsFactors = FALSE)
slf <- read.csv("Data/survival_dataset_ff.csv", stringsAsFactors = FALSE)
slm <- read.csv("Data/survival_dataset_fm.csv", stringsAsFactors = FALSE)

# Keep only rows with complete cases in the ff and fm datasets.
slf <- slf[complete.cases(slf), ]
slm <- slm[complete.cases(slm), ]

# Per-dataset summaries: number of distinct females, female-years, and deaths.
datasets <- list(sla, slf, slm)
n_females      <- vapply(datasets, function(d) length(unique(d$female)), integer(1))
n_female_years <- vapply(datasets, function(d) length(d$female), integer(1))
n_deaths       <- vapply(datasets, function(d) sum(d$died), numeric(1))

# TABLE 1: assemble and print as LaTeX with zero decimal places, no row names.
d <- data.frame(
  Dataset        = c("All group members", "Adult females", "Adult males"),
  NumFemales     = n_females,
  NumFemaleYears = n_female_years,
  NumDeaths      = n_deaths
)
print(xtable(d, digits = c(0, 0, 0, 0, 0)), include.rownames = FALSE)
a6889c4aeccb769caed2b4e395ec5fb1d423510d
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/decon/examples/DeconCdf.Rd.R
295c64745a3c541931223cfe86da3b4d59644b30
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
1,072
r
DeconCdf.Rd.R
library(decon)

### Name: DeconCdf
### Title: Estimating cumulative distribution function from data with
###   measurement error
### Aliases: DeconCdf
### Keywords: nonparametric smooth measurement error

### ** Examples

#####################
## Smooth empirical distribution function: the average of Gaussian CDF
## kernels centred at each observation, evaluated on a slightly padded grid.
SDF <- function (x, bw = bw.nrd0(x), n = 512, lim = 1) {
  pad <- lim * sd(x) / 20
  grid <- seq(min(x) - pad, max(x) + pad, length = n)
  # One column of kernel CDF values per observation; row means give F-hat.
  kernel_cdfs <- vapply(x, function(obs) pnorm((grid - obs) / bw), numeric(n))
  list(x = grid, y = rowMeans(kernel_cdfs))
}

## Case study: homoscedastic normal errors
n2 <- 1000
x2 <- c(rnorm(n2 / 2, -3, 1), rnorm(n2 / 2, 3, 1))
sig2 <- .8
u2 <- rnorm(n2, sd = sig2)
w2 <- x2 + u2

# Bandwidth chosen by the bootstrap method with resampling, then the
# distribution function estimated under normal measurement error.
bw2 <- bw.dboot2(w2, sig = sig2, error = "normal")
F2 <- DeconCdf(w2, sig2, error = 'normal', bw = bw2)

# Deconvoluted estimate (red, dashed) against the smooth CDFs of the true
# (solid) and contaminated (blue, dotted) samples.
plot(F2, col = "red", lwd = 3, lty = 2, xlab = "x", ylab = "F(x)", main = "")
lines(SDF(x2), lwd = 3, lty = 1)
lines(SDF(w2), col = "blue", lwd = 3, lty = 3)
b27fc55715f57061956dc55565b15caa82fa066a
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/rsdmx/examples/addSDMXServiceProvider.Rd.R
2e7f5705214d1128d50efb88b2bd7417045a37b4
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
704
r
addSDMXServiceProvider.Rd.R
library(rsdmx)

### Name: addSDMXServiceProvider
### Title: addSDMXServiceProvider
### Aliases: addSDMXServiceProvider

### ** Examples

# Build a REST 2.0 request builder pointing at the provider's registry and
# repository endpoints.
builder <- SDMXREST20RequestBuilder(
  regUrl = "http://www.myorg.org/registry",
  repoUrl = "http://www.myorg.org/repository",
  compliant = TRUE
)

# Describe the provider and register it with rsdmx.
provider <- SDMXServiceProvider(
  agencyId = "MYORG",
  name = "My Organization",
  builder = builder
)
addSDMXServiceProvider(provider)

# Check the registration: list the agency ids of all known providers.
vapply(
  slot(getSDMXServiceProviders(), "providers"),
  function(p) slot(p, "agencyId"),
  character(1)
)
4cd9a795bef9614543e1d29db25d798501c2014d
4c7b4c60c3268c1115a70a3adc7c5cc45dbc75c2
/scripts/moby.R
430031d425350ea1788be45888027614df5678b2
[]
no_license
nazabic/data_analysis
cc6d9c7519590a90fbe14b6b71f10ac32e2715aa
8738f5a3882132c300de0bf6db8e8bc2274b6cb3
refs/heads/master
2021-01-16T21:03:32.350206
2016-12-01T17:19:16
2016-12-01T17:19:16
64,401,698
0
0
null
null
null
null
UTF-8
R
false
false
261
r
moby.R
library("poweRlaw") data("moby") m_pl= displ$new(moby) est=estimate_xmin(m_pl) m_pl$setXmin(est) plot(m_pl) lines(m_pl, col=2) m_ln =dislnorm$new(moby) est=estimate_xmin(m_ln) m_ln$setXmin(est) lines(m_ln, col=3) bs=bootstrap(m_pl,no_of_sims=5000,threads=2)
ca07f7357aaf75a7f7cc961654fc77838f89a39a
e876e495beb59578d18d086e93c06d7af92e2231
/plot2.R
53d487c63047a8bde8751cb0b0ee3ad361ded56f
[]
no_license
sjavaad/ExData_Plotting1
56d063b7073abe2591fdfbdb82a5c75c032d1444
5e556ef43d4f9764a779636a1ec92cf879059f1c
refs/heads/master
2020-12-26T03:34:42.962460
2014-05-09T10:48:15
2014-05-09T10:48:15
null
0
0
null
null
null
null
UTF-8
R
false
false
570
r
plot2.R
## Plot 2.R
## Reads household power consumption for 1-2 Feb 2007 straight from disk via
## SQL (so the full multi-million-row file never has to fit in memory) and
## plots Global Active Power against time into plot2.png.
##
## Requires the sqldf package: install.packages("sqldf") if not installed.
## Using library() rather than require(): library() stops with an error when
## the package is missing, while require() only returns FALSE and the script
## would fail later with a confusing message.
library(sqldf)

# Pull only the two target days while parsing the semicolon-separated file.
mySql <- "SELECT * from file WHERE Date = '1/2/2007' OR Date = '2/2/2007'"
myTab <- read.csv.sql("household_power_consumption.txt", sql = mySql, sep = ";")

# Combine the separate Date and Time columns into one POSIX timestamp.
myTab$myDate <- strptime(paste(myTab$Date, myTab$Time), "%d/%m/%Y %H:%M:%S", tz = "EST")

## Plot 2
png(file = "plot2.png")
plot(myTab$myDate, myTab$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()

## End of Code
c33d609623671811a7566d9acdfbd240f0d843cf
9b11f370ad33a7bde061eb07ed40b521456c8405
/R/SearchConsole.R
c4ddfea39711fac6e7323d987bc4af1281fd99c6
[]
no_license
ceaksan/PageContentAnalysis
919b1b2404947c2bc73f08c2035247fc1266411c
d67c42aaed3e4801b5c3507561ed187fe4009980
refs/heads/main
2023-07-04T16:03:31.387143
2021-08-18T05:44:54
2021-08-18T05:57:36
352,784,265
1
0
null
null
null
null
UTF-8
R
false
false
1,049
r
SearchConsole.R
# Pull 2020 desktop query/page metrics for one property from Google Search
# Console and save them as a semicolon-separated CSV.
library(googleAuthR)
library(searchConsoleR)

# OAuth client JSON plus the Google account, authorised with the read-only
# Webmasters scope.
email <- "user@gmail.com"
JSONfile <- '<client-id>.apps.googleusercontent.com.json'
gar_set_client(JSONfile)
gar_auth(email = email, scopes = 'https://www.googleapis.com/auth/webmasters.readonly')

# Pick the second property whose URL matches 'domain'.
SC_sites <- list_websites()
matching_sites <- SC_sites[which(grepl('domain', SC_sites$siteUrl)), ]
site <- matching_sites$siteUrl[2]

# Reporting window: the full 2020 calendar year.
start_date <- as.Date("2020-01-01")
end_date <- as.Date("2020-12-31")

# Desktop web traffic only, broken down by query and landing page.
gbr_desktop_queries <- search_analytics(
  site,
  start_date, end_date,
  c("query", "page"),
  dimensionFilterExp = c("device==DESKTOP"),
  searchType = "web"
)
write.csv2(gbr_desktop_queries, file = "gbr_desktop_queries.csv")

# Equivalent pull restricted to mobile devices (currently disabled):
#gbr_mobile_queries <-
#  search_analytics(site,
#                   "2020-01-01", "2020-12-31",
#                   c("query", "page"),
#                   dimensionFilterExp = c("device==MOBILE"),
#                   searchType="web")
#write.csv2(gbr_mobile_queries, file = "gbr_mobile_queries.csv")

gar_deauth()
9fb3f5339da30b7791f377048ea6de551cc673a8
71ab6249e974d0a9bb16ce3fbdc958b7dbb39d3c
/man/ch.getPhitModel.Rd
ea7405f363b792349a25ef4754fb51ec3bd8b220
[]
no_license
ccpluncw/ccpl_R_chutils
bbfe2aa8186dee5f7c4a1afd6fc78021a76674c0
3e55658c38d7d10c129755711bb7b02d0591fb3c
refs/heads/master
2023-04-27T00:37:23.466391
2023-04-16T00:33:44
2023-04-16T00:33:44
136,065,945
0
0
null
null
null
null
UTF-8
R
false
true
876
rd
ch.getPhitModel.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ch.getPhitModel.r
\name{ch.getPhitModel}
\alias{ch.getPhitModel}
\title{a function to return a p(Hit) model that can be evaluated (i.e., an expression) from an nls object}
\usage{
ch.getPhitModel(
  pHitFit,
  yLab = "p(Hit)",
  xLab = expression(paste("", Psi, "(value) Distributional overlap", sep = ""))
)
}
\arguments{
\item{pHitFit}{an nls object - specifically from ch.pHVOfit().}
}
\value{
an expression containing the best fitting model (with parameters and predictor variables)
}
\description{
This function returns a p(Hit) model that can be evaluated (i.e., an expression) from an nls object
}
\examples{
ch.getPhitModel (nlsObject)
model <- ch.getPhitModel (nlsObject)
with(data, eval(model))
}
\keyword{eval}
\keyword{expression}
\keyword{model}
\keyword{nls}
\keyword{object}
\keyword{p(Hit)}
6d9753e5b7d75a2585b1031376f1e6a7036513f3
2b8daa92a672b457b9a1dc51a1e5a77b087be724
/man/swslm.Rd
e9d641efc1791b66e84565378780f2873968b062
[]
no_license
bjb40/apcwin
1ea79ab2430c21c2e2fae9e7f3010f11e5f32baf
59d47f3dcc3c4424abd1fb58b6883d96ed8362f7
refs/heads/master
2021-04-26T22:45:54.235820
2018-06-23T18:55:24
2018-06-23T18:55:24
124,143,822
1
0
null
null
null
null
UTF-8
R
false
true
382
rd
swslm.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/samplingfuns.R
\name{swslm}
\alias{swslm}
\title{A wrapper that samples from model space and draws from the posterior.}
\usage{
swslm(formula, ...)
}
\arguments{
\item{formula}{a formula.}
}
\value{
An object of class apceffects.
}
\description{
A wrapper that samples from model space and draws from the posterior.
}
8c1cbc0c3e3bab69af659a62bb5ddf93e4446213
520c59eb08d90cb65c994c8121782c42617a5558
/005_review_data.R
c583880e4ac6ae9d0f6568e691f92db05594e8d4
[ "MIT" ]
permissive
ASanchez-Tojar/animal_personality_terminology
0269adb487fb0e402c42c8e90609683b17786856
1877ab5f6538016aff8055f52848059f21ce04ee
refs/heads/main
2023-04-07T12:15:27.689782
2022-01-14T09:18:29
2022-01-14T09:18:29
359,851,016
0
0
null
null
null
null
UTF-8
R
false
false
11,601
r
005_review_data.R
################################################################################
# Authors:
# Alfredo Sanchez-Tojar (alfredo.tojar@gmail.com)
# Affiliation: Dept. Evolutionary Biology, Bielefeld University, Germany
# Profile: https://scholar.google.de/citations?user=Sh-Rjq8AAAAJ&hl=de
# Script first created on the 7th of July 2020
################################################################################

################################################################################
# Description of script and Instructions
################################################################################

# This script is to import each of the observer's dataset with the extracted
# data, put them together, and explore disagreement between observers. It creates
# a combined file that will be reviewed and corrected outside R, i.e. the file
# created is not the file to be used for the analyses. The next script, i.e.
# 006_review_data_analysis.R will take care of importing the final file and
# analyze it.

################################################################################
# Packages needed
################################################################################

pacman::p_load(openxlsx,stringr,dplyr,tidyverse)

# Clear memory
rm(list=ls())

################################################################################
# Functions needed
################################################################################

# none

################################################################################
# Import data
################################################################################

# data extraction from Alfredo Sanchez-Tojar
AST <- read.xlsx("data/ten_journals/per_observer/ten_journals_fulltext_screening_and_data_extraction_AST.xlsx",
                 colNames=T,sheet = 1)

# data extraction from Maria Moiron
MM <- read.xlsx("data/ten_journals/per_observer/ten_journals_fulltext_screening_and_data_extraction_MM.xlsx",
                colNames=T,sheet = 1)

# data extraction from Petri Niemela
PN <- read.xlsx("data/ten_journals/per_observer/ten_journals_fulltext_screening_and_data_extraction_PN.xlsx",
                colNames=T,sheet = 1)

################################################################################
# Preparing dataset
################################################################################

# removing an extra column in PN with a couple of non-important comments
PN <- as.data.frame(PN %>% select(-X34))

# putting altogether (one row per study x observer)
full <- rbind(AST,MM,PN)

# removing variables that we do not need or are non-informative
full <- select(full,-c(key,month,day,issn,language,publisher,location,notes))

head(full)
summary(full)

# formatting variables
full$studyID <- as.factor(full$studyID)
full$journal <- as.factor(full$journal)
full$t.and.a_decision <- as.factor(full$t.and.a_decision)
full$t.and.a_exclusion_reason <- as.factor(full$t.and.a_exclusion_reason)
full$fulltext_exclusion_reason <- as.factor(full$fulltext_exclusion_reason)
full$observer <- as.factor(full$observer)

# formatting variable levels (lower case) and making them factors
full$fulltext_decision <- as.factor(tolower(full$fulltext_decision))
full$repeatability <- as.factor(tolower(full$repeatability))
full$repeatability_interpretation <- as.factor(tolower(full$repeatability_interpretation))
full$repeatability_consist_predict <- as.factor(tolower(full$repeatability_consist_predict))
full$repetability_comparison <- as.factor(tolower(full$repetability_comparison))
full$repetability_comparison_interpretation <- as.factor(tolower(full$repetability_comparison_interpretation))
full$unstandardize_variance <- as.factor(tolower(full$unstandardize_variance))

summary(full)

##############################################
# repeatability_interpretation

# "no" actually means "none" in this variable:
# full[full$repeatability_interpretation=="no" & !(is.na(full$repeatability_interpretation)),]

table(full$repeatability_interpretation)

# collapse spelling variants to the canonical levels among/within/none
full$repeatability_interpretation <- recode(full$repeatability_interpretation ,
                                            "among-individual" = "among",
                                            "among_individual" = "among",
                                            "within-individual" = "within",
                                            "within_individual level" = "within",
                                            "no" = "none",
                                            .default = levels(full$repeatability_interpretation ))

# re-factor so the recoded-away levels are dropped
full$repeatability_interpretation <- factor(full$repeatability_interpretation)
table(full$repeatability_interpretation)

##############################################
# repeatability_consist_predict

table(full$repeatability_consist_predict)

full$repeatability_consist_predict <- recode(full$repeatability_consist_predict ,
                                             "individual consistency" = "yes",
                                             .default = levels(full$repeatability_consist_predict ))

full$repeatability_consist_predict <- factor(full$repeatability_consist_predict)
table(full$repeatability_consist_predict)

##############################################
# repetability_comparison_interpretation

table(full$repetability_comparison_interpretation)

# collapse spelling variants to among/within/both
full$repetability_comparison_interpretation <- recode(full$repetability_comparison_interpretation ,
                                                      "among-individual" = "among",
                                                      "within-individual" = "within",
                                                      "within_individual level" = "within",
                                                      "both " = "both",
                                                      "both (backet up nicely with actual variances)" = "both",
                                                      .default = levels(full$repetability_comparison_interpretation))

full$repetability_comparison_interpretation <- factor(full$repetability_comparison_interpretation)
table(full$repetability_comparison_interpretation)

# APR155: context placed on the wrong variable. Moving it to the right place
full[full$studyID=="APR155" & full$observer=="MM","repetability_comparison_interpretation_context"] <- full[full$studyID=="APR155" & full$observer=="MM","repetability_comparison_interpretation"]
full[full$studyID=="APR155" & full$observer=="MM","repetability_comparison_interpretation"] <- NA

full$repetability_comparison_interpretation <- factor(full$repetability_comparison_interpretation)
table(full$repetability_comparison_interpretation)

##############################################
# unstandardize_variance

table(full$unstandardize_variance)

# creating a new variable after recoding (yes/partially/no)
full$unstandardize_variance.2 <- recode(full$unstandardize_variance ,
                                        "no access to supl" = "no",
                                        "yes (but has to be calculated from the sum of squares manually)" = "partially",
                                        "yes (but not id level estimates, only colony level)" = "partially",
                                        "yes (for one trait)" = "partially",
                                        "yes (in a figure)" = "yes",
                                        .default = levels(full$unstandardize_variance))

table(full$unstandardize_variance.2)
summary(full)

################################################################################
# checking data expectations to find error/missing data
################################################################################

##############################################
# fulltext_decision

# making sure that if fulltext_decision=="no" all the data variables are "NA"
full[full$fulltext_decision=="no" & !(is.na(full$fulltext_decision)),]

##############################################
# repeatability

# making sure that if repeatability=="no" all the data variables are "NA"
full[full$repeatability=="no" & !(is.na(full$repeatability)),c("studyID","repeatability",
                                                               "repeatability_interpretation",
                                                               "repeatability_consist_predict",
                                                               "repetability_comparison",
                                                               "repetability_comparison_interpretation",
                                                               "unstandardize_variance",
                                                               "unstandardize_variance.2",
                                                               "observer")]

# changing "unstandardize_variance" and "unstandardize_variance.2" to NA if
# repeatability == "no" (only PN filled this variable in when repeatability == "no")
full[full$repeatability=="no" & !(is.na(full$repeatability)),c("unstandardize_variance",
                                                               "unstandardize_variance.2")] <- NA

# making sure that if repeatability=="yes" there is no missing data
summary(full[full$repeatability=="yes" & !(is.na(full$repeatability)),c("studyID","repeatability",
                                                                        "repeatability_interpretation",
                                                                        "repeatability_consist_predict",
                                                                        "repetability_comparison",
                                                                        "unstandardize_variance.2",
                                                                        "observer")])

# needs double-checking: none!
full[full$repeatability=="yes" & !(is.na(full$repeatability)) & is.na(full$repeatability_consist_predict),
     c("studyID","repeatability",
       "repeatability_interpretation",
       "repeatability_consist_predict",
       "repetability_comparison",
       "unstandardize_variance.2",
       "observer")]

##############################################
# repetability_comparison

# making sure that if repetability_comparison=="no" repetability_comparison_interpretation is "NA"
full[full$repetability_comparison=="no" & !(is.na(full$repetability_comparison)),c("studyID",
                                                                                   "repetability_comparison",
                                                                                   "repetability_comparison_interpretation",
                                                                                   "observer")]

################################################################################
# sorting database to check observer agreement
################################################################################

# count how many observers extracted each study
counts <- as.data.frame(table(full$studyID))
names(counts) <- c("studyID","times.extracted")

# adding the counts to the database
full.counts <- merge(full,counts,by="studyID",all.x=T)

# sorting by times.extracted to make it easy
full.counts <- arrange(full.counts,-times.extracted,studyID,observer)

############################################################
# exporting clean dataset
############################################################

# exporting data (this combined file is reviewed/corrected outside R)
write.xlsx(full.counts,
           "data/ten_journals/combined/ten_journals_fulltext_screening_and_data_extraction_combined.xlsx",
           sheetName="Sheet1",col.names=TRUE, row.names=F, append=FALSE, showNA=TRUE,
           password=NULL)
69eb9cf7210cec304ffcd74a27dc42399a1a802a
df472aa1f924985294ee7ead8fa2535b7f861044
/NUCOMBog/tests/testthat/testBasicFunctions.R
d59356485c0fac476a7d55f2da378ae9c57fcedd
[]
no_license
COST-FP1304-PROFOUND/NUCOMBog
9574e90a9390bb92a9f8a57720930f04b367d93e
be3b07478221bd3896ea621f8e5f2b10d9a12a45
refs/heads/master
2021-01-20T05:43:33.174840
2017-04-29T17:21:08
2017-04-29T17:21:08
null
0
0
null
null
null
null
UTF-8
R
false
false
184
r
testBasicFunctions.R
context("Test basic functions") set.seed(1) library(NUCOMBog) test_that("Basic functions work",{ skip_on_cran() # Add functions that can be run without the model here } )
69a963727848ad6a04eaad470c0afb57475e8288
c5f44a778993b372be8006af73e8568597553ef3
/scripts/network_graph.R
16eebbfbaedf6b2cda308008968d7c2dbd730fb7
[]
no_license
conorotompkins/spotify
ee8bd5c86b9f20deeb09ad262cab72f5b2bc842e
20d051055168d6908ca5b64963b81c52c42a8969
refs/heads/master
2022-11-24T13:56:40.987340
2020-07-29T18:19:25
2020-07-29T18:19:25
283,020,703
0
0
null
null
null
null
UTF-8
R
false
false
3,092
r
network_graph.R
# Build a co-credit network for Queens of the Stone Age from the Genius API:
# fetch writer/producer credits per song, tidy them into long format, and
# draw a pairwise-collaboration graph.
library(tidyverse)
library(geniusr)

# A Genius API token must be stored in the R environment file.
#edit r environment file here
#usethis::edit_r_environ()
geniusr::genius_token()

# Interactive lookup of the artist id (25320 below — presumably taken from
# this search result; confirm if the API ids ever change).
geniusr::search_artist("Queens of the Stone Age")
#geniusr::search_artist("Desert Sessions")

qotsa <- geniusr::get_artist(artist_id = 25320)
qotsa_songs <- geniusr::get_artist_songs_df(25320)

qotsa_songs

# Prototype pipeline: writer credits for one song, collapsed into a single
# comma-separated string.
qotsa_songs %>%
  slice(3) %>%
  pull(song_id) %>%
  map(~get_song(song_id = .x)) %>%
  map(c("content", "writer_artists")) %>%
  flatten() %>%
  map_chr("name") %>%
  paste(collapse = ", ")

# Fetch a song's personnel names ("writer_artists" or "producer_artists")
# as one comma-separated string; returns NA_character_ when none are listed.
get_personnel <- function(var_song_id, personnel_type){
  text <- map(var_song_id, get_song) %>%
    map(c("content", personnel_type)) %>%
    flatten() %>%
    map_chr("name") %>%
    paste(collapse = ", ")

  if (text == "") {
    return(NA_character_)
  } else{
    return(text)
  }
}

# Spot check on a single song id.
get_personnel(118898, "producer_artists")

# Same pattern for the album title of a song; NA when missing.
get_album_name <- function(var_song_id){
  text <- map(var_song_id, get_song) %>%
    map(c("content", "album")) %>%
    #flatten() %>%
    map_chr("name") %>%
    paste(collapse = ", ")

  if (text == "") {
    return(NA_character_)
  } else{
    return(text)
  }
}

get_album_name(118898) %>% str()

# One row per song x personnel type x person. "The Fififf Teeners" is
# replaced by "Josh Homme, Chris Goss" (presumably the people behind that
# credit) before the comma-splitting step.
qotsa_artists <- qotsa_songs %>%
  #slice(1:10) %>%
  mutate(writer = map(song_id, get_personnel, personnel_type = "writer_artists"),
         producer = map(song_id, get_personnel, personnel_type = "producer_artists")) %>%
  mutate(producer = str_replace_all(producer, "The Fififf Teeners", "Josh Homme, Chris Goss")) %>%
  separate_rows(writer, sep = ", ") %>%
  separate_rows(producer, sep = ", ") %>%
  pivot_longer(cols = c(writer, producer),
               names_to = "personnel_type",
               values_to = "personnel_name")

qotsa_artists

# Verify the replacement removed every "The Fififf Teeners" row.
qotsa_artists %>%
  filter(personnel_name == "The Fififf Teeners")

# Distribution of how many distinct people are credited per song.
qotsa_artists %>%
  distinct(song_name, personnel_name) %>%
  count(song_name) %>%
  mutate(song_name = str_squish(song_name) %>% str_to_lower(.)) %>%
  ggplot(aes(n)) +
  geom_histogram()

# Most frequently credited producers.
qotsa_artists %>%
  filter(personnel_type == "producer") %>%
  count(personnel_name, sort = TRUE)

library(tidygraph)
library(ggraph)

# Collaboration graph: nodes are people credited on more than one song;
# edge weight n counts the distinct songs a pair shares.
qotsa_artists %>%
  distinct(song_name, personnel_type, personnel_name) %>%
  filter(!is.na(personnel_name)) %>%
  group_by(personnel_name) %>%
  filter(n() > 1) %>%
  ungroup() %>%
  select(song_name, personnel_name) %>%
  widyr::pairwise_count(personnel_name, song_name, diag = FALSE, upper = FALSE) %>%
  as_tbl_graph(directed = FALSE) %>%
  activate(edges) %>%
  #filter(n > 1) %>%
  activate(nodes) %>%
  #filter(!node_is_isolated()) %>%
  ggraph() +
  geom_edge_fan(aes(edge_width = n, edge_alpha = n), color = "white") +
  geom_node_point(size = 4, color = "red") +
  geom_node_label(aes(label = name), repel = TRUE) +
  labs(title = "Queens of the Stone Age personnel network",
       edge_width = "Distinct connections",
       edge_alpha = "Distinct connections") +
  theme_void() +
  theme(plot.background = element_rect(fill = "black"),
        title = element_text(color = "white"),
        legend.text = element_text(color = "white"))
bfaf3c3637eeb2accf8ab2fb9f952747f0628684
03da8319e0dc8f3d378e10526eb4e87f4884c121
/R/data.R
8e4d3f72d957b71c261b6d48e0ce3f38f8f17079
[]
no_license
cran/GPP
99fee8d8826a40a7a19c9432e94c18c4ffccf7ec
f21ba020791544afe3d98dc21375a2ae945990f6
refs/heads/master
2023-01-21T10:59:32.093557
2020-11-27T09:20:06
2020-11-27T09:20:06
317,816,967
0
0
null
null
null
null
UTF-8
R
false
false
787
r
data.R
#' 1960-2003 GDP dataset
#'
#' An example dataset for using \code{\link{GPP}} to estimate the counterfactual GDP of West Germany assuming no reunification.
#'
#' @format A data frame with 748 rows and 14 columns. For detailed explanations of the exact measures, see \url{https://www.dropbox.com/s/n1bvqb54xrw8vyj/GPSynth.pdf?dl=0}:
#' \describe{
#'   \item{index}{}
#'   \item{country}{}
#'   \item{year}{}
#'   \item{gdp}{}
#'   \item{infrate}{}
#'   \item{trade}{}
#'   \item{schooling}{}
#'   \item{invest60}{}
#'   \item{invest70}{}
#'   \item{invest80}{}
#'   \item{industry}{}
#'   \item{invest}{}
#'   \item{school}{}
#'   \item{ind}{}
#' }
#' @seealso \code{\link{GPP}} \code{\link{plotGPPfit}} \code{\link{writeMod}} \code{\link{runMod}} \code{\link{autoConverge}}
"GDPdata"
434827fd75bfe4178a03d8606391602c0efca48c
29585dff702209dd446c0ab52ceea046c58e384e
/BSquare/R/qreg_spline.R
fe62b0ea55b123e3447dffd5bf7e721310d2359a
[]
no_license
ingted/R-Examples
825440ce468ce608c4d73e2af4c0a0213b81c0fe
d0917dbaf698cb8bc0789db0c3ab07453016eab9
refs/heads/master
2020-04-14T12:29:22.336088
2016-07-21T14:01:14
2016-07-21T14:01:14
null
0
0
null
null
null
null
UTF-8
R
false
false
13,159
r
qreg_spline.R
# qreg_spline: Bayesian simultaneous quantile regression. The central
# quantiles are modelled with an I-spline basis; below q_low and above q_high
# the curves are extrapolated with parametric tails (Pareto when
# Pareto = TRUE, exponential otherwise). Posterior sampling is delegated to
# the compiled sampler MCMC_IP_C().
#
# Fixes relative to the original: seven bare `stop` tokens (which evaluate
# the function object and do NOT abort) are now calls to stop(); the X range
# check used min(abs(X)) > 1, which can never trigger when the mandatory
# intercept column of ones is present — it now tests max(abs(X)). Plainly
# unused preallocations (old_tau, bin, iter_flag, theta_keep,
# tuning_parms_keep, acc_theta_keep, mu_keep) were removed.
#
# Arguments (interface unchanged):
#   X              design matrix; first column all ones, entries in [-1, 1].
#   Y              responses for observed data (status == 0/1/2).
#   Y_low, Y_high  interval bounds for interval-censored data (status == 3).
#   status         0 observed, 1 left-, 2 right-, 3 interval-censored;
#                  NULL means all observed.
#   knots_inter    interior knots of the spline basis on the quantile scale.
#   Pareto         Pareto (TRUE) vs exponential (FALSE) tails.
#   varying_effect number of covariates affecting the scale (default: all).
#   tau            quantile levels at which fitted quantiles are reported.
#   burn, iters    MCMC burn-in and retained iterations.
#   q_low, q_high  thresholds where the spline hands over to the tails.
#   sig_a, sig_b, mu_var, cbf_var, tail_mean, tail_var  prior hyperparameters.
#   cbf_eps, theta_eps  resolvant parameters, each in [0.1, 0.5].
#   tuning_theta, tuning_tail  Metropolis candidate variances.
#   cred_low, cred_high  pointwise credible-interval probabilities.
#   seed, verbose  RNG seed and progress printing.
#
# Returns a list with posterior quantile draws (q), spline coefficients
# (theta), prior and tail-index draws, pointwise summaries (q_lower, q_mean,
# q_upper), LPML/CPO, timing, and the run settings.
qreg_spline <- function(X,Y=NULL,Y_low=NULL,Y_high=NULL,status = NULL,
                        knots_inter = c(.1,.5,.9),
                        Pareto = TRUE,
                        varying_effect=NULL,
                        tau= seq(0.05,0.95,0.05),
                        burn=10000,
                        iters=50000,
                        q_low = 0.01, q_high = 0.99,
                        sig_a = .1, sig_b = .1,
                        mu_var = 10^2,
                        cbf_var = 10^3,
                        tail_mean = -1.2, tail_var = .4,
                        cbf_eps = 0.5, theta_eps = 0.5,
                        tuning_theta = 1,
                        tuning_tail = rep(1,2),
                        cred_low = .025, cred_high = .975,
                        seed = 1,
                        verbose = TRUE){

  library(quantreg); library(quadprog)

  # Counts per censoring type; with no status vector everything is observed.
  n1 <- n <- length(Y)
  n2 <- n3 <- n4 <- n5 <- 0
  if(length(status)>0){
    n1 <- sum(status==0)
    n2 <- sum(status==1)
    n3 <- sum(status==2)
    n4 <- sum(status==3)
    n5 <- sum(status==4)
    n <- n1+n2+n3+n4
    if(n != length(status)){
      print("Invalid entry for status")
      stop()   # was a bare `stop` (no-op) in the original
    }
    # Reorder observations by censoring type, the layout the sampler expects.
    order <- c(which(status==0),which(status==1),which(status==2),which(status==3))
    if(!is.null(Y_low)){Y_low <- Y_low[order]}
    if(!is.null(Y_high)){Y_high <- Y_high[order]}
    if(!is.null(Y)){Y <- Y[order]}
    X <- X[order,]
  }

  N <- nrow(X)
  P <- ncol(X)
  P1 <- varying_effect
  if(is.null(P1)){P1 <- P}

  # ---- input validation --------------------------------------------------
  if(P1<1 || P1>P){
    print("Invalid number of covariates affecting the scale")
    stop()
  }
  if(sum(is.na(X))>0){
    print("Must remove observations with missing covariates")
    stop()
  }
  if(mean(X[,1]==1)<1){
    print("First column of X must be all ones")
    stop()
  }
  # Fixed check: the original tested min(abs(X)) > 1, which never fires once
  # the intercept column of ones is present; the intended bound is on the max.
  if(max(abs(X))>1){
    print("All values of X must be between -1 and 1")
    stop()
  }
  Yinit <- rowMeans(cbind(Y,Y_low,Y_high),na.rm=TRUE)
  sumY <- sum(Yinit)
  if(is.na(sumY)){
    # NOTE(review): warns but does not stop in the original; NAs in the
    # responses are zeroed out further below, so execution can continue.
    print("Can't have missing responses")
  }
  if(abs(sumY)==Inf){
    print("Responses must be finite")
  }
  if(q_low > .1){
    print("Lower threshold cannot exceed 0.1")
    stop()
  }
  if(q_high < .9){
    print("Upper threshold must be at least 0.9")
    stop()
  }
  if(sig_a < 0 || sig_b < 0 || mu_var < 0 || cbf_var < 0 || tail_var < 0){
    # NOTE(review): message only, no stop() — kept as in the original.
    print("All scale, shape and variance hyperparameters must be positive")
  }
  if(cbf_eps<.1 || cbf_eps > .5 || theta_eps < .1 || theta_eps > .5){
    print("All resolvant parameters must be in [0.1,0.5]")
    stop()
  }
  if(tuning_theta < 0 || min(tuning_tail) < 0){
    print("All candidate variances must be positive")
    stop()
  }
  burn <- as.integer(burn)
  sweeps <- as.integer(iters)
  if(burn < 1 || sweeps < 1){
    print("Need positive integer for burns and iters")
    stop()   # was bare `stop`
  }
  if(cred_low < 0){
    print("need lower limits of credible interval in (0,1)")
    stop()   # was bare `stop`
  }
  if(cred_high > 1){
    print("need upper limits of credible interval in (0,1)")
    stop()   # was bare `stop`
  }
  if(cred_low >= cred_high){
    print("need cred_low < cred_high")
    stop()   # was bare `stop`
  }
  if(length(knots_inter) > length(unique(knots_inter))){
    print("knots must be unique")
    stop()   # was bare `stop`
  }
  if(q_low <= 0){
    print("lower threshold must be greater than 0")
    stop()   # was bare `stop`
  }
  if(q_high >= 1){
    print("upper threshold must be less than 1")
    stop()   # was bare `stop`
  }

  # ---- spline setup ------------------------------------------------------
  xi_zero <- 1 - Pareto            # flag for the exponential-tail model
  tau <- sort(tau)
  knots_inter <- sort(knots_inter)
  N_tau <- length(tau)
  tau_start <- seq(.05,.95,.05)    # grid used only for starting values
  N_tau_start <- length(tau_start)
  N <- nrow(X)
  spline_df <- 3
  M <- 1 + spline_df + length(knots_inter) # number of spline basis functions plus intercept
  M_knots <- mspline_knots(knots_inter,spline_df)
  I_knots <- ispline_knots(knots_inter,spline_df)
  M_knots_length <- length(M_knots)
  I_knots_length <- length(I_knots)

  # I-spline basis evaluated on the starting grid (plus intercept column).
  III_start <- matrix(0,nrow=length(tau_start),ncol=length(knots_inter)+spline_df)
  for(i in 1:ncol(III_start)){
    III_start[,i] <- t(sapply(tau_start,ispline,spline_df=spline_df,m=i,I_knots=I_knots))
  }
  III_start <- cbind(1,III_start)
  II <- kronecker(diag(P),III_start)

  # Basis (I) and derivative (M) values at the two tail thresholds.
  I_low <- make_I(nu=1:(M-1),tau = q_low, spline_df = 3, I_knots = I_knots)
  I_high <- make_I(nu=1:(M-1),tau = q_high, spline_df = 3, I_knots = I_knots)
  M_low <- make_M(nu=1:(M-1),tau=q_low,spline_df = 3, M_knots = M_knots)
  M_high <- make_M(nu=1:(M-1),tau=q_high,spline_df = 3, M_knots = M_knots)
  M_low <- q_low * M_low
  M_high <- (1 - q_high) * M_high

  # Working responses used only for starting values: jitter the censored
  # observations so the initial quantile fit is well defined.
  dummy_y <- rep(0,N)
  if(length(status)==0){status <- rep(0,N)}
  if(sum(status==0) > 0){dummy_y[c(status==0)] <- Y[c(status==0)]}
  if(sum(status==1) > 0){set.seed(seed); dummy_y[c(status==1)] <- Y[c(status==1)] + rnorm(sum(status==1),.1)} #left censored
  if(sum(status==2) > 0){set.seed(seed); dummy_y[c(status==2)] <- Y[c(status==2)] + rnorm(sum(status==2),.1)} #right censored
  if(sum(status==3) > 0){set.seed(seed); dummy_y[c(status==3)] <- (Y_low[c(status==3)] + Y_high[c(status==3)])/2 + rnorm(sum(status==3),.1)} #interval censored

  # Frequentist quantile fits give starting values; fall back to ones when
  # the fit fails outright.
  fit <- 0
  betahat_matrix <- matrix(0,N_tau_start*P,1)
  suppressWarnings(
    tryCatch(
      fit <- quant_sim(dummy_y,X[,2:P],tau=tau_start),
      error=function(e){print("Initial fit failed");return(fit)}
    )
  )
  if(length(fit)==1){
    fit <- list(beta_hat=rep(1,N_tau_start*P))
  }
  betahat_matrix[,1] <- fit$beta_hat #Koenker's quantile estimates

  theta_start <- array(0,dim=c(M,P,3))
  theta_start[1,1,1] <- min(dummy_y)
  theta_start[2:M,1,1] <- (max(dummy_y) - min(dummy_y))/(P-1)
  if(var(betahat_matrix) != 0){
    tryCatch(
      theta_start[,,] <- mle_start_2(c(betahat_matrix[,1]),II,N_tau_start,P,M),
      error=function(e){return(theta_start)})
  }
  thetastar <- theta_start[,,1]
  if(P1 < P){thetastar[,(P1 + 1):P] <- 0} #starting value for sampler

  tuning_parms <- array(10,dim=c(M,P))

  # Zero out NAs in the responses; the sampler distinguishes observed vs
  # censored entries via the status counts, not via NA.
  if(!is.null(Y)){Y[is.na(Y)] <- 0}
  if(!is.null(Y_low)){Y_low[is.na(Y_low)] <- 0}
  if(!is.null(Y_high)){Y_high[is.na(Y_high)] <- 0}

  # Precomputed knot-difference reciprocals used by the C sampler for the
  # I-spline (IKM) and M-spline (MKM) recursions.
  M_1 <- M - 1
  IKM <- matrix(0,M_1,6)
  for(m in 1:(M-1)){
    IKM[m,1] <- (I_knots[m+1] - I_knots[m]) * (I_knots[m + 2] - I_knots[m]) * (I_knots[m + 3] - I_knots[m])
    IKM[m,2] <- (I_knots[m+2] - I_knots[m+1])
    IKM[m,3] <- ((I_knots[m+3] - I_knots[m+1]) * (I_knots[m+2] - I_knots[m+1]))
    IKM[m,4] <- ((I_knots[m+3] - I_knots[m+1]) * (I_knots[m+3] - I_knots[m]) * (I_knots[m+2]-I_knots[m+1]))
    IKM[m,5] <- -((I_knots[m+3] - I_knots[m]) * (I_knots[m+2] - I_knots[m]) * (I_knots[m+2] - I_knots[m+1]))
    IKM[m,6] <- -((I_knots[m+3] - I_knots[m+2]) * (I_knots[m+3] - I_knots[m+1]) * (I_knots[m+3] - I_knots[m]))
  }
  IKM <- ifelse(IKM==0,0,IKM^-1)
  MKM <- IKM
  MKM[,1] <- 3*MKM[,1]
  MKM[,2] <- 3*IKM[,2]
  MKM[,4] <- -3 * MKM[,4]
  MKM[,5] <- 3 * MKM[,5]
  MKM[,6] <- -3 * MKM[,6]

  # NOTE(review): this reassignment discards the MLE-based starting values
  # computed above; kept exactly as in the original (possibly intentional).
  thetastar <- array(0,dim=c(M,P))
  thetastar[,1] <- 1

  ############## IP prior inputs #######################
  mu <- array(0,dim=c(1,P))
  sigma2 <- array(1,dim=c(1,P))
  rho <- array(.5,dim=c(1,P))
  sigma2_keep <- rho_keep <- array(dim=c(iters,1*P,1))

  ############### tail inputs #################
  xi_low <- xi_high <- 1

  # ---- run the compiled sampler -----------------------------------------
  set.seed(seed)
  tick <- proc.time()
  out <- MCMC_IP_C(burn,iters, tuning_parms, tuning_tail, cbf_eps, theta_eps,
                   M, P, P1, N, n1, n2, n3, n4, n5, Y, Y_low, Y_high, X,
                   M_knots_length, I_knots_length, spline_df, M_knots, I_knots,
                   M_low, M_high, I_low, I_high, thetastar, mu, sigma2, rho,
                   xi_low, xi_high, q_low, q_high, xi_zero, mu_var, cbf_var,
                   tail_mean, tail_var, sig_a, sig_b, IKM, MKM, M_1,verbose)
  tock <- proc.time()
  MCMC_time <- tock - tick

  if(verbose){print("Compiling results...")}

  # common values
  post_theta <- matrix(out$THETA,nrow=out$iters,byrow=T)
  tuning_parms <- out$tuning_parms
  acc_theta <- out$ACC_THETA/out$ATT_THETA
  theta_out <- round(out$THETA_OUT/out$sweeps,1) #n_times theta was outside of parameter space
  theta_in <- round(out$THETA_IN/out$sweeps,1)

  # prior values
  post_mu <- matrix(out$MU,nrow=out$iters,byrow=T)
  post_sigma2 <- matrix(out$SIGMA2,nrow=out$iters,byrow=T)
  post_rho <- matrix(out$RHO,nrow=out$iters,byrow=T)
  rho_accepted <- round(matrix(out$ACC_RHO/out$iters,1,nrow=P),2)

  # tail values
  post_xi_low <- out$XI_LOW
  post_xi_high <- out$XI_HIGH

  ############################# Part 2: Credible Intervals
  post_q <- array(0,dim=c(iters,N_tau,ncol=P))
  post_q_lower <- post_q_upper <- post_q_mean <- matrix(0,N_tau,P)
  # Unscaled basis values at the thresholds for the extrapolation formulas.
  I_high <- make_I(nu=1:(M-1),tau = q_high, spline_df = 3, I_knots = I_knots)
  M_low <- make_M(nu=1:(M-1),tau=q_low,spline_df = 3, M_knots = M_knots)
  M_high <- make_M(nu=1:(M-1),tau=q_high,spline_df = 3, M_knots = M_knots)
  N_tau_low <- sum(tau < q_low)
  N_tau_mid <- sum( (q_low <= tau)*(tau <= q_high))
  N_tau_high <- sum(tau > q_high)

  # Quantile levels below q_low: extrapolate with the lower tail.
  if(N_tau_low > 0){
    if(Pareto){
      for(i in 1:N_tau_low){
        I_low <- make_I(nu=1:(M-1),tau = q_low, spline_df = 3, I_knots = I_knots)
        M_low <- make_M(nu=1:(M-1),tau=q_low,spline_df = 3, M_knots = M_knots)
        for(p in 1:P){
          low_scale <- post_theta[,((p-1)*M+1) : (p*M)]%*%M_low
          Q_low <- post_theta[,((p-1)*M+1) : (p*M)]%*%I_low
          post_q[,i,p] <- Q_low - (q_low/post_xi_low) * low_scale * ((tau[i] / q_low)^-post_xi_low -1)
        }
      }
    }
    else{
      # Exponential lower tail.
      for(i in 1:N_tau_low){
        I_low <- make_I(nu=1:(M-1),tau = q_low, spline_df = 3, I_knots = I_knots)
        M_low <- make_M(nu=1:(M-1),tau=q_low,spline_df = 3, M_knots = M_knots)
        low_scale <- post_theta[,1:M]%*%M_low
        Q_low <- post_theta[,1:M]%*%I_low
        post_q[,i,1] <- Q_low + q_low * low_scale * log(tau[i]/q_low)
        for(p in 2:P){
          low_scale <- post_theta[,((p-1)*M+1) : (p*M)]%*%M_low
          Q_low <- post_theta[,((p-1)*M+1) : (p*M)]%*%I_low
          post_q[,i,p] <- Q_low + q_low * low_scale * log(tau[i]/q_low)
        }
      }
    }
  }

  # Central quantile levels: evaluate the I-spline basis directly.
  if(N_tau_mid > 0){
    tau_mid <- tau[(N_tau_low + 1):(N_tau_low + N_tau_mid)]
    III <- matrix(0,nrow=N_tau_mid,ncol=length(knots_inter)+spline_df)
    for(i in 1:ncol(III)){
      III[,i] <- t(sapply(tau_mid,ispline,spline_df=spline_df,m=i,I_knots=I_knots))
    }
    III <- cbind(1,III)
    for(p in 1:P){
      post_q[,(N_tau_low + 1):(N_tau_low + N_tau_mid),p] <- post_theta[,((p-1)*M+1) : (p*M)]%*%t(III)
    }
  }

  # Quantile levels above q_high: extrapolate with the upper tail.
  if(N_tau_high > 0){
    if(Pareto){
      for(i in (N_tau_low + N_tau_mid + 1):N_tau){
        I_high <- make_I(nu=1:(M-1),tau = q_high, spline_df = 3, I_knots = I_knots)
        M_high <- make_M(nu=1:(M-1),tau=q_high,spline_df = 3, M_knots = M_knots)
        for(p in 1:P){
          high_scale <- post_theta[,((p-1)*M+1) : (p*M)]%*%M_high
          Q_high <- post_theta[,((p-1)*M+1) : (p*M)]%*%I_high
          post_q[,i,p] <- Q_high + (1 - q_high) * high_scale / post_xi_high * ((( 1 - tau[i]) /(1 - q_high))^-post_xi_high -1)
        }
      }
    }
    else{
      # Exponential upper tail. NOTE(review): the p = 1 assignment below is
      # immediately overwritten by the 1:P loop, which also uses the opposite
      # sign; kept exactly as in the original pending verification.
      for(i in (N_tau_low + N_tau_mid + 1):N_tau){
        I_high <- make_I(nu=1:(M-1),tau = q_high, spline_df = 3, I_knots = I_knots)
        M_high <- make_M(nu=1:(M-1),tau=q_high,spline_df = 3, M_knots = M_knots)
        high_scale <- post_theta[,1:M]%*%M_high
        Q_high <- post_theta[,1:M]%*%I_high
        post_q[,i,1] <- Q_high + q_high * high_scale * log((1 - tau[i])/(1 - q_high))
        for(p in 1:P){
          high_scale <- post_theta[,((p-1)*M+1) : (p*M)]%*%M_high
          Q_high <- post_theta[,((p-1)*M+1) : (p*M)]%*%I_high
          post_q[,i,p] <- Q_high - q_high * high_scale * log((1 - tau[i])/(1 - q_high))
        }
      }
    }
  }

  # Pointwise posterior summaries of the fitted quantiles.
  for(p in 1:P){
    post_q_lower[,p] <- apply(post_q[,,p],2,quantile,probs=cred_low)
    post_q_upper[,p] <- apply(post_q[,,p],2,quantile,probs=cred_high)
    post_q_mean[,p] <- apply(post_q[,,p],2,mean)
  }

  # NOTE(review): several names (q, tau, MCMC_time, LPML, CPO) appear twice;
  # duplicates are kept so positional indexing by existing callers still works.
  list(q=post_q, tau=tau, theta=post_theta,
       tuning_parms=tuning_parms, acc_theta = acc_theta, N_tau = N_tau,
       post_mu=post_mu, post_sigma2=post_sigma2, rho_keep=rho_keep,
       post_xi_low = post_xi_low, post_xi_high = post_xi_high,
       q = post_q, q_lower = post_q_lower, q_upper = post_q_upper, q_mean = post_q_mean,
       MCMC_time = MCMC_time, tau = tau,
       LPML=out$LPML,CPO=out$CPO,
       MCMC_time=MCMC_time, LPML = out$LPML,CPO=out$CPO,
       iters=iters,burn=burn
  )
}
1c965aaf090da4e737b6df741ba2ee1ca9164759
2099a2b0f63f250e09f7cd7350ca45d212e2d364
/AI-Dataset/Summary_rnd/S0004370215000776.xml.A.R
cfdd5381ec4b9943c95ee23a4729c1c2ee892194
[]
no_license
Angela7126/SLNSumEval
3548301645264f9656b67dc807aec93b636778ef
b9e7157a735555861d2baf6c182e807e732a9dd6
refs/heads/master
2023-04-20T06:41:01.728968
2021-05-12T03:40:11
2021-05-12T03:40:11
366,429,744
3
0
null
null
null
null
UTF-8
R
false
false
981
r
S0004370215000776.xml.A.R
<html> <head> <meta name="TextLength" content="SENT_NUM:6, WORD_NUM:92"> </head> <body bgcolor="white"> <a href="#0" id="0">The main property of this encoding is that it correctly captures consistent fixpoints of the approximate operator.</a> <a href="#1" id="1">Let {a mathematical formula}E⊆Zu¯ be directed and {a mathematical formula}e¯=⨆FE be its least upper bound in {a mathematical formula}(F,≤i).</a> <a href="#2" id="2">Although the operator is defined for all pairs (including inconsistent ones), its restriction to consistent pairs is well-defined since it maps consistent pairs to consistent pairs.</a> <a href="#3" id="3">Then{a mathematical formula}{a mathematical formula}{a mathematical formula}The “if” direction is both times trivially satisfied.</a> <a href="#4" id="4">Consider any{a mathematical formula}I∈{G,U}.</a> <a href="#5" id="5">{a mathematical formula}G={GΞ|Ξ is an ADF}, {a mathematical formula}U={UΞ|Ξ is an ADF}</a> </body> </html>
f1c7d1cb67b6516f7031fbf1e8dc157c83865abd
9d606b309626fe67b171a16175717fbaab2e5bb5
/Functions/DWstats.R
20685e5837a7bfbf17a6f9c237106bd43304784f
[]
no_license
yuxidchen/LupronCode
b7c8fa6885b08156841bbff6a8d6c24a82ca13b2
ab593f313a1250c6318dc3028cf83da80ccfa16c
refs/heads/master
2020-12-24T19:28:13.030188
2016-05-19T23:29:12
2016-05-19T23:29:12
59,250,020
0
0
null
null
null
null
UTF-8
R
false
false
356
r
DWstats.R
# Compute the Durbin-Watson statistic for a vector of regression residuals:
# the sum of squared successive differences divided by the sum of squared
# residuals. The one-step shift is delegated to the project helper MyLag().
DWstat <- function(residuals) {
  shifted <- MyLag(residuals, 1)
  squared_diffs <- (residuals - shifted)^2
  sum(squared_diffs) / sum(residuals^2)
}
9228461856eefa6242354e0547579cdbb19a17b6
e2bf42ce43945365e26a34567083ce0f0f0946f9
/utility/apply_pbem_scaling_factor_on_oncotab.R
1a9092e0e81f1a1c770832a75446d2b3d4a69294
[]
no_license
ncasiraghi/abemus_raw_code
0a665916a8f64c8d65719300a0473862de288790
65eda0d883c74d03761d352fea7c46e6be7e2467
refs/heads/master
2020-03-27T09:06:55.383957
2019-02-28T11:24:30
2019-02-28T11:24:30
null
0
0
null
null
null
null
UTF-8
R
false
false
1,431
r
apply_pbem_scaling_factor_on_oncotab.R
#!/usr/bin/env Rscript
# Apply a scaling factor (SF) to the per-base error model (pbem) coverage
# filter of every abemus ".onco.tsv" results table, re-apply the filter, and
# write out both the rescaled full SNV table and a missense/nonsense-only
# subset next to the original file.
#
# Usage: Rscript apply_pbem_scaling_factor_on_oncotab.R <Results_folder_abemus> <SF>
args <- commandArgs(trailingOnly = TRUE)
if(length(args)!=2){
  message("\nERROR:\t2 argument required")
  message("\nUSAGE:\tRscript apply_pbem_scaling_factor_on_oncotab.R <Results_folder_abemus> <Pbem scaling factor>\n")
  cat(paste("\t[1] path\tResults folder generated by abemus","\n"))
  cat(paste("\t[2] SF \tpbem scaling factor to apply","\n\n"))
  quit()
}
library(data.table)
# Abemus Results folder
wd = args[1]
# Multiplicative factor applied to the pbem coverage threshold.
SF = as.numeric(args[2])
# One sub-directory per sample/case inside the abemus results folder.
ld = list.dirs(path = wd,full.names = T,recursive = F)
#x=ld[1]
for(x in ld){
  message(x)
  # Output files are written into the sample directory itself.
  setwd(x)
  # NOTE(review): assumes exactly one .onco.tsv per directory; if several
  # matched, read.delim() below would receive a vector — confirm upstream layout.
  pmtab.file=list.files(x,full.names = T,pattern = "\\.onco\\.tsv$")
  a=read.delim(pmtab.file,as.is = T,stringsAsFactors = F)
  # Rescale the pbem-based threshold, then re-flag calls whose case allelic
  # fraction meets or exceeds it.
  a$filter.pbem_coverage = a$filter.pbem_coverage * SF
  a$pass.filter.pbem_coverage = 0
  a$pass.filter.pbem_coverage[which(a$af_case >= a$filter.pbem_coverage)] = 1
  # Keep only calls that pass the rescaled filter.
  a=a[which(a$pass.filter.pbem_coverage==1),]
  # print non-synonymous snvs
  a.miss = a[which(a$Variant_Classification %in% c("Missense_Mutation","Nonsense_Mutation")),]
  out.name=gsub(basename(pmtab.file),pattern = "\\.tsv",replacement = paste0(".miss.",SF,".tsv"))
  write.table(x = a.miss,file = out.name,quote = F,sep = "\t",row.names = F,col.names = T)
  # print all snvs
  out.name=gsub(basename(pmtab.file),pattern = "\\.tsv",replacement = paste0(".",SF,".tsv"))
  write.table(x = a,file = out.name,quote = F,sep = "\t",row.names = F,col.names = T)
}
2dcabc6ac48d1ccc941b9f7abe97fafc1ea04c10
9fb76e69e684b8c7af76feb3ab303375a3b19b85
/clustering/code_clustering.r
1e2ad56ed36a39709e23613b2b2767d70fb8e058
[]
no_license
JayTheJay/Text-Mining_Depression_Dataset
638c8a00d47233ecbef9ef2d7602198f96b4d51f
858829d89aacbad6ad7b8c8c339614a9ad6d66cd
refs/heads/master
2022-12-06T14:33:01.744642
2020-08-30T16:39:24
2020-08-30T16:39:24
291,509,996
0
0
null
null
null
null
UTF-8
R
false
false
5,439
r
code_clustering.r
## Text-mining clustering of the depression tweet dataset: clean the corpus,
## build document-term matrices at two sparsity levels, explore term
## frequencies/associations, and cluster terms (hierarchical + k-means).
## Script-level working directory kept from the original analysis.
setwd("C:\\Users\\Maciek\\Desktop\\TextMiningProject")

library(jsonlite)
library(tm)
library(SnowballC)
library(tidytext)
library(dplyr)
library(wordcloud)
library(cluster)
library(fpc)
library(ggplot2)

data <- fromJSON(txt = "data\\texts.txt")

## --- Corpus cleaning -------------------------------------------------------
cleaned_txt <- Corpus(VectorSource(data))
cleaned_txt <- tm_map(cleaned_txt, content_transformer(tolower))
## Extra tokens that survive stopwords("english") (curly quotes, ellipses, ...)
stop_words = c("’re", "’s", "’m", "’", "…", '“', 'httpstco…', "’r")
cleaned_txt <- tm_map(cleaned_txt, removeWords, c(stopwords("english"), stop_words))
cleaned_txt <- tm_map(cleaned_txt, removePunctuation)
cleaned_txt <- tm_map(cleaned_txt, removeNumbers)
cleaned_txt <- tm_map(cleaned_txt, stripWhitespace)
cleaned_txt <- tm_map(cleaned_txt, stemDocument)

## --- Document-term matrix --------------------------------------------------
dtm <- DocumentTermMatrix(cleaned_txt)
## Round-trip through a tidy frame to drop characters tm_map could not remove.
xxx <- tidy(dtm)
dim(xxx)
head(xxx)
xxx <- xxx %>% filter(term != '…' & term != '’' & term != '“')
dtm <- xxx %>% cast_dtm(document, term, count)

## Drop sparse terms (maximum sparsity 0.99).
dtms_2 <- removeSparseTerms(dtm, 0.99)
freq <- colSums(as.matrix(dtms_2))

## Quick look at frequent terms.
findFreqTerms(dtms_2, lowfreq=20)
findFreqTerms(dtms_2, lowfreq=50)
findFreqTerms(dtms_2, lowfreq=100)
findFreqTerms(dtms_2, lowfreq=1000)

wf <- data.frame(word=names(freq), freq=freq)
mean(wf$freq)   # 280.816
median(wf$freq) # 187

## Words with frequency above the mean.
p <- ggplot(subset(wf, freq>281), aes(reorder(word, -freq), freq))
p <- p + geom_bar(stat="identity", fill='#cf5a86')
p <- p + theme(axis.text.x=element_text(angle=45, hjust=1))
p

## Words with frequency above the median.
p2 <- ggplot(subset(wf, freq>187), aes(reorder(word, -freq), freq))
p2 <- p2 + geom_bar(stat="identity", fill='#cf5a86')
p2 <- p2 + theme(axis.text.x=element_text(angle=45, hjust=1))
p2

## Term correlations.
findAssocs(dtms_2, c("sad" , "health"), corlimit=0)
findAssocs(dtms_2, "ppl", corlimit=0.0)
cor(as.matrix(dtms_2)[,"sad"], as.matrix(dtms_2)[,"play"])
cor(as.matrix(dtms_2)[,"bitch"], as.matrix(dtms_2)[,"fuck"])
cor(as.matrix(dtms_2)[,"sad"], as.matrix(dtms_2)[,"health"])

## Word cloud.
set.seed(1234)
wordcloud(names(freq), freq, max.words=100, rot.per=0.35,
          colors=brewer.pal(8, "Dark2"), random.order=FALSE)

#### CLUSTERING (sparsity 0.99) ####
d <- dist(t(dtms_2), method="euclidian")
fit <- hclust(d=d, method="complete")
plot.new()
plot(fit, hang=-1)
groups <- cutree(fit, k=3)
rect.hclust(fit, k=2, border="red")

## Elbow method to pick k.
## FIX: `wss` was indexed before it existed ("object 'wss' not found");
## preallocate it before the loop.
wss <- numeric(29)
for (i in 2:29) wss[i] <- sum(kmeans(d,centers=i,nstart=25)$withinss)
plot(2:29, wss[2:29], type="b",
     xlab="Number of Clusters", ylab="Within groups sum of squares")

d <- dist(t(dtms_2), method="euclidian")
kfit <- kmeans(d, 2)
clusplot(as.matrix(d), kfit$cluster, color=TRUE, shade=TRUE,
         labels=2, lines=0, main = 'Cusplot')

d <- dist(t(dtms_2), method="euclidian")
kfit <- kmeans(d, 3)
clusplot(as.matrix(d), kfit$cluster, color=TRUE, shade=TRUE, labels=2, lines=0)

######## Clustering at a stricter sparsity level (0.97) ########
dtms_3 <- removeSparseTerms(dtm, 0.97)
freq_2 <- colSums(as.matrix(dtms_3))
wf_2 <- data.frame(word=names(freq_2), freq=freq_2)
mean(wf_2$freq)   # 653.8276
median(wf_2$freq) # 444

## FIX: these two plots previously subset `wf` (the 0.99 table) using the
## thresholds computed from `wf_2`; they now use `wf_2` itself.
## Words with frequency above the median.
p <- ggplot(subset(wf_2, freq>444), aes(reorder(word, -freq), freq))
p <- p + geom_bar(stat="identity", fill='#cf5a86')
p <- p + theme(axis.text.x=element_text(angle=45, hjust=1))
p

## Words with frequency above the mean.
p2 <- ggplot(subset(wf_2, freq>654), aes(reorder(word, -freq), freq))
p2 <- p2 + geom_bar(stat="identity", fill='#cf5a86')
p2 <- p2 + theme(axis.text.x=element_text(angle=45, hjust=1))
p2

## Correlations.
findAssocs(dtms_3, c("sad" , "health"), corlimit=0)
findAssocs(dtms_3, "mental", corlimit=0.0)
cor(as.matrix(dtms_3)[,"sad"], as.matrix(dtms_3)[,"health"])

## Word cloud.
set.seed(1234)
wordcloud(names(freq_2), freq_2, max.words=100, rot.per=0.35,
          colors=brewer.pal(8, "Dark2"), random.order=FALSE)

## CLUSTERING (sparsity 0.97)
d <- dist(t(dtms_3), method="euclidian")
fit <- hclust(d=d, method="complete")
plot.new()
plot(fit, hang=-1)
groups <- cutree(fit, k=3)
rect.hclust(fit, k=2, border="red")

## Elbow method (preallocated, as above; index 1 is unused by the plot).
wss <- numeric(19)
for (i in 2:19) wss[i] <- sum(kmeans(d,centers=i,nstart=25)$withinss)
plot(2:19, wss[2:19], type="b",
     xlab="Number of Clusters", ylab="Within groups sum of squares")

d <- dist(t(dtms_3), method="euclidian")
kfit <- kmeans(d, 2)
clusplot(as.matrix(d), kfit$cluster, color=TRUE, shade=TRUE,
         labels=2, lines=0, main = 'Cusplot')

d <- dist(t(dtms_3), method="euclidian")
kfit <- kmeans(d, 3)
clusplot(as.matrix(d), kfit$cluster, color=TRUE, shade=TRUE, labels=2, lines=0)
77e317ce98dbd70f2c014e632bc7c2accaff88b0
e6131689004ff6d8309da84cb3cd1032b26beaa9
/inst/doc/pseudoprime/man/fermat.test.Rd
bde041b1dbe08137e3d208d01265f124755b7ed9
[]
no_license
cran/roxygen
b2ae93413d3a0f4d0a40d98b2bbe642e6df0382d
561faff04171237ec8720f2e2c5cb423c802d30d
refs/heads/master
2016-09-06T10:02:45.937969
2011-12-23T00:00:00
2011-12-23T00:00:00
null
0
0
null
null
null
null
UTF-8
R
false
false
799
rd
fermat.test.Rd
\name{fermat.test} \alias{fermat.test} \title{Test an integer for primality with Fermat's little theorem.} \usage{fermat.test(n)} \description{Test an integer for primality with Fermat's little theorem.} \details{Fermat's little theorem states that if \eqn{n} is a prime number and \eqn{a} is any positive integer less than \eqn{n}, then \eqn{a} raised to the \eqn{n}th power is congruent to \eqn{a\ modulo\ n}{a modulo n}.} \value{Whether the integer passes the Fermat test for a randomized \eqn{0 < a < n}} \note{\code{fermat.test} doesn't work for integers above approximately fifteen because modulus loses precision.} \references{\url{http://en.wikipedia.org/wiki/Fermat's_little_theorem}} \author{Peter Danenberg \email{pcd@roxygen.org}} \arguments{\item{n}{the integer to test for primality}}
e03c454854f9599718a82dc3641f3cb3d66e5adf
94dcbff4ef2072f5a5ecbb95af1f259f31ad3b20
/man/int.est.pl.Rd
9096fbd34003202bdb8bee3c08be376e3bf66f81
[]
no_license
DistanceDevelopment/WiSP
bf51406076ded020098f4973003eafc05a45d437
e0e2665d6b3b49ba634944b4bb7303be41620e5a
refs/heads/master
2021-06-05T02:54:08.957306
2020-09-14T20:03:59
2020-09-14T20:03:59
9,773,511
0
1
null
2020-09-14T09:30:06
2013-04-30T15:05:50
R
ISO-8859-16
R
false
false
2,900
rd
int.est.pl.Rd
\name{int.est.pl}
\alias{int.est.pl}
\title{Plot Sampling Method Abundance Estimation: Interval Estimate}
\description{ This function calculates confidence intervals for group abundance for the plot sampling method. }
\usage{ int.est.pl(samp, HT=FALSE, vlevels = c(0.025, 0.975), ci.type = "boot.nonpar", nboot = 999, plot = TRUE, seed = NULL, ...) }
\arguments{
\item{samp}{object of class `sample.pl'.}
\item{HT}{if FALSE, the abundance estimate produced is the MLE, if TRUE it is the Horvitz-Thompson estimate.}
\item{vlevels}{vector of percentage levels for confidence interval.}
\item{ci.type}{method for constructing the confidence interval. Possible methods are * `normal' for a CI based on assumed normality of the estimator, * `boot.par' for a parametric bootstrap CI, * `boot.nonpar' for a nonparametric bootstrap CI.}
\item{nboot}{number of bootstrap replications.}
\item{plot}{if true the distribution of the estimator of N is to be plotted.}
\item{seed}{the number passed to set.seed() to initialise the random number generator.}
\item{...}{other plot parameters}
}
\value{ An object of class `int.est.pl' containing the following items:
\item{levels}{percentage levels for confidence interval}
\item{ci}{the confidence interval}
\item{boot.mean}{mean of bootstrap estimates}
\item{boot.dbn}{full set of nboot bootstrap estimates.}
}
\seealso{ \code{\link{generate.sample.pl}}, \code{\link{point.est.pl}} \code{\link{summary.sample.pl}}, \code{\link{plot.sample.pl}} \code{\link{set.seed}} }
\examples{
pl.reg <- generate.region(x.length = 100, y.width = 50)
pl.dens <- generate.density(pl.reg)
pl.poppars<-setpars.population(density.pop=pl.dens, number.groups = 100, size.method = "poisson", size.min = 1, size.max = 5, size.mean = 1, exposure.method = "beta", exposure.min = 2, exposure.max = 10, exposure.mean = 6, exposure.shape = 1)
pl.pop <- generate.population(pl.poppars, seed=456)
pl.despars<-setpars.design.pl(pl.reg, n.interval.x = 10, n.interval.y = 20,method = "random",
area.covered = 0.2) pl.des <- generate.design.pl(pl.despars, seed=789) pl.samp<-generate.sample.pl(pl.pop, pl.des, seed=101112) # normal-based CI pl.int.est.norm<-int.est.pl(pl.samp, vlevels = c(0.025, 0.975), ci.type = "normal", nboot = 99, plot = T, seed=1) summary(pl.int.est.norm) plot(pl.int.est.norm) # parametric bootstrap pl.int.est.pbs<-int.est.pl(pl.samp, vlevels = c(0.025, 0.975), ci.type = "boot.par", nboot = 99, plot = F, seed=NULL) summary(pl.int.est.pbs) plot(pl.int.est.pbs, nclass=20) # nonparametric bootstrap pl.int.est.npbs<-int.est.pl(pl.samp, vlevels = c(0.025, 0.975), ci.type = "boot.nonpar", nboot = 99, plot = F, seed=3) summary(pl.int.est.npbs) plot(pl.int.est.npbs) } \keyword{}
b417a709740756b54ff0d3c4216f3186ac8dcd46
735eab5da1eee942ec4e490a56cc8122e9aabe51
/Project 1 Chess - Jill Anderson.R
4fb413dad2ca34650d523071e0f6ea29eade2a4a
[]
no_license
jillenergy/Project-1-Chess-Tournament
ce496a9617638ab1702459d9c028e0a870056cb0
7f2b920def49087b2c8e05c8bb8344d695b910ae
refs/heads/master
2021-07-02T09:59:32.302320
2017-09-21T02:26:50
2017-09-21T02:26:50
104,290,770
0
0
null
null
null
null
UTF-8
R
false
false
7,021
r
Project 1 Chess - Jill Anderson.R
## Assignment: read the raw data of a chess tournament and build a table of:
## Player's Name, Player's State, Total Points, Player's Pre-Rating, and
## Average Pre-Rating of Opponents.
## Example first row: Gary Hua, ON, 6.0, 1794, 1605.

library(stringr)
library(DT)
library(ggplot2)

## Load the raw fixed-layout text cross table.
tournamentraw <- readLines("~/Desktop/tournamentinfo.txt")

## Inspect the raw layout: a two-line header, entries of two lines each,
## rows of hyphens between entries; 64 players in total.
head(tournamentraw)
tail(tournamentraw)
NROW(tournamentraw)
NCOL(tournamentraw)

tournamentrows <- length(tournamentraw)

## Each player occupies two physical lines, repeating every 3 lines after the
## header: line 1 = name/total points/round results, line 2 = state/ratings.
PlayerNameRows <- tournamentraw[seq(5, tournamentrows, 3)]
head(PlayerNameRows)
PlayerStateRows <- tournamentraw[seq(6, tournamentrows, 3)]
head(PlayerStateRows)

## Player name: the 2-3 whitespace-separated words at the start of the name row.
PlayerName <- str_trim(str_extract(PlayerNameRows, "(\\w+\\s){2,3}"))
head(PlayerName)

## State: the first word of the state row.
PlayerState <- str_extract(PlayerStateRows, "\\w+")
head(PlayerState)

## Total points: the first decimal number on the name row.
TotalPoints <- as.numeric(str_extract(PlayerNameRows, "\\d+\\.\\d+"))
head(TotalPoints)

## Pre-rating: first 3-4 digit run bounded by non-digits, then the digits alone.
PlayerPreRating <- str_extract(PlayerStateRows, "[^\\d]\\d{3,4}[^\\d]")
PlayerPreRating <- as.integer(str_extract(PlayerPreRating, "\\d+"))
head(PlayerPreRating)

## Opponent pair numbers for each round, taken from the "<digits>|" cells.
FindOpponents <- str_extract_all(PlayerNameRows, "\\d+\\|")
FindOpponents <- str_extract_all(FindOpponents, "\\d+")
head(FindOpponents)

## Pair number (row id) of each player, used to index into FindOpponents.
Pair <- as.integer(str_extract(PlayerNameRows, "\\d+"))
head(Pair)

## Mean pre-rating of each player's opponents.
## (seq_along() rather than 1:NROW() so an empty vector is handled safely.)
AveOpponentRating <- Pair
for (i in seq_along(Pair)) {
  AveOpponentRating[i] <-
    mean(PlayerPreRating[as.numeric(unlist(FindOpponents[Pair[i]]))])
}
head(AveOpponentRating)

## Round the averages to the nearest whole rating point.
AveOpponentRating <- round(AveOpponentRating)
head(AveOpponentRating)

## Assemble and label the final table.
FinalData <- data.frame(PlayerName, PlayerState, TotalPoints,
                        PlayerPreRating, AveOpponentRating)
head(FinalData)
colnames(FinalData) <- c("Player's Name", "Player's State",
                         "Total Number of Points", "Player's Pre-Rating",
                         "Average Rating of Opponents")
head(FinalData)

## Export and display the final data set.
write.csv(FinalData,file="Chess Data Extracted Jill Anderson.csv")
datatable(FinalData)

## Basic statistics.
summary(FinalData$`Player's Pre-Rating`)
summary(FinalData$`Average Rating of Opponents`)

## Visualize the distributions.
hist(FinalData$`Player's Pre-Rating`, breaks = 30,
     main = "Distribution of Player Ratings Pre-Tournament",
     xlab = "Player's Pre-Rating", ylab = "Count")
hist(FinalData$`Total Number of Points`, breaks = 10,
     main = "Distribution of Total Points",
     xlab = "Total Points per Player", ylab = "Count")

## One-row summary for the players of a single state: player count plus the
## rounded means of total points, pre-rating and opponents' rating.
## (Replaces three copy-pasted per-state blocks; selects rows by the actual
## "Player's State" column rather than relying on a global vector.)
summarize_state <- function(data, state) {
  state_data <- data[which(data[["Player's State"]] == state), ]
  out <- data.frame(
    state,
    NROW(state_data),
    round(mean(state_data$`Total Number of Points`)),
    round(mean(state_data$`Player's Pre-Rating`)),
    round(mean(state_data$`Average Rating of Opponents`))
  )
  colnames(out) <- c("State", "Number of Players",
                     "Average Total Number of Points",
                     "Average Player Pre-Rating",
                     "Average Rating of Opponents")
  out
}

FinalDataMISum <- summarize_state(FinalData, "MI")
FinalDataMISum
FinalDataONSum <- summarize_state(FinalData, "ON")
FinalDataONSum
FinalDataOHSum <- summarize_state(FinalData, "OH")
FinalDataOHSum

## Combine the per-state summaries into one comparison table.
FinalDataSum <- rbind(FinalDataMISum, FinalDataONSum, FinalDataOHSum)
datatable(FinalDataSum)
802d56164faa42664f24f29f38aaf100aa0af132
68563acf6c42af465caa02612f9b536c366ab4c1
/SEEerver.r
895976c5d0022eaeea2890344dbb13c08ccda307
[]
no_license
beekash222/ETL_ML_Xgboost
6bf37e1dcb467368aae781456e7544c3b11f89d1
1ff7e8fd4aa190acd06dd2a96220c54bf71df3b0
refs/heads/master
2020-03-17T00:58:02.923194
2020-01-23T19:22:50
2020-01-23T19:22:50
133,135,574
0
0
null
null
null
null
UTF-8
R
false
false
5,718
r
SEEerver.r
# Shiny server for an ETL source-vs-target reconciliation tool.
# The user uploads an Excel mapping workbook (input$file1); sheet 2 holds the
# column mapping ("SOURCE COLUMN" / "TARGET COLUMN" / "TRANSFORMATION RULE").
# Handlers query an Oracle DB over JDBC and render comparison tables.
#
# NOTE(review): SQL is built by pasting user-supplied table names (SQL
# injection risk), credentials arrive as plain inputs, the ojdbc jar path is
# hard-coded, and no dbDisconnect() is ever called — connections leak.
server <- function(input, output,session) {
  # rpart: preview sheet 1 of the uploaded workbook.
  observeEvent(input$rpart, {
    output$contents <- renderTable({
      req(input$file1)
      inFile <- input$file1
      df <- read_excel(inFile$datapath, 1)
    })
  })
  ###########Spec Column transformation#############
  # rpart1: preview sheet 3 of the uploaded workbook.
  observeEvent(input$rpart1, {
    output$contents1 <- renderTable({
      req(input$file1)
      inFile <- input$file1
      df <- read_excel(inFile$datapath, 3)
    })
  })
  # rpart2: preview sheet 2 (the mapping sheet).
  observeEvent(input$rpart2, {
    output$contents2 <- renderTable({
      req(input$file1)
      inFile <- input$file1
      df <- read_excel(inFile$datapath, 2)
    })
  })
  ########
  # rpart5: cell-by-cell comparison of source vs target for "Direct Move"
  # columns; mismatching cells are rewritten as "SOURCE -> x : y <- TARGET".
  observeEvent(input$rpart5, {
    library(readxl)
    library(RJDBC)
    req(input$file1)
    inFile <- input$file1
    df1 <- read_excel(inFile$datapath, 2)
    #df1 <- read_xlsx("C://Users/user//Desktop//ML_21//zz.xlsx",2)
    # Only columns mapped 1:1 (no transformation) are compared.
    df1 <- df1[df1$`TRANSFORMATION RULE` == 'Direct Move',]
    ###########SOURCE QUERY FRAME###############
    str <- list()
    str <- paste(df1$`SOURCE COLUMN`,collapse = ",")
    drv <- JDBC("oracle.jdbc.driver.OracleDriver","C:/Users/user/Downloads/ojdbc6.jar")
    jdbcConnection <- dbConnect(drv, "jdbc:oracle:thin:@//localhost:1521/xe",input$`USER NAME`,input$PASSWORD)
    SRC_TAB <- input$SRC
    SRC_QUERY <- paste("select",str,"from",SRC_TAB)
    SRC <- dbGetQuery(jdbcConnection, SRC_QUERY)
    #########TARGET QUERY FRAME############
    str1 <- list()
    str1 <- paste(df1$`TARGET COLUMN`,collapse = ",")
    library(RJDBC)
    drv <- JDBC("oracle.jdbc.driver.OracleDriver","C:/Users/user/Downloads/ojdbc6.jar")
    jdbcConnection <- dbConnect(drv, "jdbc:oracle:thin:@//localhost:1521/xe",input$`USER NAME`,input$PASSWORD)
    Trg_TAB <- input$TRG
    TRG_QUERY <- paste("select",str1,"from",Trg_TAB)
    TRG <- dbGetQuery(jdbcConnection, TRG_QUERY)
    # NAs are replaced by the sentinel 'SPACE' so != below never yields NA.
    SRC[is.na(SRC)]<-'SPACE'
    TRG[is.na(TRG)]<-'SPACE'
    # NOTE(review): assumes SRC and TRG have identical row order and
    # dimensions; rows are compared positionally, not by key.
    for(i in 1:nrow(SRC)) {
      for(j in 1:ncol(SRC)) {
        if(SRC[i,j] != TRG[i,j]) {
          SRC[i,j] <- paste("SOURCE ->",SRC[i,j],":",TRG[i,j],"<- TARGET")
        }
      }
    }
    output$contents5 <-renderTable({
      SRC
    })
  }
  )
  #################################
  # rpart4: summary table — row counts of source and target, number of
  # mismatching rows (via set difference), and which columns differ.
  observeEvent(input$rpart4, {
    #######source count###########
    drv <- JDBC("oracle.jdbc.driver.OracleDriver","C:/Users/user/Downloads/ojdbc6.jar")
    jdbcConnection <- dbConnect(drv, "jdbc:oracle:thin:@//localhost:1521/xe",input$`USER NAME`,input$PASSWORD)
    SRC_TAB <- input$SRC
    SRC_CNT <- paste("select count(*) from",SRC_TAB)
    SRC_COUNT <- dbGetQuery(jdbcConnection,SRC_CNT )
    #SRC_COUNT_NEW <- paste("SOURCE COUNT IS ->",SRC_COUNT)
    #######target count###########
    drv <- JDBC("oracle.jdbc.driver.OracleDriver","C:/Users/user/Downloads/ojdbc6.jar")
    jdbcConnection <- dbConnect(drv, "jdbc:oracle:thin:@//localhost:1521/xe",input$`USER NAME`,input$PASSWORD)
    TRG_TAB <- input$TRG
    TRG_CNT <- paste("select count(*) from",TRG_TAB)
    TRG_COUNT <- dbGetQuery(jdbcConnection,TRG_CNT )
    #TRG_COUNT_NEW <- paste("TARGET COUNT IS ->",TRG_COUNT)
    ############mismatch#
    req(input$file1)
    inFile <- input$file1
    df1 <- read_excel(inFile$datapath, 2)
    #df1 <- read_xlsx("C://Users/user//Desktop//ML_21//zz.xlsx",2)
    df1 <- df1[df1$`TRANSFORMATION RULE` == 'Direct Move',]
    ###########SOURCE QUERY FRAME###############
    str <- list()
    str <- paste(df1$`SOURCE COLUMN`,collapse = ",")
    drv <- JDBC("oracle.jdbc.driver.OracleDriver","C:/Users/user/Downloads/ojdbc6.jar")
    jdbcConnection <- dbConnect(drv, "jdbc:oracle:thin:@//localhost:1521/xe",input$`USER NAME`,input$PASSWORD)
    SRC_TAB <- input$SRC
    SRC_QUERY <- paste("select",str,"from",SRC_TAB)
    SRC <- dbGetQuery(jdbcConnection, SRC_QUERY)
    #########TARGET QUERY FRAME############
    str1 <- list()
    str1 <- paste(df1$`TARGET COLUMN`,collapse = ",")
    library(RJDBC)
    drv <- JDBC("oracle.jdbc.driver.OracleDriver","C:/Users/user/Downloads/ojdbc6.jar")
    jdbcConnection <- dbConnect(drv, "jdbc:oracle:thin:@//localhost:1521/xe",input$`USER NAME`,input$PASSWORD)
    Trg_TAB <- input$TRG
    TRG_QUERY <- paste("select",str1,"from",Trg_TAB)
    TRG <- dbGetQuery(jdbcConnection, TRG_QUERY)
    # Align column names so the set operations below compare like-for-like.
    colnames(SRC) <- colnames(TRG)
    #minus <- setdiff(SRC,TRG)
    # Rows present in one table but not the other (dplyr setdiff on frames).
    new_data <- setdiff(SRC,TRG)
    new_data1 <- setdiff(TRG,SRC)
    new_data3 <- rbind(new_data,new_data1)
    # Keep only the columns that actually vary among the mismatching rows.
    new_data3 <- new_data3[vapply(new_data3, function(x) length(unique(x)) > 1, logical(1L))]
    d <- names(new_data3)
    d <- paste(d,collapse = ",")
    # Assemble a 4-row summary (query/label in col 1, value in col 2).
    df_count <- data.frame(matrix(ncol =2,nrow =4))
    x <- c("QUERY","COUNT")
    colnames(df_count) <- x
    df_count$QUERY[1] <- SRC_CNT
    df_count$QUERY[2] <- TRG_CNT
    df_count$QUERY[3] <- "Number of Mismatch Record"
    df_count$QUERY[4] <- "Mismatch Columns"
    df_count$COUNT[1] <- SRC_COUNT
    df_count$COUNT[2] <- TRG_COUNT
    df_count$COUNT[3] <- nrow(new_data)
    df_count$COUNT[4] <- d
    output$contents4 <-renderTable({
      df_count
    })
  })
  #####download############
}
5a6c399528e5c67bc57e9ac6cb8221dcebc0bf9a
58554949cc0ed4d1d940136c496072f167a1b485
/r/R/col.R
9e2a5aaadcc0d85ce84dde556d12dffbab2ebf54
[ "Apache-2.0" ]
permissive
wbeck32/libcore
284568484288a4cee100e172f250f5ee7c1da411
8d01b38f16ab3f27c88a3cfcc1344f39963aa435
refs/heads/master
2021-08-11T16:21:28.463209
2017-11-13T22:27:05
2017-11-13T22:27:05
110,302,874
0
0
null
2017-11-10T23:40:30
2017-11-10T23:40:30
null
UTF-8
R
false
false
580
r
col.R
# Extract a single column from a table, selected by 1-based position or by
# column name.
#
# table  — a table (validated with the project helper is_table()).
# column — an integer position (1..ncol) or a string column name.
#
# Returns the selected column via `[`, so the drop semantics of the
# underlying table class apply. Stops with a descriptive error on any
# invalid selector.
col <- function(table, column) {
  assert(is_table(table), "`table` parameter must be a table")
  if (is_integer(column)) {
    # Positions are 1-based. The guard previously read `column < 0`, which
    # let 0 slip through to `table[, 0]` (an empty selection) despite the
    # message promising "greater than zero"; `column < 1` matches the message.
    if (column < 1) {
      stop("`column` parameter must be greater than zero")
    }
    if (column > ncol(table)) {
      stop("`column` parameter is greater than the number of columns in table")
    }
  } else if (is_string(column)) {
    if (!(column %in% colnames(table))) {
      stop("`column` parameter does not match a column name in table")
    }
  } else {
    stop("`column` parameter must be an integer or a string")
  }
  table[, column]
}
5475b971f90b999be36ea5ff026e037eca75bb62
310b4062d798583be0f76d173e1b369a2c8e4a20
/R/plotPhiSeries.R
23d9b83f0ac8aefec0d2c776b95c3438e3fda5c8
[]
no_license
stcolema/mdiHelpR
d5600d7259ddcf8cdd1402820a553c2df63b1462
cbbed03ab2ba71b84cdf3e3da119ccd239c75630
refs/heads/master
2023-06-09T16:14:08.491622
2021-06-30T12:16:41
2021-06-30T12:16:41
225,420,253
2
1
null
null
null
null
UTF-8
R
false
false
3,543
r
plotPhiSeries.R
#!/usr/bin/env Rscript
# Function to save plot of phi parameter from MDI from each iteration of MCMC
#' Plot the MDI phi (dataset-association) parameters as MCMC time series.
#'
#' For each MCMC output file and each ordered pair of datasets (j < k), saves
#' a scatter plot of the sampled Phi_jk values against iteration into
#' `file_path`/Phi_series_plots/, and heatmaps the mean Phi (over the kept
#' iterations) across all dataset pairs.
#'
#' @param mcmc_out_lst List of MCMC output data frames, one per file; each is
#'   expected to contain columns whose names contain "Phi_jk".
#' @param file_path Directory to write plots into (a "Phi_series_plots/"
#'   subdirectory is created inside it).
#' @param num_files Number of entries of `mcmc_out_lst` to process.
#' @param num_datasets Number of datasets; phis are indexed over pairs j < k.
#' @param start_index First (post-burn-in) sample index to plot.
#' @param eff_n_iter Last sample index to plot.
#' @param save_plots If TRUE the heatmap is written to file; otherwise drawn.
#' @param col_pal Colour palette for the heatmap.
#' @param burn Burn-in count (used only in the unused subtitle).
#' @param thin Thinning interval; scales the iteration axis.
#'
#' NOTE(review): `dataset_names`, `n_iter`, `plot_type` and `save_path` are
#' neither parameters nor locals — they must exist in the calling/global
#' environment, and `save_path` looks like it should be `loc_dir` (the other
#' saves use it) — TODO confirm.
#' @importFrom dplyr select contains
#' @importFrom ggplot2 ggplot aes geom_point labs ggsave
#' @importFrom magrittr set_colnames set_rownames
#' @importFrom pheatmap pheatmap
#' @export
plotPhiSeries <- function(mcmc_out_lst,
                          file_path,
                          num_files,
                          num_datasets,
                          start_index,
                          eff_n_iter,
                          save_plots = F,
                          col_pal = grDevices::colorRampPalette(c("white", "#146EB4"))(100),
                          burn = 0,
                          thin = 1) {
  phis <- list()
  count <- 0

  # print("Saving plots of phi as a time series.")

  # Create directory to save this output in
  loc_dir <- paste0(file_path, "Phi_series_plots/")
  dir.create(loc_dir, showWarnings = FALSE)

  # Iterate over the files and then the combinations of phis
  for (i in 1:num_files) {

    # For heatmapping the phis across datasets
    phi_comparison_df <- as.data.frame(matrix(
      nrow = num_datasets,
      ncol = num_datasets,
      0
    )) %>%
      magrittr::set_colnames(dataset_names) %>%
      magrittr::set_rownames(dataset_names)

    for (j in 1:(num_datasets - 1)) {
      for (k in (j + 1):num_datasets) {

        # Count for the index of the list object where phis are stored
        count <- count + 1

        col_name <- paste0("Phi_", j, k)

        # Pull out the column for the relevant phi
        phis[[count]] <- mcmc_out_lst[[i]] %>%
          dplyr::select(dplyr::contains(col_name))

        # Which tissues
        dataset_j <- dataset_names[[j]]
        dataset_k <- dataset_names[[k]]

        # Create plot labels
        plot_title <- bquote(Phi ~ "for" ~ .(dataset_j) ~ "and" ~ .(dataset_k))
        y_axis_title <- substitute(Phi[ind1], list(ind1 = paste0(j, k)))
        sub_title <- paste("Iterations", (burn + thin), "through", n_iter)

        # The save file name
        save_title <- paste0(loc_dir, "file_", i, "_Phi_", j, k, plot_type)

        # Put things in a data frame to use ggplot.
        # Note: `:` binds tighter than `*`, so Index is
        # (start_index:eff_n_iter) * thin — each kept index scaled by thin.
        my_data_frame <- data.frame(
          Index = start_index:eff_n_iter * thin,
          Phi = phis[[count]][[1]][start_index:eff_n_iter]
        )

        # Plot
        # NOTE(review): the ggplot object is neither assigned nor printed;
        # ggsave() below relies on last_plot(), which may not be this plot
        # inside a function — consider assigning and passing `plot =`. TODO.
        ggplot2::ggplot(data = my_data_frame, ggplot2::aes(x = Index, y = Phi)) +
          ggplot2::geom_point() +
          ggplot2::labs(
            title = plot_title,
            # subtitle = sub_title,
            y = y_axis_title,
            x = "Iteration"
          )

        # Save
        ggplot2::ggsave(save_title)

        # Mean phi over the kept iterations, mirrored into both triangles.
        mean_phi <- phis[[count]][[1]][start_index:eff_n_iter] %>%
          mean()

        phi_comparison_df[j, k] <- phi_comparison_df[k, j] <- mean_phi
      }
    }

    # Heatmap the average phi (after some burn in)
    phi_pheatmap_title <- "Heatmap comparing phis across datasets"
    phi_pheatmap_file_name <- paste0(save_path, "Phi_heatmap_", i, plot_type)

    if (save_plots) {
      pheatmap::pheatmap(phi_comparison_df,
        main = phi_pheatmap_title,
        cluster_rows = F,
        cluster_cols = F,
        filename = phi_pheatmap_file_name,
        color = col_pal
      )
      # dev.off()
    } else {
      pheatmap::pheatmap(phi_comparison_df,
        main = phi_pheatmap_title,
        cluster_rows = F,
        cluster_cols = F,
        color = col_pal
        # filename = phi_pheatmap_file_name
      )
    }
  }
  # phis
}
1f4d9905a4cc3f0ce6ea21e7ae8f0666e8aa3815
52792b52803988179e18a52ca43fc7684abd3df2
/soma_temporary_operations_public.R
99e82cdafb465b2eacc7cc34ad5aed42b8fd8d68
[]
no_license
bsmiller25/db-management
3e22f2317ce28f2629f506c1ef7eb55aaac12ef2
27979ffc32bc3922b9073ab286ef6d6c64756cca
refs/heads/master
2021-01-10T12:54:01.969676
2016-01-20T20:20:10
2016-01-20T20:20:10
50,057,542
0
0
null
null
null
null
UTF-8
R
false
false
3,390
r
soma_temporary_operations_public.R
### Collect Public SOMA Temporary Operations Data
###
### Downloads the NY Fed's temporary open-market-operations (TOMO) history as
### an Excel file, cleans it, and loads it into the Postgres table
### `soma_temporary_operations`.
###
### task = "create": rebuilds the table from 2007-01-03 to the last business
###                  day, sets column types, primary key, and grants.
### task = "update": appends rows from the day after the latest stored
###                  deal_date through today.
###
### Side effects: changes the working directory, downloads "tomo.xls" into it,
### and issues DDL/DML against the database. Returns nothing useful.
soma_temporary_operations_public <- function(task = c("update","create")){

  # NOTE(review): require() returns FALSE rather than erroring if a package is
  # missing; library() would fail fast here.
  require("DBI")
  require("RPostgreSQL")
  require("XML")
  require("gdata")
  require("tis")

  task <- match.arg(task)

  # parameters to change if run elsewhere
  setwd("/mma/prod/MBS/MBS_Portfolio_Analytics/")
  dbname <- "ma"
  host <- "sqldev"
  schema_location <- "set search_path to mma;"
  all_permissions <- c("mma")
  select_permissions <- c("mma","dma")

  # connect to SQL
  drv <- dbDriver("PostgreSQL")
  con <- dbConnect(drv,dbname=dbname,host=host)
  # NOTE(review): dbSendQuery is used for non-SELECT statements throughout;
  # dbExecute/dbClearResult would avoid leaving open result sets.
  searchPath <- dbSendQuery(con,statement=schema_location)

  if(task == "create"){
    # download and bring in the data: full history from the first business day
    # of 2007 through the last business day (dates formatted MMDDYYYY for the
    # Fed's URL query string)
    date1 <- format(previousBusinessDay(as.Date("2007-01-03")),"%m%d%Y")
    date2 <- format(previousBusinessDay(today()),"%m%d%Y")
  }
  if(task == "update"){
    # resume from the day after the most recent deal already stored
    lastdate <- dbGetQuery(con, "select max(deal_date) from soma_temporary_operations;")$max
    date1 <- format(nextBusinessDay(lastdate),"%m%d%Y")
    date2 <- format(today(),"%m%d%Y")
  }

  # URL has changed in the past
  url <- paste0("https://websvcgatewayx2.frbny.org/autorates_tomo_external/services/v1_0/tomo/retrieveHistoricalExcel?f=",
                date1,"&t=",date2,"&ctt=true&&cta=true&ctm=true")
  download.file(url, "tomo.xls","wget")
  tomo <- read.xls("tomo.xls", stringsAsFactors = FALSE)

  # clean the data: lower-case snake_case column names, "N/A" -> NA
  colnames(tomo) <- tolower(colnames(tomo))
  colnames(tomo) <- gsub("\\.","_",colnames(tomo))
  tomo[tomo == "N/A"] <- NA

  # format dates from the source's MM/DD/YYYY to ISO YYYY-MM-DD strings
  dates <- c("deal_date", "delivery_date", "maturity_date")
  for(d in dates){
    tomo[,d] <- format(as.Date(tomo[,d],format="%m/%d/%Y"),"%Y-%m-%d")
  }

  # format numbers (sorted chronologically first)
  tomo <- tomo[order(tomo$deal_date),]
  nums <- c("tsy_submit", "tsy_accept", "tsy_stop_out", "tsy_award",
            "tsy_wght_avg", "tsy_high", "tsy_low", "tsy_pctatstopout",
            "agy_submit", "agy_accept", "agy_stop_out", "agy_award",
            "agy_wght_avg", "agy_high", "agy_low", "agy_pctatstopout",
            "mbs_submit", "mbs_accept", "mbs_stop_out", "mbs_award",
            "mbs_wght_avg", "mbs_high", "mbs_low", "mbs_pctatstopout",
            "total_submit","total_accept")
  for(n in nums){
    tomo[,n] <- as.numeric(tomo[,n])
  }

  # push to sql
  if(task == "create"){
    # drop and recreate the table from scratch
    if(dbExistsTable(con, "soma_temporary_operations")){
      dbSendQuery(con, "drop table soma_temporary_operations;")
    }
    dbWriteTable(con, "soma_temporary_operations", tomo, row.names = FALSE)
    dbSendQuery(con,statement = "set client_min_messages to warning;")
    dbSendQuery(con, statement = "alter table soma_temporary_operations add primary key (op_id);")
    # coerce the numeric columns to Postgres `numeric`
    for(n in nums){
      dbSendQuery(con, statement = paste("alter table soma_temporary_operations alter column ",n," type numeric using ",n,"::numeric",sep=""))
    }

    # handle permissions
    # all
    for(group in all_permissions){
      command <- paste0("grant all on soma_temporary_operations to ",group,";")
      dbSendQuery(con, statement = command)
    }
    # select
    for(group in select_permissions){
      command <- paste0("grant select on soma_temporary_operations to ",group,";")
      dbSendQuery(con, statement = command)
    }
  }
  if(task == "update"){
    # append-only load; assumes the downloaded range does not overlap stored
    # rows (date1 starts after the stored max deal_date)
    dbWriteTable(con, "soma_temporary_operations", tomo, row.names = FALSE, append = TRUE)
  }
}
d8ccfd5c059bd763864e142d0b53359a83ae5907
d45320d2fc526a13c54c6e2f172286cc17cac96a
/Illinois_GM_Score.R
95a8924572176bcab1bebc3dc331346bcb245944
[]
no_license
emryskaya/MathOfGerrymandering
bd55e39e5b9cee437504a0feebaf4fdf2eeb80db
0937ddc4d01c58571cedff080b5bf63b712b1419
refs/heads/master
2023-01-01T10:37:43.353431
2020-10-26T05:04:35
2020-10-26T05:04:35
291,190,616
0
0
null
null
null
null
UTF-8
R
false
false
2,816
r
Illinois_GM_Score.R
# Illinois gerrymandering score.
# Builds census-tract and census-block adjacency structures for Illinois,
# then scores each congressional / state-legislative district by counting the
# block-level adjacencies its boundary cuts (gscore), writing the results to
# CSV files.
library(Matrix)
library(tigris)
library(sp)
library(sf)
library(dplyr)
library(tidyverse)
library(rgeos)
library(parallel)
library(leaflet)

## Tract Level
IL_tracts <- tracts(state = 'IL')
IL_tracts <- IL_tracts[order(IL_tracts$TRACTCE), ]
sf_IL_tracts <- st_as_sf(IL_tracts)
IL_tract_adjacency_list <- st_intersects(sf_IL_tracts, sf_IL_tracts)
# BUG FIX: previously referenced the misspelled `IL_tract_adajcency_list` and
# an undefined `abcddf`; both now use the objects actually defined above.
IL_tract_adjacency_df <- as.data.frame(IL_tract_adjacency_list)
IL_tract_adjacency_matrix <- sparseMatrix(
  i = IL_tract_adjacency_df$row.id,
  j = IL_tract_adjacency_df$col.id
)
IL_tract_adjacency_matrix

## Block Level
IL_blocks <- tigris::blocks(state = 'IL')
IL_blocks <- IL_blocks[order(IL_blocks$TRACTCE10, IL_blocks$BLOCKCE10), ]
# Record the sort position of each block for later reference
IL_blocks@data <- cbind(IL_blocks@data, orderno = 1:length(IL_blocks$GEOID10))
sf_IL_blocks <- st_as_sf(IL_blocks)
IL_block_adjacency_list <- st_intersects(sf_IL_blocks, sf_IL_blocks)
# BUG FIX: previously referenced undefined `block_adjacency_list` /
# `block_adjacency_df` (missing the IL_ prefix).
IL_block_adjacency_df <- as.data.frame(IL_block_adjacency_list)
IL_block_adjacency_matrix <- sparseMatrix(
  i = IL_block_adjacency_df$row.id,
  j = IL_block_adjacency_df$col.id
)

## Congressional Districts (FIPS 17 = Illinois)
congress <- congressional_districts()
IL_congress <- congress[congress$STATEFP == 17, ]
IL_congress@data
sf_IL_congress <- st_as_sf(IL_congress)
plot(st_geometry(sf_IL_congress))

## State Legislatures (senate = upper chamber, cong = lower house)
IL_state_sen <- state_legislative_districts("IL")
IL_state_cong <- state_legislative_districts("IL", house = "lower")
sf_IL_state_sen <- st_as_sf(IL_state_sen)
sf_IL_state_cong <- st_as_sf(IL_state_cong)

## Scoring Function
# Counts the number of block-to-block adjacencies severed by district x of
# layer y: adjacencies between blocks touching the district boundary and
# blocks strictly inside it. Uses the global `sf_IL_blocks` built above.
gscore <- function(x, y) {
  border_blocks_list <- st_touches(y[x, ], sf_IL_blocks)
  border_blocks_df <- as.data.frame(border_blocks_list)
  border_ids <- as.integer(border_blocks_df[, "col.id"])
  within_blocks_list <- st_within(sf_IL_blocks, y[x, ])
  within_blocks_df <- as.data.frame(within_blocks_list)
  within_ids <- as.integer(within_blocks_df[, "row.id"])
  sgbpl <- st_intersects(sf_IL_blocks[border_ids, ], sf_IL_blocks[within_ids, ])
  no_cuts <- length(unlist(sgbpl))
  return(no_cuts)
}

## Get IL state house of representatives score
start <- Sys.time()
scorelist <- mclapply(as.list(1:nrow(sf_IL_state_cong)), gscore, y = sf_IL_state_cong)
end <- Sys.time()
end - start
scorelistcolumn <- as.integer(scorelist)
scorelistcolumn
sf_IL_state_cong <- cbind(sf_IL_state_cong, gmscore = scorelistcolumn)
ilstcng <- as.data.frame(sf_IL_state_cong)
names(ilstcng)
fcdf = subset(ilstcng, select = -c(geometry))
write.csv(fcdf, "Il_state_cong_w_gmscore.csv")

## Get US Congress scores for IL
start <- Sys.time()
scorelist2 <- mclapply(as.list(1:nrow(sf_IL_congress)), gscore, y = sf_IL_congress)
end <- Sys.time()
end - start
scorelistcolumn2 <- as.integer(scorelist2)
scorelistcolumn2
sf_IL_congress <- cbind(sf_IL_congress, gmscore = scorelistcolumn2)
ilcng <- as.data.frame(sf_IL_congress)
names(ilcng)
abcd = subset(ilcng, select = -c(geometry))
write.csv(abcd, "Il_cong_w_gmscore.csv")
1db7d533ab1a2668cbc67c58ef5014a9a28791b4
179e15f315e5b6936a9873cc7c20f8f45e6bd0e8
/run_analysis.R
53088687ab9b62596b04c79c0e5b0e6f9a967b08
[]
no_license
mehadesai/gettingandcleaningdata
610c1432445ce3b7bc11b28c7a1aef3bc3ebee23
5a80c5b2e701217bf1d98a7249aaf91d30ebe85c
refs/heads/master
2016-09-05T15:18:06.739207
2014-11-23T21:29:48
2014-11-23T21:29:48
null
0
0
null
null
null
null
UTF-8
R
false
false
2,970
r
run_analysis.R
# run_analysis.R — UCI HAR "Getting and Cleaning Data" pipeline.
# Merges the test and training sets, keeps mean()/std() measurements, attaches
# descriptive activity labels, and writes a tidy data set with the average of
# each mean() variable for each activity and each subject.
# install.packages(c('data.table', 'reshape2', 'plyr'))
library('data.table')
library('plyr')
library('reshape2')

## PART 1: merge the training and the test sets into one data set
base_dir <- paste(getwd(), '/UCI\ HAR\ Dataset/', sep = '', collapse = NULL)

# read data from test set (measurements, activity labels, subject ids)
test_set_file_path <- paste(base_dir, 'test/X_test.txt', sep = '', collapse = NULL)
test_set <- read.table(test_set_file_path)
test_labels_file_path <- paste(base_dir, 'test/y_test.txt', sep = '', collapse = NULL)
test_labels <- read.table(test_labels_file_path, col.names = "label")
test_subjects_file_path <- paste(base_dir, 'test/subject_test.txt', sep = '', collapse = NULL)
test_subjects <- read.table(test_subjects_file_path, col.names = "subject")

# read data from training set
training_set_file_path <- paste(base_dir, 'train/X_train.txt', sep = '', collapse = NULL)
training_set <- read.table(training_set_file_path)
training_labels_file_path <- paste(base_dir, 'train/y_train.txt', sep = '', collapse = NULL)
training_labels <- read.table(training_labels_file_path, col.names = "label")
training_subjects_file_path <- paste(base_dir, 'train/subject_train.txt', sep = '', collapse = NULL)
training_subjects <- read.table(training_subjects_file_path, col.names = "subject")

# merge data from test set and training set
merged_test_training_data <- rbind(cbind(test_subjects, test_labels, test_set),
                                   cbind(training_subjects, training_labels, training_set))

## PART 2: extract only the mean and standard deviation measurements.
# mean and standard deviation are estimated from the signals provided in the
# features file; keep only columns whose names contain mean() or std().
features_file_path <- paste(base_dir, 'features.txt', sep = '', collapse = NULL)
all_features <- read.table(features_file_path, strip.white = TRUE, stringsAsFactors = FALSE)

# +2 offsets past the subject and label columns prepended above
mean_features <- all_features[grep("mean\\(\\)", all_features$V2), ]
mean_data <- merged_test_training_data[, c(1, 2, mean_features$V1 + 2)]

std_dev_features <- all_features[grep("std\\(\\)", all_features$V2), ]
std_dev_data <- merged_test_training_data[, c(1, 2, std_dev_features$V1 + 2)]

## PART 3: use descriptive activity names.
# Map numeric activity codes to the names in activity_labels.txt.
# BUG FIX: removed a stray empty argument in the read.table call.
activity_labels_file_path <- paste(base_dir, 'activity_labels.txt', sep = '', collapse = NULL)
all_activity_labels = read.table(activity_labels_file_path, stringsAsFactors = FALSE)
mean_data$label <- all_activity_labels[mean_data$label, 2]
std_dev_data$label <- all_activity_labels[std_dev_data$label, 2]

## PART 4: label the data with descriptive variable names
# already labeled in part 3 above

## PART 5: get mean of each variable for each activity and each subject.
# BUG FIX: previously final_data was just a copy of mean_data; it now actually
# averages every measurement column per (subject, activity) group.
final_data <- aggregate(. ~ subject + label, data = mean_data, FUN = mean)

## WRITE TABLE
write.table(format(final_data, scientific = TRUE), "tidy_final.txt", row.name = FALSE)
5979d677c231795c9dab591c86318f30acdc06ca
2a2d3489886a0e4bd5b76ca726adc3b7f44386cb
/standalone/scores_ml_standalone.R
a76f3ce4678a6347c8e6c42fbdc87761fb810102
[ "MIT" ]
permissive
liufan-creat/magic
68d51fdf847dda49500f5a963d4fce74198c9462
a672b94c9262335cbec68e6817cd4de8eb701c65
refs/heads/master
2021-10-23T16:11:02.362069
2019-03-18T18:29:45
2019-03-18T18:29:45
null
0
0
null
null
null
null
UTF-8
R
false
false
9,346
r
scores_ml_standalone.R
#!/usr/bin/env Rscript
# Generates classifiers for scores data output from predict_mae.R

######
### LIBRARIES
######

# Loads (installing first if necessary) all required packages.
load_libraries <- function() {
  get_package("caret", dependencies = TRUE)
  get_package("doMC", repos = "http://R-Forge.R-project.org")
  get_package("pROC")
  get_package("ada")
  get_package("mboost")
  get_package("randomForest")
  get_package("neuralnet")
  get_package("kernlab")
  get_package("lattice")
  get_package("optparse")
  get_package("dplyr")
  get_package("evtree")
}

######
### UTILITY FUNCTIONS
######

# Checks if a package is installed, installing it if missing, then loads it.
# BUG FIX: `dependencies` and `repos` were previously passed positionally to
# install.packages(), where the second positional argument is `lib`; they are
# now passed by name so they reach the intended parameters.
get_package <- function(package_name, repos = "", dependencies = FALSE) {
  if (!is.element(package_name, installed.packages()[, 1])) {
    if (repos == "") {
      install.packages(package_name, dependencies = dependencies)
    } else {
      install.packages(package_name, repos = repos)
    }
  }
  if (!suppressMessages(require(package_name, character.only = TRUE))) {
    stop(paste("Cannot load package", package_name))
  }
}

# Wrapper for cat to write a line to an output file
cat_f <- function(s, file, append = TRUE) {
  cat(s, file = file, sep = "\n", append = append)
}

# Wrapper for sink to print an object's representation to a file
sink_f <- function(s, file, append = TRUE) {
  sink(file, append)
  print(s)
  sink(NULL)
}

######
### MAIN FUNCTIONS
######

# Generates the training and testing sets for the given data frame.
# Returns list of [training, testing].
# BUG FIX: createDataPartition() expects the outcome vector, not a one-column
# data frame, so the target column is extracted with [[ ]].
generate_sets <- function(df, target_feature, seed = 50, split = 0.8) {
  # Sets seed to make this reproducible
  set.seed(seed)

  # Splits data using cutoff indices for the given fraction of the sample
  cutoff <- createDataPartition(df[[target_feature]], p = split, list = FALSE, times = 1)
  training <- df[cutoff, ]
  test <- df[-cutoff, ]

  # Returns list with given dataframes
  return(list(training, test))
}

# Runs machine learning analysis on training data using the specified caret
# method, with 5-fold CV (ROC-optimised for two-class targets). Logs timing
# to log_file and returns the fitted caret model.
generate_model <- function(training, method, target_feature, log_file,
                           selection_rule, seed = 50) {
  # Sets seed to make this reproducible and records the start time
  set.seed(seed)
  start_time <- Sys.time()

  # Candidate feature subset (currently unused; the filtering line below is
  # intentionally left commented out, matching prior behaviour)
  h3k27me3_cols <- c("h3k27me3_body_percentile", "h3k27me3_promoter_percentile",
                     "h3k27me3_body_norm_sum", "h3k27me3_promoter_norm_sum")
  h3k36me3_cols <- c("h3k36me3_body_percentile", "h3k36me3_promoter_percentile",
                     "h3k36me3_body_norm_sum", "h3k36me3_promoter_norm_sum")
  h3k9me2_cols <- c("h3k9me2_body_percentile", "h3k9me2_promoter_percentile",
                    "h3k9me2_body_norm_sum", "h3k9me2_promoter_norm_sum")
  cpg_cols <- c("cpg_body_percentile", "cpg_promoter_percentile",
                "cpg_body_norm_sum", "cpg_promoter_norm_sum")
  cols_to_keep <- c(target_feature, h3k27me3_cols, h3k36me3_cols,
                    h3k9me2_cols, cpg_cols)
  # training <- training[, colnames(training) %in% cols_to_keep]

  # Gets training formula: target ~ all other columns
  train_formula <- as.formula(paste(target_feature, " ~ .", sep = ""))

  # Sets up 5-fold cross-validation training control and trains the model.
  # If only two classes, uses ROC instead of accuracy and kappa.
  train_control <- NULL
  model <- NULL
  if (nlevels(training[[target_feature]]) == 2) {
    train_control <- trainControl(method = "cv", number = 5,
                                  summaryFunction = twoClassSummary,
                                  selectionFunction = selection_rule,
                                  classProbs = TRUE, savePredictions = TRUE)
    model <- train(train_formula, data = training, method = method,
                   metric = "ROC", na.action = na.omit,
                   trControl = train_control)
  } else {
    train_control <- trainControl(method = "cv", number = 5,
                                  selectionFunction = selection_rule,
                                  savePredictions = TRUE)
    model <- train(train_formula, data = training, method = method,
                   na.action = na.omit, trControl = train_control)
  }

  # Logs total time required
  sink_f(paste(method, "time taken:", Sys.time() - start_time), log_file)

  # Explicitly returns model
  return(model)
}

# Wrapper for generate_model: fits a single classifier, saves it as an .rds
# file, logs a summary, and writes the test-set predictions (plus a confusion
# matrix for two-class problems) into output_folder.
generate_classifier <- function(training, testing, classifier, target_feature,
                                output_folder, log_file, selection_rule) {
  # Generates model and logs stats to file
  model <- generate_model(training, classifier, target_feature, log_file,
                          selection_rule)
  sink_f(model, log_file)

  # Saves model to file
  model_file <- file.path(output_folder, paste(classifier, "_model.rds", sep = ""))
  saveRDS(model, file = model_file)

  # Generates predictions on testing data and logs performance
  predictions <- predict(model, testing)
  sink_f("", log_file)
  sink_f(postResample(predictions, testing[[target_feature]]), log_file)
  if (nlevels(training[[target_feature]]) == 2) {
    sink_f(paste("sensitivity:", sensitivity(predictions, testing[[target_feature]])), log_file)
    sink_f(paste("specificity:", specificity(predictions, testing[[target_feature]])), log_file)
    cMat <- as.table(confusionMatrix(predictions, testing[[target_feature]]))
    matrix_file <- file.path(output_folder, paste(classifier, "matrix", sep = "_"))
    write.table(cMat, file = matrix_file, quote = FALSE)
  }

  # Writes predictions to file
  testing$predictions <- predictions
  testing_file <- file.path(output_folder, paste(classifier, "_testing.tsv", sep = ""))
  write.table(testing, file = testing_file, sep = "\t", quote = FALSE,
              row.names = FALSE, col.names = TRUE)
}

# Top-level driver: loads the scores file, splits it 80/20, trains the
# requested classifier in parallel, and writes all artefacts to output_folder.
# `clean` is accepted for interface compatibility but currently unused.
scores_ml <- function(scores_file, target_feature, method, output_folder,
                      selection_rule, clean = FALSE, cores = 4) {
  # Creates output folder if it doesn't exist
  if (!dir.exists(output_folder)) {
    dir.create(output_folder)
  }

  # Resets summary file
  summary_file <- file.path(output_folder, paste(method, "summary.txt", sep = "_"))
  sink_f("Summary file", summary_file, FALSE)

  # Enables multicore processing using doMC package
  registerDoMC(cores = cores)

  # Loads scores file into a data frame and removes unnecessary cols
  df <- read.csv(scores_file, sep = "\t")
  df <- df[, !(names(df) %in% c("start", "end", "chrom", "name"))]

  # Splits data into training and testing sets
  partition <- createDataPartition(df$status, times = 1, p = 0.8, list = FALSE)
  training <- df[partition, ]
  testing <- df[-partition, ]

  # Writes the held-out testing set to the output folder (tab-separated)
  write.table(testing, file = file.path(output_folder, "testing_set.csv"),
              sep = "\t", quote = FALSE, row.names = FALSE, col.names = TRUE)

  # Generates model and saves it to output folder
  generate_classifier(training, testing, method, target_feature,
                      output_folder, summary_file, selection_rule)
}

##################
# COMMAND LINE INTERFACE
##################

# Loads required libraries
load_libraries()

# Builds option list using optparse
options = list(
  make_option(c("-i", "--input_file"), type = "character", default = NULL,
              help = "percentiles generated from predict_mae.R"),
  make_option(c("-o", "--output_folder"), type = "character", default = "output",
              help = "path to output folder"),
  make_option(c("-f", "--target_feature"), type = "character", default = "status",
              help = "target feature for classification [default= %default]"),
  make_option(c("-m", "--method"), type = "character", default = "rf",
              help = "classification method to use [default= %default]"),
  make_option(c("-s", "--selection_rule"), type = "character", default = "best",
              help = "model selection method to use [default= %default]"),
  make_option(c("-k", "--cores"), type = "integer", default = 4,
              help = "number of cores to run on [default= %default]")
)

# Gets options and checks arguments
opt <- parse_args(OptionParser(option_list = options))
if (!file.exists(opt$input_file)) {
  stop("input file does not exist")
}
if (!dir.exists(opt$output_folder)) {
  dir.create(opt$output_folder)
}

# Extracts variables from args
input_file <- opt$input_file
output_folder <- opt$output_folder
target_feature <- opt$target_feature
method <- opt$method
selection_rule <- opt$selection_rule
cores <- opt$cores

# Calls main function.
# BUG FIX: `cores` was previously passed positionally and so was bound to the
# `clean` parameter; it is now passed by name.
scores_ml(input_file, target_feature, method, output_folder, selection_rule,
          cores = cores)
672e9d4930f7ba48748a27a5d2e3ee63a6049c90
0f172b6f94115e34fab3994a4c95a047294e36fa
/R/Read_docs.R
bf9aea3fa9a4ba81daf7b079dea600a8297b341f
[]
no_license
jcval94/DataMiningTools
866932e4df4f1e2e645a14bc966921395737d6b4
fb4e7995b2f5acee742492e52fd2ad982d7b4d59
refs/heads/master
2020-07-07T03:42:10.047077
2020-01-07T05:38:26
2020-01-07T05:38:26
203,234,373
0
0
null
null
null
null
UTF-8
R
false
false
2,272
r
Read_docs.R
library(purrr)
library(assertthat)
library(readr)
library(textreadr)

# Recursively search readable files under `dir` for a text pattern.
#
# Scans .txt/.R/.pdf files (and .docx/.pdf via textreadr when
# word_pdf_omit = FALSE) under `dir`, recursing into extension-less entries
# (assumed to be sub-directories) up to `deep` levels.
#
# Args:
#   dir:          directory to search (default: working directory).
#   text_:        pattern passed to grepl(); required.
#   deep:         remaining recursion depth; recursion stops when deep <= 1.
#   word_pdf_omit: if TRUE (default), .docx/.pdf readers are skipped and files
#                 are read as raw text instead.
#
# Returns a data.frame with columns Doc (file name), Loc (full path) and
# Text (whether the pattern was found, as "TRUE"/"FALSE").
Read_docs <- function(dir = getwd(), text_, deep = 2, word_pdf_omit = TRUE) {
  if (missing(text_)) {
    warning("text_ must have a value")
    return(invisible())
  }

  entries <- list.files(dir)

  # Split each name on "." to separate base name from extension
  name_parts <- do.call(c, map(entries, ~strsplit(.x, ".", fixed = TRUE)))
  n_parts <- map_int(name_parts, ~unlist(length(.x)))

  name_parts <- name_parts[n_parts > 1]
  with_ext <- entries[n_parts > 1]
  # Keep only the extensions we know how to read.
  # NOTE(review): only the first extension token is checked, so "a.b.R"
  # would be misclassified — unchanged from the original behaviour.
  readable <- with_ext[purrr::map_lgl(name_parts,
                                      ~"R" %in% .x[[2]] | "txt" %in% .x[[2]] | "pdf" %in% .x[[2]])]
  # Entries without an extension are treated as sub-directories
  subdirs <- entries[n_parts == 1]

  hits <- data.frame(Doc = 1, Loc = 1, Text = 1)[-1, ]
  child_hits <- data.frame(Doc = 1, Loc = 1, Text = 1)[-1, ]

  # Recurse into sub-directories.
  # BUG FIX: the recursive call previously did not decrement `deep`, so the
  # depth limit never took effect below the first level.
  if (length(subdirs) > 0 & deep > 1) {
    for (sub in subdirs) {
      child_hits <- rbind(child_hits,
                          Read_docs(dir = paste0(dir, "/", sub), text_,
                                    deep = deep - 1,
                                    word_pdf_omit = word_pdf_omit))
    }
  }

  if (length(readable) > 0) {
    txt <- readable[endsWith(readable, ".txt") | endsWith(readable, "R") |
                      endsWith(readable, "pdf") | endsWith(readable, "docx")]
    dir_txt <- paste0(dir, "/", txt)

    # Read a single file and report whether the pattern occurs in it.
    readd <- function(tt, dir_, text_. = text_) {
      formato <- strsplit(tt, ".", fixed = TRUE)[[1]][2]
      if (formato == "docx" & !word_pdf_omit) {
        contents <- try(read_docx(dir_), silent = TRUE)
      } else if (formato == "pdf" & !word_pdf_omit) {
        contents <- try(read_pdf(dir_)$text, silent = TRUE)
      } else {
        # BUG FIX: readChar() was previously called without the file/connection
        # argument (`readChar(, size)`), which always errored.
        contents <- try(readChar(dir_, file.info(dir_)$size), silent = TRUE)
      }
      if (is.error(contents)) {
        # Fall back to readr, and to an empty string if that fails too
        contents <- try(read_file(dir_))
        if (is.error(contents)) {
          contents <- ""
        }
      }
      return(c(dir_, grepl(text_., contents)))
    }

    for (i in seq_along(txt)) {
      info <- readd(txt[i], dir_txt[i])
      hits <- rbind(hits, data.frame(Doc = txt[i], Loc = info[1], Text = info[2]))
    }
  }

  # Combine recursive results (if any) with this directory's hits
  if (nrow(child_hits) > 0) {
    return(rbind(child_hits, hits))
  } else {
    return(hits)
  }
}
a049f8613bb93c2161c68405ea77da86d1c6416a
7374303c14e64c42bed64be1c8aff78e9aefa3d8
/R/kdr.R
a36bb08a3c3426728eac375d0002f5a0999b9fcd
[]
no_license
cran/ks
cd7d27f9a0d865f577c0bc4e857dbeca09ed55a6
f571ffa28e9dbc5ab649b4f6ac30879cf8fad43c
refs/heads/master
2022-11-30T03:47:41.411752
2022-11-24T02:40:02
2022-11-24T02:40:02
17,696,943
6
6
null
null
null
null
UTF-8
R
false
false
14,010
r
kdr.R
######################################################################
## Kernel density ridge estimation for 2D/3D data
##
## kdr()         main entry point: finds density ridges of a sample via a
##               projected-gradient mean-shift on a candidate grid
## kdr.base()    the mean-shift iteration itself
## kdr.segment() groups ridge end points into labelled filament segments
## plot.kdr()    S3 plot method dispatching to 2d/3d helpers
#####################################################################

## Estimate kernel density ridges of the d-column matrix x (d = 2 or 3).
## Candidate points y (default: a grid over the data range) are iterated
## towards the ridge; low-density candidates are discarded first. Returns a
## list of class "kdr" with elements x, y, end.points (the ridge points,
## with a `segment` column if segment=TRUE), H, names, etc.
## Relies on package-internal helpers (parse.name, pre.scale, matrix.pow,
## ks.defaults, Hpi, kde, kdde, block.indices) not visible in this chunk.
kdr <- function(x, y, H, p=1, max.iter=400, tol.iter, segment=TRUE, k, kmax, min.seg.size, keep.path=FALSE, gridsize, xmin, xmax, binned, bgridsize, w, fhat, density.cutoff, pre=TRUE, verbose=FALSE)
{
    ## default values
    xnames <- parse.name(x)
    x <- as.matrix(x)
    x.orig <- x
    ## pre-scale data to unit marginal SDs; `rescale` undoes this later
    if (pre)
    {
        S12 <- diag(apply(x.orig, 2, sd))
        Sinv12 <- matrix.pow(S12,-1)
        x <- pre.scale(x)
        if (!missing(xmin)) xmin <- xmin %*% Sinv12
        if (!missing(xmax)) xmax <- xmax %*% Sinv12
        rescale <- function(x) { as.matrix(x) %*% S12 }
    }
    ksd <- ks.defaults(x=x, binned=binned, bgridsize=bgridsize, gridsize=gridsize)
    d <- ksd$d; n <- ksd$n; w <- ksd$w
    binned <- ksd$binned
    bgridsize <- ksd$bgridsize
    gridsize <- ksd$gridsize

    ## default bandwidth: 2nd-derivative plug-in selector
    if (missing(H)) H <- Hpi(x=x, nstage=2-(d>2), binned=binned, deriv.order=2, verbose=verbose)
    Hinv <- chol2inv(chol(H))
    tol <- 3.7
    tol.H <- tol * diag(H)
    if (missing(xmin)) xmin <- apply(x, 2, min) - tol.H
    if (missing(xmax)) xmax <- apply(x, 2, max) + tol.H
    if (missing(tol.iter)) tol.iter <- 1e-3*min(apply(x, 2, IQR))

    ## default candidate points: full grid over [xmin, xmax]
    if (missing(y))
    {
        xx <- seq(xmin[1], xmax[1], length = gridsize[1])
        yy <- seq(xmin[2], xmax[2], length = gridsize[2])
        if (d==2) y <- expand.grid(xx, yy)
        else if (d==3)
        {
            zz <- seq(xmin[3], xmax[3], length = gridsize[3])
            y <- expand.grid(xx, yy, zz)
        }
    }
    else { y <- as.matrix(y); if (pre) y <- y %*% Sinv12 }
    if (is.vector(y)) y <- matrix(y, nrow=1)
    if (missing(min.seg.size)) min.seg.size <- round(1e-3*nrow(y), 0)

    ## exclude low density regions from ridge search (below 99% contour)
    if (missing(fhat)) fhat <- kde(x=x, w=w, binned=binned)
    if (missing(density.cutoff)) density.cutoff <- contourLevels(fhat, cont=99)
    y.ind <- predict(fhat, x=y)>density.cutoff
    y <- y[y.ind,]

    ## 2nd-derivative (Hessian) estimate used to project the mean shift
    fhat2 <- kdde(x=x, H=H, deriv.order=2, xmin=xmin, xmax=xmax, binned=binned, bgridsize=bgridsize, gridsize=gridsize, w=w, verbose=verbose)

    ## projected gradient mean shift iterations, processed in blocks of
    ## candidate points to bound memory use
    n.seq <- block.indices(n, nrow(y), d=d, r=0, diff=FALSE)#, block.limit=1e6)
    if (verbose) pb <- txtProgressBar()
    pc <- list()
    i <- 1
    if (verbose) setTxtProgressBar(pb, i/(length(n.seq)-1))
    pc <- kdr.base(x=x, fhat2=fhat2, y=y[n.seq[i]:(n.seq[i+1]-1),], H=H, tol.iter=tol.iter, Hinv=Hinv, verbose=verbose, max.iter=max.iter, p=p)
    if (pre)
    {
        pc[c("x","y","end.points")] <- lapply(pc[c("x","y","end.points")], rescale)
        pc[["path"]] <- lapply(pc[["path"]], rescale)
    }
    if (length(n.seq)>2)
    {
        for (i in 2:(length(n.seq)-1))
        {
            if (verbose) setTxtProgressBar(pb, i/(length(n.seq)-1))
            pc.temp <- kdr.base(x=x, fhat2=fhat2, y=y[n.seq[i]:(n.seq[i+1]-1),], H=H, tol.iter=tol.iter, Hinv=Hinv, verbose=verbose, max.iter=max.iter, p=p)
            if (pre)
            {
                pc.temp[c("y","end.points")] <- lapply(pc.temp[c("y","end.points")], rescale)
                pc.temp[["path"]] <- lapply(pc.temp[["path"]], rescale)
            }
            ## accumulate results across blocks
            pc$y <- rbind(pc$y, pc.temp$y)
            pc$end.points <- rbind(pc$end.points, pc.temp$end.points)
            pc$path <- c(pc$path, pc.temp$path)
        }
    }
    if (verbose) close(pb)

    ## remove short segments for p=1 (single-link clusters of end points
    ## smaller than min.seg.size are treated as noise)
    if (p==1)
    {
        tol.seg <- 1e-2*max(apply(x.orig, 2, IQR))
        pc.dendo <- hclust(dist(pc$end.points), method="single")
        pc.label <- cutree(pc.dendo, h=tol.seg)
        pc.label.ind <- pc.label %in% which(table(pc.label)>min.seg.size)
        pc$y <- pc$y[pc.label.ind,]
        pc$end.points <- pc$end.points[pc.label.ind,]
        pc$path <- pc$path[pc.label.ind]
    }
    pc$H <- H
    pc$names <- xnames
    ## back-transform the bandwidth if the data were pre-scaled
    if (pre) pc$H <- S12 %*% pc$H %*% S12
    if (segment) pc <- kdr.segment(x=pc, k=k, kmax=kmax, min.seg.size=min.seg.size, verbose=verbose)
    else pc$end.points <- data.frame(pc$end.points, segment=1L)

    ## put paths as last element in list (only kept when keep.path=TRUE,
    ## since the full paths can be large)
    path.temp <- pc$path
    pc$path <- NULL
    pc$tol.iter <- tol.iter
    pc$min.seg.size <- min.seg.size
    pc$binned <- binned
    pc$names <- xnames
    pc$w <- w
    if (keep.path) pc$path <- path.temp

    return(pc)
}

## One batch of projected-gradient mean-shift iterations: moves each row of y
## along the mean shift projected onto the d-p minor eigenvectors of the
## estimated Hessian (fhat2), until the largest displacement falls below
## tol.iter or max.iter is reached. Returns a "kdr" list with the end points
## and per-point iteration paths.
kdr.base <-function(x, fhat2, H, y, max.iter, tol.iter, p=1, verbose=FALSE, Hinv, ...)
{
    if (!is.matrix(x)) x <- as.matrix(x)
    if (!is.matrix(y)) y <- as.matrix(y)
    if (missing(Hinv)) Hinv <- chol2inv(chol(H))
    nx <- nrow(x)  # NOTE(review): nx is computed but not used below
    ny <- nrow(y)
    d <- ncol(y)
    y.path <- split(y, row(y), drop=FALSE)
    names(y.path) <- NULL
    ## pre-compute the x-dependent parts of the Mahalanobis distances
    xHinv <- x %*% Hinv
    xHinvx <- rowSums(xHinv*x)
    y.update <- y
    i <- 1
    eps <- max(sqrt(rowSums(y.update^2)))
    ## random subset of points shown when verbose>1
    disp.ind <- head(sample(1:nrow(y)), n=min(1000,nrow(y)))

    while (eps > tol.iter & i < max.iter)
    {
        y.curr <- y.update
        ## Mah[i,j] = Mahalanobis distance between x_i and y_j under H
        yHinvy <- t(rowSums(y.curr%*%Hinv *y.curr))
        Mah <- apply(yHinvy, 2, "+", xHinvx) - 2*xHinv %*% t(y.curr)
        w <- exp(-Mah/2)
        denom <- colSums(w)
        num <- t(w)%*%x
        ## standard mean shift, before projection
        mean.shift.H <- num/denom - y.curr
        fhat2.y.curr <- predict(fhat2, x=y.curr)
        for (j in 1:ny)
        {
            ## project the shift onto the span of the d-p smallest-eigenvalue
            ## eigenvectors of the local Hessian
            Hessian <- invvec(fhat2.y.curr[j,])
            Hessian.svd <- eigen(Hessian, symmetric=TRUE)
            Up <- Hessian.svd$vectors[,tail(1:d,n=d-p)]
            mean.shift.H[j,] <- drop(Up %*% t(Up) %*% mean.shift.H[j,])
        }
        y.update <- y.curr + mean.shift.H
        y.update.list <- split(y.update, row(y.update), drop=FALSE)
        y.path <- mapply(rbind, y.path, y.update.list, SIMPLIFY=FALSE)
        eps <- max(sqrt(rowSums((y.curr-y.update)^2)))
        if (verbose>1)
        {
            if (d==2) plot(y.update[disp.ind,], col=1, xlab="x", ylab="y")
            else pairs(y.update[disp.ind,], col=1)
        }
        i <- i+1
    }
    ## NOTE(review): SIMPLIFY is not a formal of sapply (it is for mapply),
    ## so it is forwarded to tail() via "..." and ignored; the result relies
    ## on sapply's default simplification — confirm against upstream ks.
    pc.endpt <- t(sapply(y.path, tail, n=1, SIMPLIFY=FALSE))
    pc <- list(x=x, y=y, end.points=pc.endpt, path=y.path, type="kdr")
    class(pc) <- "kdr"

    return(pc)
}

## create segment of KDR filaments
## x = output from kdr
## Labels the ridge end points with a `segment` number via single-linkage
## clustering; if k is missing, chooses it (up to kmax) by maximising a
## Calinski-Harabasz criterion. Segments smaller than min.seg.size are
## dropped, and each kept segment is re-ordered into a linestring.
kdr.segment <- function(x, k, kmax, min.seg.size, verbose=FALSE)
{
    ep <- x$end.points
    if (any(names(ep) %in% "segment")) ep <- x$end.points[,-which(names(ep)=="segment")]
    if (missing(min.seg.size)) min.seg.size <- x$min.seg.size
    hc <- hclust(dist(ep), method="single")
    if (missing(kmax)) kmax <- 30
    kmax <- min(kmax, nrow(ep))
    if (missing(k))
    {
        ## scan k = 1..kmax and keep the best-scoring cut
        if (verbose) pb <- txtProgressBar()
        clust.ind <- rep(0, kmax)
        for (i in 1:kmax)
        {
            if (verbose) setTxtProgressBar(pb, i/kmax)
            clust.ind[i] <- clust.crit(hc=hc, x=as.matrix(ep), k=i, min.seg.size=min.seg.size)
        }
        if (verbose) close(pb)
        clust.ind[is.na(clust.ind)] <- 0
        kopt <- which.max(clust.ind)
    }
    else kopt <- k
    ep <- data.frame(ep, segment=as.integer(cutree(hc, k=kopt)))
    ## drop segments below the minimum size and relabel 1..m contiguously
    label <- ep$segment
    tlabel <- as.integer(names(table(label))[table(label) > min.seg.size])
    ep <- ep[label %in% tlabel,]
    ep$segment <- factor(ep$segment, labels=1:length(unique(ep$segment)))
    ep$segment <- as.integer(levels(ep$segment))[ep$segment]

    ## re-order KDR segments into 'reasonable' linestring order
    ## experimental
    j <- 1
    for (i in unique(ep$segment))
    {
        ep.temp <- as.matrix(ep[ep$segment==i,-ncol(ep)])
        ep.temp <- data.frame(chain.knnx(ep.temp, k1=1, k2=1), segment=i)
        if (j==1) ep.ord <- ep.temp
        else ep.ord <- rbind(ep.ord, ep.temp)
        j <- j+1
    }
    names(ep.ord) <- c(x$names, "segment")
    rownames(ep.ord) <- NULL
    x$end.points <- ep.ord
    x$min.seg.size <- min.seg.size
    x$k <- kopt
    if (exists("clust.ind")) x$clust.ind <- clust.ind

    return(x)
}

## rbind nearest neighbour of y from x to y
## Returns list(x = remaining points, y = y with its nearest neighbour(s)
## from x appended). Helper for chain.knnx.
add.knnx <- function(x, y, k=1)
{
    xynn <- FNN::get.knnx(x, y, k=k)
    y <- rbind(y, x[xynn$nn.index,])
    d <- ncol(x)
    xy <- list(x=matrix(x[-xynn$nn.index,],ncol=d), y=y)

    return(xy)
}

## arrange points in KDR to form a "reasonable" linestring
chain.knnx <- function(x, k1=1, k2=5)
{
    ## concatenate the nearest neighbours in a chain
    ## start with first point in x
    if (!is.matrix(x)) x <- as.matrix(x)
    d <- ncol(x)
    if (nrow(x)>1)
    {
        x.ord.list <- add.knnx(x=matrix(x[-1,], ncol=d), y=matrix(x[1,], ncol=d), k=k1)
        x.ord <- x.ord.list$y
        while (nrow(x.ord.list$x)>0)
        {
            ## next link: nearest neighbour of the mean of the last k2 points
            y.temp <- matrix(apply(as.matrix(tail(x.ord.list$y, n=k2)), 2, mean), ncol=d)
            colnames(y.temp) <- names(x.ord.list$x)
            x.ord.list.temp <- add.knnx(x=x.ord.list$x, y=y.temp, k=k1)
            x.ord <- rbind(x.ord, matrix(tail(x.ord.list.temp$y,n=1),ncol=d))
            x.ord.list <- x.ord.list.temp
        }

        ## decide which permutation is "best" linestring
        ## break at max discontinuity, try reversing either half, and keep
        ## the ordering with the shortest total path length
        ind <- which.max(rowSums((head(x.ord, n=-1)-tail(x.ord,n=-1))^2))
        ind1 <- c(1:ind, (ind+1):nrow(x.ord))
        ind2 <- c(1:ind, rev((ind+1):nrow(x.ord)))
        ind3 <- c(rev(1:ind), (ind+1):nrow(x.ord))
        ind4 <- c(rev(1:ind), rev((ind+1):nrow(x.ord)))
        x.ord1 <- x.ord[ind1,]
        x.ord2 <- x.ord[ind2,]
        x.ord3 <- x.ord[ind3,]
        x.ord4 <- x.ord[ind4,]
        x.ord.dist <- rep(0,4)
        x.ord.dist[1] <- sum(rowSums((head(x.ord1, n=-1)-tail(x.ord1,n=-1))^2))
        x.ord.dist[2] <- sum(rowSums((head(x.ord2, n=-1)-tail(x.ord2,n=-1))^2))
        x.ord.dist[3] <- sum(rowSums((head(x.ord3, n=-1)-tail(x.ord3,n=-1))^2))
        x.ord.dist[4] <- sum(rowSums((head(x.ord4, n=-1)-tail(x.ord4,n=-1))^2))
        x.ord <- get(paste0("x.ord", which.min(x.ord.dist)))
    }
    else x.ord <- x

    return(x.ord)
}

## Calinski-Harabasz clustering criterion for hierarchical clustering object
clust.crit <- function(hc, x, k, min.seg.size=1)
{
    label <- cutree(hc, k=k)
    tlabel <- as.integer(names(table(label))[table(label) > min.seg.size])
    ## NOTE(review): tlabel.ind is computed but not used — the criterion is
    ## evaluated on all points regardless of min.seg.size.
    tlabel.ind <- label %in% tlabel
    cc <- fpc.calinhara(x=x, clustering=label)

    return(cc)
}

## copied from fpc::calinhara 2020-09-18
## Calinski-Harabasz index: between/within dispersion ratio for a clustering
## with cn clusters of the rows of x.
fpc.calinhara <- function(x, clustering, cn = max(clustering))
{
    x <- as.matrix(x)
    p <- ncol(x)
    n <- nrow(x)
    cln <- rep(0, cn)
    W <- matrix(0, p, p)
    for (i in 1:cn) cln[i] <- sum(clustering == i)
    for (i in 1:cn)
    {
        clx <- x[clustering == i, ]
        cclx <- cov(as.matrix(clx))
        ## singleton clusters contribute nothing to W
        if (cln[i] < 2) cclx <- 0
        W <- W + ((cln[i] - 1) * cclx)
    }
    S <- (n - 1) * cov(x)
    B <- S - W
    out <- (n - cn) * sum(diag(B))/((cn - 1) * sum(diag(W)))

    return(out)
}

#############################################################################
## S3 methods for KDR objects
#############################################################################

## plot method: dispatch on data dimension (2d base graphics / 3d plot3D-rgl)
plot.kdr <- function(x, ...)
{
    fhat <- x
    d <- ncol(fhat$x)
    if (d==2)
    {
        plotret <- plotkdr.2d(fhat, ...)
        invisible(plotret)
    }
    else if (d==3)
    {
        plotkdr.3d(fhat, ...)
        invisible()
    }
    else stop ("Plot function only available for 2 or 3-d data")
}

## 2-d plot of the ridge end points; one colour per segment if present.
## transparency.col is a package-internal helper.
plotkdr.2d <- function(x, add=FALSE, col, type="p", alpha=1, ...)
{
    xp <- x$end.points
    if (!any(names(xp) %in% "segment"))
    {
        if (missing(col)) col <- 6
        col <- transparency.col(col, alpha=alpha)
        if (!add) plot(xp, col=col, type=type, ...)
        else points(xp, col=col, ...)
    }
    else
    {
        xps <- unique(xp$segment)
        if (missing(col)) col <- hcl.colors(n=length(xps), palette="Set2")
        if (length(col) < length(xps)) col <- rep(col, length(xps))
        col <- transparency.col(col, alpha=alpha)
        ## set up the axes invisibly, then draw each segment separately
        if (!add) plot(xp[,-ncol(xp)], col="transparent", ...)
        for (i in 1:length(xps)) lines(xp[xp$segment==xps[i],-ncol(xp)], col=col[i], type=type, ...)
    }
}

## 3-d plot of the ridge end points via plot3D (default) or rgl.
## NOTE(review): when end.points has no `segment` column, `xps` is never
## defined but is used in both drawing loops below; also the first scatter3D
## call passes `type` twice (type=type and type="n") — confirm against the
## upstream ks package before relying on this branch.
plotkdr.3d <- function(x, display="plot3D", colors, col, col.fun, alphavec, size=3, cex=1, pch=1, theta=-30, phi=40, d=4, ticktype="detailed", add=FALSE, xlab, ylab, zlab, alpha=1, box=TRUE, axes=TRUE, type="p", ...)
{
    fhat <- x
    xp <- x$end.points
    if (missing(xlab)) xlab <- fhat$names[1]
    if (missing(ylab)) ylab <- fhat$names[2]
    if (missing(zlab)) zlab <- fhat$names[3]
    if (!any(names(xp) %in% "segment"))
    {
        if (missing(col)) col <- 6
    }
    else
    {
        xps <- unique(xp$segment)
        if (missing(col)) col <- hcl.colors(n=length(xps), palette="Set2")
        if (length(col) < length(xps)) col <- rep(col, length(xps))
    }
    colors <- col
    disp <- match.arg(display, c("plot3D", "rgl"))
    if (disp %in% "plot3D")
    {
        if (!add) plot3D::scatter3D(x=xp[,1], y=xp[,2], z=xp[,3], add=add, theta=theta, phi=phi, d=d, type=type, xlab=xlab, ylab=ylab, zlab=zlab, ticktype=ticktype, type="n", col=NA, ...)
        for (i in 1:length(xps)) plot3D::scatter3D(x=xp[xp$segment==xps[i],1], y=xp[xp$segment==xps[i],2], z=xp[xp$segment==xps[i],3], cex=cex, col=col[i], add=TRUE, pch=pch, type=type, alpha=alpha, ...)
    }
    else if (disp %in% "rgl")
    {
        ## rgl is an optional dependency, checked at call time
        if (!requireNamespace("rgl", quietly=TRUE)) stop("Install the rgl package as it is required.", call.=FALSE)
        for (i in 1:length(xps)) rgl::plot3d(x=xp[xp$segment==xps[i],1], y=xp[xp$segment==xps[i],2], z=xp[xp$segment==xps[i],3], col=col[i], alpha=alpha, xlab=xlab, ylab=ylab, zlab=zlab, add=add | (i>1), box=box, axes=axes, type=type, size=size, ...)
    }
}
b7746954597c92f16383ad3228b176b35f10a47c
1de8c2a4fb90df3295cc0678d9d449a6f1bb9b48
/generate_phylANOVA_label.R
a9b3de223e689491a2a4ccb0531327a5b46dbc2d
[]
no_license
jrosaceae/comparative_methods_misc
946970ca2950d84d59a4547a38ff0307e932b2ab
780bfb67c0037021c30245101b9bccb217073db0
refs/heads/master
2022-09-18T05:03:13.451758
2020-05-18T17:08:53
2020-05-18T17:08:53
265,007,103
0
0
null
null
null
null
UTF-8
R
false
false
737
r
generate_phylANOVA_label.R
#' Generate compact-letter-display labels from a phytools::phylANOVA result.
#'
#' Takes the output of phytools::phylANOVA (run with posthoc = TRUE) and
#' converts the pairwise post-hoc p-value matrix (`$Pt`) into significance
#' group letters via multcompView::multcompLetters, suitable for plot labels.
#'
#' @param phyNOVA object returned by phytools::phylANOVA
#' @return data.frame with a `Letters` column and a `treatment` column,
#'   ordered to match the group order of the input p-value matrix
`generate_phylANOVA_label` <- function(phyNOVA) {
  # Keep only the upper triangle of the pairwise p-value matrix so each
  # comparison appears exactly once, then reshape to long format.
  df.prep <- phyNOVA$Pt
  df.prep[lower.tri(df.prep, diag = TRUE)] <- NA
  df.prep <- na.omit(melt(df.prep))
  # multcompLetters expects a named vector: names "A-B", values = p-values.
  df.prep[, 1] <- paste(df.prep[, 1], "-", df.prep[, 2], sep = "")
  phynova.levels <- df.prep[, 3]
  names(phynova.levels) <- df.prep[, 1]
  phynova.labels <- data.frame(multcompLetters(phynova.levels)['Letters'])
  # Put the labels in the same order as the input p-value matrix
  # (alphabetical or otherwise).
  phynova.labels$treatment <- rownames(phynova.labels)
  # BUG FIX: the original referenced `x$Pt`, but `x` is not defined in this
  # function's scope -- the argument is `phyNOVA`.
  phynova.labels <- phynova.labels[order(factor(phynova.labels$treatment,
                                                levels = names(phyNOVA$Pt[, 1]))), ]
  return(phynova.labels)
}
f6b13b1bc833ab06ca1c3025b8d01c68e0e4b94b
c8b5d303995efaf03fa2972306c966d097e7e452
/pokemon_radar_plot.R
aee789c910de4fcd58b3a305f1f795028c60d72e
[]
no_license
northernned/pokemon-radar-plot
1befe0c51e35da4bc62b4b1fd77f583c8944455f
9bfe189051274c0d5a0066f5f47e890329bbd271
refs/heads/master
2020-03-19T01:54:12.225430
2018-06-03T11:50:27
2018-06-03T11:50:27
135,581,299
1
0
null
null
null
null
UTF-8
R
false
false
8,165
r
pokemon_radar_plot.R
# import libraries
library(ggplot2)
library(palettetown)
library(png)

# Base stats (HP/Attack/Defense/Sp.Atk/Sp.Def/Speed) for the chosen pokemon.
Articuno <- c(HP = 90, Attack = 85, Defense = 100, Sp.Atk = 95, Sp.Def = 125, Speed = 85)
Moltres <- c(HP = 90, Attack = 100, Defense = 90, Sp.Atk = 125, Sp.Def = 85, Speed = 90)
Zapdos <- c(HP = 90, Attack = 90, Defense = 85, Sp.Atk = 125, Sp.Def = 90, Speed = 100)

# Moltres' colour is hard coded because his palette's primary colour is too
# close to Zapdos' yellow.
Moltres_red <- "#E80000"
Zapdos_yellow <- pokepal('Zapdos', 1)
Articuno_blue <- pokepal('Articuno', 1)

data <- rbind(Articuno, Moltres, Zapdos)
Attributes <- colnames(data)
AttNo <- length(Attributes)
# Repeat the first attribute as an extra column so each radar polygon closes.
data <- cbind(data, data[, 1])

# Return `npoints` x/y coordinates approximating a circle centred at `center`;
# used to draw the concentric background rings of the radar plot.
draw_circle <- function(center = c(0, 0), diameter = 1, npoints = 100) {
  radius <- diameter / 2
  tt <- seq(0, 2 * pi, length.out = npoints)
  data.frame(x = center[1] + radius * cos(tt),
             y = center[2] + radius * sin(tt))
}

# draw the circles
circle1 <- draw_circle(diameter = 250)
circle2 <- draw_circle(diameter = 200)
circle3 <- draw_circle(diameter = 150)
circle4 <- draw_circle(diameter = 100)

angle_split <- (2 * pi) / AttNo
angle_split_seq <- seq(0, (2 * pi), angle_split)

# Empty accumulators for the spoke endpoints and the attribute-title
# positions. BUG FIX: the original passed the *functions* `numeric` /
# `character` (missing parentheses) to data.frame(), which errors at runtime.
line_data <- data.frame(x = numeric(0), y = numeric(0), stringsAsFactors = FALSE)
title_position <- data.frame(title = character(0), x = numeric(0), y = numeric(0),
                             stringsAsFactors = FALSE)

# Spoke endpoints and attribute labels both sit on the outer (radius 150) ring.
for (i in seq_len(ncol(data))) {
  radians_for_segment <- angle_split_seq[i]
  x <- 150 * cos(radians_for_segment)
  y <- 150 * sin(radians_for_segment)
  line_data <- rbind(data.frame(x = x, y = y, stringsAsFactors = FALSE),
                     line_data)
  title_position <- rbind(data.frame(title = colnames(data)[i], x = x, y = y,
                                     stringsAsFactors = FALSE),
                          title_position)
}

# Axis value labels, placed halfway between the first two spokes.
values <- c(25, 50, 75, 100, 125)
radian_for_values <- angle_split / 2
values_position <- data.frame(values = values,
                              x = values * cos(radian_for_values),
                              y = values * sin(radian_for_values))

# Every spoke starts at the origin.
line_data$x2 <- 0
line_data$y2 <- 0

# Polygon vertices: one row per (pokemon, attribute) pair.
polydata <- data.frame(pokemon = character(0), value = numeric(0),
                       radians = numeric(0), x = numeric(0), y = numeric(0),
                       stringsAsFactors = FALSE)
for (i in seq_len(ncol(data))) {
  for (p in seq_len(nrow(data))) {
    poke2calc <- data[p, ]
    radians_for_segment <- angle_split_seq[i]
    temp <- data.frame(pokemon = rownames(data)[p],
                       value = poke2calc[i],
                       radians = radians_for_segment,
                       x = poke2calc[i] * cos(radians_for_segment),
                       y = poke2calc[i] * sin(radians_for_segment),
                       stringsAsFactors = FALSE)
    # Prepend (not append): the resulting row order determines which pokemon
    # comes first in `pokemon_database` below, and the colour assignments
    # rely on that order.
    polydata <- rbind(temp, polydata)
  }
}

# Split the data per pokemon (order follows the prepend above).
pokemon_database <- unique(polydata$pokemon)
pokemon1 <- polydata[which(polydata$pokemon == pokemon_database[1]), ]
pokemon2 <- polydata[which(polydata$pokemon == pokemon_database[2]), ]
pokemon3 <- polydata[which(polydata$pokemon == pokemon_database[3]), ]

# Title strings for each pokemon. NOTE(review): this gsub replaces each
# uppercase letter with itself (a no-op); it was presumably meant to insert
# spaces before capitals -- kept as-is to preserve output.
pokemon1_title <- gsub('([[:upper:]])', '\\1', pokemon_database[1])
pokemon2_title <- gsub('([[:upper:]])', '\\1', pokemon_database[2])
pokemon3_title <- gsub('([[:upper:]])', '\\1', pokemon_database[3])

# Start the png device to create a hi-res image of the plot.
png(file="examples\\original_legendary_birds.png", width = 800, height = 900,
    bg = "transparent")

ggplot() +
  xlim(c(-150, 150)) +
  ylim(c(-200, 200)) +
  # Concentric background rings
  geom_polygon(data = circle1, aes(x = x, y = y), fill = "#5A5F72", colour = "#5A5F72") +
  geom_polygon(data = circle2, aes(x = x, y = y), fill = "#969696", colour = "#969696") +
  geom_polygon(data = circle3, aes(x = x, y = y), fill = "#5A5F72", colour = "#5A5F72") +
  geom_polygon(data = circle4, aes(x = x, y = y), fill = "#969696", colour = "#969696") +
  # Void the theme and add a background fill
  theme_void() +
  theme(plot.background = element_rect(fill = "#1F2833")) +
  # Spokes and attribute/value labels. FIX: map aes() to bare column names
  # instead of `line_data$...` so ggplot evaluates them in the layer's data.
  geom_segment(data = line_data,
               aes(x = x, y = y, xend = x2, yend = y2),
               colour = "#FFFFFF", linetype = "dashed") +
  annotate("text", x = title_position$x, y = title_position$y,
           label = title_position$title, size = 4, colour = "#FFFFFF") +
  annotate("text", x = values_position$x, y = values_position$y,
           label = values_position$values, size = 3.5, colour = "#FFFFFF") +
  # Add chart title
  annotate("text", x = 150, y = 190, label = "Pokemon Radar Plot", size = 7,
           colour = "#969696", family = "Helvetica", fontface = "bold", hjust = 1) +
  annotate("text", x = 150, y = 170, label = "PokemonDB", size = 6,
           colour = "#5A5F72", family = "Helvetica", fontface = "bold", hjust = 1) +
  # Add pokemon 1 data
  geom_polygon(data = pokemon1, aes(x = x, y = y), fill = Zapdos_yellow,
               colour = Zapdos_yellow, alpha = 0.3) +
  annotate("text", x = -150, y = 190, label = pokemon1_title, size = 10,
           colour = Zapdos_yellow, family = "Helvetica", fontface = "bold", hjust = 0) +
  annotate("text", x = -140, y = 180, label = "vs", size = 5, colour = "#969696",
           family = "Helvetica", hjust = 0) +
  # Add pokemon 2 data
  geom_polygon(data = pokemon2, aes(x = x, y = y), fill = Moltres_red,
               colour = Moltres_red, alpha = 0.3) +
  annotate("text", x = -150, y = 170, label = pokemon2_title, size = 10,
           colour = Moltres_red, family = "Helvetica", fontface = "bold", hjust = 0) +
  annotate("text", x = -140, y = 160, label = "vs", size = 5, colour = "#969696",
           family = "Helvetica", hjust = 0) +
  # Add pokemon 3 data (the original comment mislabelled this "pokemon 2")
  geom_polygon(data = pokemon3, aes(x = x, y = y), fill = Articuno_blue,
               colour = Articuno_blue, alpha = 0.3) +
  annotate("text", x = -150, y = 150, label = pokemon3_title, size = 10,
           colour = Articuno_blue, family = "Helvetica", fontface = "bold", hjust = 0)

dev.off()
871cfcec27bc217b132df474ccb656a76af95c80
4674c7ff9404adde68b1b21478eafbff3a2a5843
/BIOSurvey2/BIOSurvey2/R/summary.post.stratify.R
da249aa897caa9b7806e1ce8586b7588a5e986b8
[]
no_license
risdell/VA_Oyster_Survey
cfed9bf6d1be1e6aef32005a20ea059afc3f4a58
7de643aa33b6d91fac3283fa830d356a285c8432
refs/heads/master
2023-01-25T01:30:40.208883
2023-01-20T16:14:05
2023-01-20T16:14:05
189,088,967
0
0
null
null
null
null
UTF-8
R
false
false
176
r
summary.post.stratify.R
## Summary method for post-stratified estimates.
## Returns the input object together with the post-stratified mean
## (stratum sizes Nh as weights) and its estimated variance.
summary.post.stratify <- function(object, ...) {
  n_total <- sum(object$Nh)
  strat_mean <- sum(object$Nh * object$ybpos) / n_total
  strat_var <- sum(object$Nh^2 * object$Vst.ybpos) / n_total^2
  list(object, ystr.mean = strat_mean, V.mean = strat_var)
}
8ee6024e27bba48b4422021179013d04a1b46e8c
0a906cf8b1b7da2aea87de958e3662870df49727
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610051326-test.R
88df6bba95979a49ed427b6dd4e1a9e02896bfc9
[]
no_license
akhikolla/updated-only-Issues
a85c887f0e1aae8a8dc358717d55b21678d04660
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
refs/heads/master
2023-04-13T08:22:15.699449
2021-04-21T16:25:35
2021-04-21T16:25:35
360,232,775
0
0
null
null
null
null
UTF-8
R
false
false
556
r
1610051326-test.R
# Auto-generated fuzzer (libFuzzer / RcppDeepState) regression input for
# grattan::IncomeTax, replaying a valgrind-flagged case: empty `rates` and
# `thresholds` vectors together with an `x` vector containing NaN plus
# denormal/extreme doubles. Do not edit the numeric payload by hand.
testlist <- list(rates = numeric(0), thresholds = numeric(0), x = c(-1.26836459270732e-30, NaN, 1.00891829346114e-309, -2.37619995226551e-289, -1.26836459123889e-30, 9.37339630957792e-312, -5.78534238436574e-34, -1.26836459270829e-30, -1.26836459122741e-30, 9.37339630957792e-312, 1.70257006040729e-313, -3.9759940224262e-34, -1.26823100659151e-30, -1.26836459270829e-30, 2.39422219319154e-301, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Invoke the fuzz target with the recorded arguments; `str()` prints the
# structure of the result (or the call errors, reproducing the defect).
result <- do.call(grattan::IncomeTax,testlist)
str(result)
1dce1276f2168a0fb40934e551f8e1abd9630f45
2195aa79fbd3cf2f048ad5a9ee3a1ef948ff6601
/docs/SyncFilterDialog.rd
cbd0263e54e4157dea3d14ff3beb143349279f2e
[ "MIT" ]
permissive
snakamura/q3
d3601503df4ebb08f051332a9669cd71dc5256b2
6ab405b61deec8bb3fc0f35057dd880efd96b87f
refs/heads/master
2016-09-02T00:33:43.224628
2014-07-22T23:38:22
2014-07-22T23:38:22
null
0
0
null
null
null
null
UTF-8
R
false
false
2,641
rd
SyncFilterDialog.rd
=begin =[同期フィルタ]ダイアログ [同期フィルタ]では個々の同期フィルタについて、その同期フィルタが使われる条件とその動作を編集します。 ((<[同期フィルタ]ダイアログ|"IMG:images/SyncFilterDialog.png">)) +[条件] 同期フィルタの条件を((<マクロ|URL:Macro.html>))で指定します。対象のメッセージに対して指定されたマクロを評価した結果がTrueになると、この同期フィルタで設定した動作が実行されます。 +[編集] ((<[条件]ダイアログ|URL:ConditionsDialog.html>))を開いて条件を編集します。 +[フォルダ] 同期フィルタを有効にするフォルダを指定します。指定しない場合には全てのフォルダに対して有効になります。フォルダはフォルダの完全名または正規表現で指定します。正規表現で指定する場合には、//で括って指定します。この場合、完全名がその正規表現にマッチするフォルダで有効になります。 +[動作] 動作を指定します。指定できるのは以下のいずれかです。 :ダウンロード (POP3) メッセージをダウンロードします。[最大行数]でダウンロードする最大行数を指定できます。[最大行数]に-1を指定するとメッセージ全体をダウンロードします。 :ダウンロード (IMAP4) メッセージをダウンロードします。[タイプ]でダウンロードする方法を指定できます。指定できるのは以下のいずれかです。 :全て メッセージ全体をダウンロードします。 :テキスト メッセージのうちテキスト部分をダウンロードし、添付ファイル部分はダウンロードしません。 :HTML メッセージがHTMLメッセージだった場合にはHTML部分(HTMLのテキストと埋め込まれている画像など)をダウンロードします。HTMLメッセージでなかった場合には「テキスト」と同じです。 :ヘッダ メッセージのヘッダをダウンロードします。 :ダウンロード (NNTP) メッセージをダウンロードします。 :削除 (POP3, IMAP4) メッセージをサーバ上から削除します。 :無視 (POP3, NNTP) 無視します。インデックスも生成しません。 +[説明] 同期フィルタの説明を指定します。説明を指定すると、((<[同期フィルタ]ダイアログ|URL:SyncFiltersDialog.html>))に表示されます。 =end
f7bca9c6fbf9c1627007db21552a3caa0fdb137b
691927e840f0d8f32057add34ec071adbc3231b6
/cachematrix.R
8f74a7ec176d168921664de3e260bc8cf599a752
[]
no_license
aemon12/ProgrammingAssignment2
f663288bb25c9eb6b3f67659760db58b30278792
a4d82dd51b55eecd4bc2888ea33356c0fc6d16e0
refs/heads/master
2021-01-16T21:17:16.600212
2015-06-19T21:27:21
2015-06-19T21:27:21
37,617,193
0
0
null
2015-06-17T19:48:42
2015-06-17T19:48:42
null
UTF-8
R
false
false
1,153
r
cachematrix.R
## Caching wrapper around matrix inversion: computing an inverse is costly,
## so the result is stored and reused until the underlying matrix changes.

## Wrap a matrix in a closure that can cache its inverse.
## Returns a list of accessors: set/get the matrix, set/get the inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # invalidate the cache when the data changes
  }
  get <- function() {
    x
  }
  setInversion <- function(solve) {
    cached_inverse <<- solve
  }
  getInversion <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setInversion = setInversion,
       getInversion = getInversion)
}

## Return the inverse of the matrix wrapped by makeCacheMatrix(),
## computing it only on the first call and serving the cache afterwards.
cacheSolve <- function(x, ...) {
  inv <- x$getInversion()
  if (is.null(inv)) {
    inv <- solve(x$get(), ...)
    x$setInversion(inv)
  } else {
    message("getting cached data")
  }
  inv
}
3f87c9c1a1d795997b399358e80bf2e02d380b3b
07ae778ef3a85a9ce3f2ff76a970c6e61d9ef83e
/rprogramming/lab1/pollutantmean.R
8d99309d130b1fae17fb5f4af63bd3eb238820c1
[]
no_license
jiyoochang95/datasciencecoursera
3f2700ccb2c13f50c03d8ee59aa77edf45798e7a
b786827c8d36a04b0857ecd55ae8eecfadb2e895
refs/heads/master
2021-01-16T18:26:19.857191
2017-08-25T23:56:50
2017-08-25T23:56:50
100,079,597
0
0
null
null
null
null
UTF-8
R
false
false
721
r
pollutantmean.R
# Calculates the mean of a pollutant across a set of monitor files.
#
# directory: path containing the monitor CSV files ("001.csv" ... "332.csv")
# pollutant: name of the pollutant column to average (e.g. "sulfate", "nitrate")
# id:        integer vector of monitor ids to include (default 1:332)
# Returns the mean of the pollutant over all selected monitors, NAs removed.
pollutantmean <- function(directory, pollutant, id = 1:332) {
  # Zero-padded file names via sprintf. BUG FIX: the original if/else chain
  # had no branch for id >= 100, so `newid` silently kept its value from the
  # previous iteration (or was undefined on the first one).
  paths <- file.path(directory, sprintf("%03d.csv", id))
  # Read all monitor files, then combine once -- avoids the O(n^2) cost of
  # rbind-ing inside a loop.
  combined <- do.call(rbind, lapply(paths, read.csv))
  # Generalizes the original hard-coded nitrate/sulfate branches to any
  # pollutant column present in the files.
  mean(combined[[pollutant]], na.rm = TRUE)
}
423c46107369e40ec97f96902e9aa04f277516d9
3194aa9fe7bbc3ede88d02d554bd339f6bd04fc4
/man/toy.Rd
3dd5262d33535685826ef440ceae2f77505c759d
[]
no_license
RobinHankin/emulator
e99b3a0997c9be1bc96220da96c30a0e694b6a8f
cebc2ea3d9d12ee6a349fa51c469d6fd092b78be
refs/heads/master
2023-02-04T21:16:13.656391
2023-01-29T19:51:39
2023-01-29T19:51:39
126,391,589
3
0
null
null
null
null
UTF-8
R
false
false
883
rd
toy.Rd
\name{toy} \alias{toy} \docType{data} \title{A toy dataset} \description{ A matrix consisting of 10 rows and 6 columns corresponding to 10 points in a six-dimensional space.} \usage{data(toy)} \examples{ data(toy) real.relation <- function(x){sum( (1:6)*x )} d <- apply(toy, 1, real.relation) # Supply some scales: fish <- rep(2,6) # Calculate the A matrix: A <- corr.matrix(toy,scales=fish) Ainv <- solve(A) # Now add some suitably correlated noise: d.noisy <- as.vector(rmvnorm(n=1,mean=d, 0.1*A)) # Choose a point: x.unknown <- rep(0.5,6) # Now use interpolant: interpolant(x.unknown, d.noisy, toy, Ainv, scales=fish, g=FALSE) # Now verify by checking the first row of toy: interpolant(toy[1,], d.noisy, toy, Ainv, scales=fish, g=FALSE) # Should match d.noisy[1]. } \keyword{datasets}
009c47ff72307e755365a83cc54690b83b1342f1
3fedf9cce68666e98dfd9acc079bc0af73d42165
/scripts/main.R
972f839a24765933e83d5dac27c4889b44be2023
[]
no_license
woldemarg/lightit_test
caae7082ac071dd4853c2b9359e1595f3e88d58c
b3d9bd57415513aebb5c9b1733303f48a1b4d573
refs/heads/master
2021-02-13T22:29:33.028262
2020-04-27T23:45:53
2020-04-27T23:45:53
244,739,765
0
0
null
null
null
null
UTF-8
R
false
false
17,591
r
main.R
library(tidyverse) library(lubridate) #parse dates library(geosphere) #calculate distance to event location library(randomForest) library(glmnet) library(fastDummies) #one-hot-encoding for ridge and xgb library(gbm) library(xgboost) #all files were previously encoded to utf-8 description <- read_csv("data/encoded/columns_description.csv") events <- read_csv("data/encoded/events_Hokkaido.csv") holidays <- read_csv("data/encoded/holidays_Japan.csv") jalan <- read_csv("data/encoded/jalan_shinchitose.csv") rakuten <- read_csv("data/encoded/rakuten_shinchitose.csv") weather <- read_csv("data/encoded/weather_Hokkaido.csv") names(rakuten) <- description$`Row(EN)`[1:48] names(jalan) <- description$`Row(EN)`[49:94] #parsing dates in all input data get_date <- function(timestamp, format = "Y/m/d H:M") { return(date(parse_date_time(timestamp, format))) } holidays <- holidays %>% mutate(day = get_date(day, "Y/m/d")) events <- events %>% mutate( start_date = get_date(start_date, "Y/m/d"), end_date = get_date(end_date, "Y/m/d"), ) weather <- weather %>% mutate(date = get_date(date, "Y/m/d")) #identical columns within main datasets jalan_filtered <- jalan %>% mutate( company_name = "jalan", request_date = get_date(request_date_time), pickup_date = get_date(pickup_date_time), return_date = get_date(return_date_time), cancellation_date = get_date(cancellation_date_time), is_cancelled = ifelse(!is.na(cancellation_date_time), 1, 0), #num_of_passengers = as.numeric(str_extract_all(number_of_passengers, "\\d+")[[1]][1]), #num_of_children = as.numeric(str_extract_all(number_of_passengers, "\\d+")[[1]][2]) ) %>% select( company_name, request_date, pickup_date, return_date, request_date, cancellation_date, is_cancelled, #num_of_passengers, #num_of_children, #arrival_flight, #total_price ) rakuten_filtered <- rakuten %>% mutate( company_name = "rakuten", request_date = get_date(request_date_time), pickup_date = get_date(pickup_date_time), return_date = get_date(return_date_time), 
cancellation_date = get_date(cancel_request_date_time), is_cancelled = ifelse(!is.na(cancel_request_date_time), 1, 0), ) %>% # rename( # car_class = company_car_class_code, # num_of_passengers = number_of_passengers, # num_of_children = number_of_children, # arrival_flight = flight_number, # total_price = taxable_amount # ) %>% select( company_name, request_date, pickup_date, return_date, request_date, cancellation_date, is_cancelled, #num_of_passengers, #leave these cols for further predicting improvement #num_of_children, #though not available for predicting might be useful #arrival_flight, #for some feature engineering, such as assesing #total_price #promotional offers etc ) df_joined <- bind_rows(jalan_filtered, rakuten_filtered) #df_joined$arrival_flight[df_joined$arrival_flight == "0"] <- NA write_csv(df_joined, "derived/df_joined.csv") #preparing data in such a way to get a row with a #target value for each company per each given date model_data <- df_joined %>% filter(is_cancelled == 0) %>% group_by(pickup_date, company_name) %>% summarise(target = n()) %>% ungroup() %>% pivot_wider(names_from = pickup_date, values_from = target) %>% pivot_longer(names(.)[-1], names_to = "pickup_date", values_to = "target") %>% select(target, everything()) model_data[is.na(model_data)] <- 0 model_data$pickup_date <- as_date(model_data$pickup_date) model_data$company_name <- as_factor(model_data$company_name) ggplot(data = model_data, aes(x = pickup_date, y = target, color = company_name)) + geom_bar(stat = "identity") #simple average approach get_cv_rmse_mean_model <- function(data, k = 10, seed = 1) { set.seed(seed) data <- data %>% mutate(dow = as.character(wday(pickup_date, label = TRUE)), mon = month(pickup_date)) %>% select(-pickup_date) data <- data[sample(nrow(data)), ] folds <- cut(seq(1, nrow(data)), breaks = k, labels = FALSE) rmse <- c() for (i in 1:k) { indices <- which(folds == i, arr.ind = TRUE) test_data <- data[indices, ] train_data <- data[-indices, ] 
mean_model <- train_data %>% group_by(company_name, dow, mon) %>% summarize(predicts = mean(target)) %>% ungroup() test_data <- left_join(test_data, mean_model, by = c("company_name", "dow", "mon")) rmse[i] <- sqrt(mean(( test_data$predicts - test_data$target ) ^ 2)) } mean(rmse) } rmse_mean_model <- get_cv_rmse_mean_model(model_data, seed = 1) #some feature engineering model_data <- model_data %>% mutate(mon_dow = paste(month(pickup_date, label = TRUE), as.character(wday(pickup_date, label = TRUE)), sep = "_")) mon_dow_lookup <- model_data %>% group_by(mon_dow) %>% summarise(mon_dow_encoded = mean(target)) #target encoding for date model_data <- left_join(model_data, mon_dow_lookup, by = "mon_dow") %>% select(-mon_dow) #start of long weekend (first day of two or more holidays in a row) holidays <- holidays %>% mutate(start_long_we = ifelse( (lag(Japan) == 0 & Japan == 1 & lead(Japan) == 1) | wday(day) == 6 & Japan == 1 | wday(day) == 7 & lead(Japan, 2) == 1, 1, 0 )) model_data <- model_data %>% mutate(is_start_long_we = holidays$start_long_we[match(pickup_date, holidays$day)]) #upcoming events avg_rent_duration <- as.numeric(mean(df_joined$return_date[df_joined$is_cancelled != 0] - df_joined$pickup_date[df_joined$is_cancelled != 0])) get_events <- function(pickup_day) { evs <- events %>% filter(city_id != 1536 & start_date == pickup_day + 1 & end_date - start_date < avg_rent_duration) evs$dist <- apply(evs, 1, function(row) distm(as.numeric(row[6:7]), c(141.650876, 42.8209577), fun = distHaversine) / 1000) nrow(subset(evs, dist > 60)) #manual adjustment } model_data$events <- sapply(model_data$pickup_date, get_events) write_csv(model_data, "derived/model_data.csv") #modelling get_cv_rmse_rf_model <- function(data, k = 10, seed = 1) { set.seed(seed) data <- data %>% select(-pickup_date) data <- data[sample(nrow(data)), ] folds <- cut(seq(1, nrow(data)), breaks = k, labels = FALSE) rmse <- c() for (i in 1:k) { indices <- which(folds == i, arr.ind = TRUE) test_data 
<- data[indices, ] train_data <- data[-indices, ] rf_mod = randomForest(target ~ . , data = train_data) predicts <- predict(rf_mod, test_data[,-1]) rmse[i] <- sqrt(mean((predicts - test_data$target) ^ 2)) } mean(rmse) } get_cv_rmse_ridge_model <- function(data, k = 10, seed = 1) { set.seed(seed) data <- data %>% select(-pickup_date) %>% dummy_cols( select_columns = c("company_name"), remove_first_dummy = TRUE, remove_selected_columns = TRUE ) data <- data[sample(nrow(data)), ] folds <- cut(seq(1, nrow(data)), breaks = k, labels = FALSE) rmse <- c() for (i in 1:k) { indices <- which(folds == i, arr.ind = TRUE) test_data <- data[indices, ] train_data <- data[-indices, ] cv_ridge <- cv.glmnet( x = as.matrix(train_data[names(train_data) != "target"]), y = train_data[["target"]], nfolds = 10, standardize = TRUE, alpha = 0 ) predicts <- predict(cv_ridge, s = cv_ridge$lambda.min, newx = as.matrix(test_data[,-1])) rmse[i] <- sqrt(mean((predicts - test_data$target) ^ 2)) } mean(rmse) } rmse_rf_model <- get_cv_rmse_rf_model(model_data) rmse_ridge_model <- get_cv_rmse_ridge_model(model_data) #trying to deal with weather forcast city_weather <- weather %>% filter(city_id == 1536) %>% distinct(date, .keep_all = TRUE) %>% mutate(avg_temp = (low_temp + high_temp) / 2) %>% select(date, avg_temp) %>% complete(date = seq(date[1], as_date("2019-02-28"), by = "1 day")) %>% fill(avg_temp) rented_cars <- df_joined %>% filter(is_cancelled == 0) %>% mutate(mon = month(pickup_date)) %>% group_by(mon) %>% summarise(rented = n()) %>% ungroup() cancelled_orders <- df_joined %>% filter(is_cancelled == 1 & pickup_date - cancellation_date <= 1) %>% mutate(mon = month(pickup_date)) %>% group_by(mon) %>% summarise(cancelled = n()) %>% ungroup() rush_cancellations <- rented_cars %>% left_join(cancelled_orders, by = "mon") %>% mutate(rush_ratio = cancelled / (rented + cancelled)) %>% select(mon, rush_ratio) temp_drop_days <- tibble(drop = ifelse(diff(city_weather$avg_temp, 2) / na.omit(lag( 
city_weather$avg_temp, 2 )) < 0, 1, 0), date = city_weather$date[3:nrow(city_weather)]) model_data_alt <- model_data %>% inner_join(temp_drop_days, by = c("pickup_date" = "date")) %>% mutate(drop = ifelse(drop == 1, rush_cancellations$rush_ratio[match(month(pickup_date), rush_cancellations$mon)], 0)) write_csv(model_data_alt, "derived/model_data_alt.csv") rmse_rf_model_alt_data <- get_cv_rmse_rf_model(model_data_alt) rmse_ridge_model_alt_data <- get_cv_rmse_ridge_model(model_data_alt) #gbm tuning via hyperparameter grid hyper_grid <- expand.grid( shrinkage = c(.1, .3, .5), interaction.depth = c(3, 5, 7), n.minobsinnode = c(5, 10, 15), bag.fraction = c(.5, .6, .8), optimal_trees = 0, min_RMSE = 0 ) #grid search for (i in 1:nrow(hyper_grid)) { set.seed(1) gbm_tune <- gbm( formula = target ~ ., distribution = "gaussian", data = model_data %>% select(-pickup_date), n.trees = 1000, interaction.depth = hyper_grid$interaction.depth[i], shrinkage = hyper_grid$shrinkage[i], n.minobsinnode = hyper_grid$n.minobsinnode[i], bag.fraction = hyper_grid$bag.fraction[i], train.fraction = .75, n.cores = NULL, verbose = FALSE ) hyper_grid$optimal_trees[i] <- which.min(gbm_tune$valid.error) hyper_grid$min_RMSE[i] <- sqrt(min(gbm_tune$valid.error)) } hyper_grid %>% arrange(min_RMSE) %>% head(10) get_cv_rmse_gbm_model <- function(data, k = 10, seed = 1) { set.seed(seed) data <- data %>% select(-pickup_date) data <- data[sample(nrow(data)), ] folds <- cut(seq(1, nrow(data)), breaks = k, labels = FALSE) rmse <- c() for (i in 1:k) { indices <- which(folds == i, arr.ind = TRUE) test_data <- data[indices, ] train_data <- data[-indices, ] gbm_best <- gbm( formula = target ~ ., distribution = "gaussian", data = train_data, n.trees = 10, interaction.depth = 3, shrinkage = .5, n.minobsinnode = 15, bag.fraction = .5, train.fraction = .75, n.cores = NULL, verbose = FALSE ) predicts <- predict(gbm_best, test_data[,-1]) rmse[i] <- sqrt(mean((predicts - test_data$target) ^ 2)) } mean(rmse) } 
rmse_gbm_model <- get_cv_rmse_gbm_model(model_data) rmse_gbm_model_alt_data <- get_cv_rmse_gbm_model(model_data_alt) #tuning xgboost via hyperparameter grid hyper_grid_xgb <- expand.grid( eta = c(.01, .05, .1, .3), max_depth = c(1, 3, 5, 7), min_child_weight = c(1, 3, 5, 7), subsample = c(.6, .8, 1), colsample_bytree = c(.8, .9, 1), optimal_trees = 0, min_RMSE = 0 ) xgb_data <- model_data %>% select(-pickup_date) %>% dummy_cols( select_columns = c("company_name"), remove_first_dummy = TRUE, remove_selected_columns = TRUE ) #grid search for (i in 1:nrow(hyper_grid_xgb)) { params <- list( eta = hyper_grid_xgb$eta[i], max_depth = hyper_grid_xgb$max_depth[i], min_child_weight = hyper_grid_xgb$min_child_weight[i], subsample = hyper_grid_xgb$subsample[i], colsample_bytree = hyper_grid_xgb$colsample_bytree[i] ) set.seed(1) xgb_tune <- xgb.cv( params = params, data = as.matrix(xgb_data[names(xgb_data) != "target"]), label = xgb_data[["target"]], nrounds = 1000, nfold = 5, objective = "reg:squarederror", verbose = 0, early_stopping_rounds = 10 #stop if no improvement for 10 consecutive trees ) hyper_grid_xgb$optimal_trees[i] <- which.min(xgb_tune$evaluation_log$test_rmse_mean) hyper_grid_xgb$min_RMSE[i] <- min(xgb_tune$evaluation_log$test_rmse_mean) } hyper_grid_xgb %>% arrange(min_RMSE) %>% head(10) get_cv_rmse_xgb_model <- function(data, k = 10, seed = 1) { set.seed(seed) data <- data %>% select(-pickup_date) %>% dummy_cols( select_columns = c("company_name"), remove_first_dummy = TRUE, remove_selected_columns = TRUE ) data <- data[sample(nrow(data)), ] folds <- cut(seq(1, nrow(data)), breaks = k, labels = FALSE) params_final <- list( eta = 0.01, max_depth = 3, min_child_weight = 1, subsample = 0.6, colsample_bytree = 1 ) rmse <- c() for (i in 1:k) { indices <- which(folds == i, arr.ind = TRUE) test_data <- data[indices, ] train_data <- data[-indices, ] xgb_mod <- xgboost( params = params_final, data = as.matrix(train_data[names(train_data) != "target"]), label = 
train_data[["target"]], nrounds = 365, objective = "reg:squarederror", verbose = 0 ) predicts <- predict(xgb_mod, as.matrix(test_data[,-1])) rmse[i] <- sqrt(mean((predicts - test_data$target) ^ 2)) } mean(rmse) } rmse_xgb_model <- get_cv_rmse_xgb_model(model_data) rmse_xgb_model_alt_data <- get_cv_rmse_xgb_model(model_data_alt) #parameter list params_final <- list( eta = 0.01, max_depth = 3, min_child_weight = 1, subsample = 0.6, colsample_bytree = 1 ) #train final model for xgb xgb_fit_final <- xgboost( params = params_final, data = as.matrix(xgb_data[names(xgb_data) != "target"]), label = xgb_data[["target"]], nrounds = 365, objective = "reg:squarederror", verbose = 0 ) #train final model for ridge on model_data_alt ridge_data_alt <- model_data_alt %>% select(-pickup_date) %>% dummy_cols( select_columns = c("company_name"), remove_first_dummy = TRUE, remove_selected_columns = TRUE ) ridge_fit_final_alt <- cv.glmnet( x = as.matrix(ridge_data_alt[names(ridge_data_alt) != "target"]), y = ridge_data_alt[["target"]], nfolds = 10, standardize = TRUE, alpha = 0 ) saveRDS(params_final, "results/models/xgb_param.rds") saveRDS(xgb_fit_final, "results/models/xgb_model.rds") saveRDS(ridge_fit_final_alt, "results/models/ridge_model_alt.rds") #features importance importance_matrix <- xgb.importance(model = xgb_fit_final) xgb.plot.importance(importance_matrix, measure = "Gain") rf_mod_alt <- randomForest(target ~ . 
, data = model_data_alt %>% select(-pickup_date)) varImpPlot(rf_mod_alt) #new data for Jan-Feb 2019 new_data <- tibble(date = rep(seq( as_date("2019-01-01"), as_date("2019-02-28"), by = "1 day" ), each = 2), company_name = rep(c("rakuten", "jalan"), length(date) / 2)) %>% mutate( mon_dow = paste(month(date, label = TRUE), as.character(wday(date, label = TRUE)), sep = "_"), mon_dow_encoded = mon_dow_lookup$mon_dow_encoded[match(mon_dow, mon_dow_lookup$mon_dow)], is_start_long_we = holidays$start_long_we[match(date, holidays$day)], events = sapply(date, get_events), is_drop_temp = temp_drop_days$drop[match(date, temp_drop_days$date)], drop = ifelse(is_drop_temp == 1, rush_cancellations$rush_ratio[match(month(date), rush_cancellations$mon)], 0) ) %>% dummy_cols( select_columns = c("company_name"), remove_first_dummy = TRUE, remove_selected_columns = TRUE ) %>% select(-mon_dow,-is_drop_temp) #make final predictions xgb_new_data_preds <- predict(xgb_fit_final, as.matrix(new_data[,-c(1, 5)])) ridge_new_data_preds_alt <- c(predict( ridge_fit_final_alt, s = ridge_fit_final_alt$lambda.min, newx = as.matrix(new_data[,-1]) )) #saving results results <- cbind(new_data[, c(1, 6)], xgb_new_data_preds, ridge_new_data_preds_alt) %>% mutate( xgb_rounded = round(xgb_new_data_preds, 0), ridge_rounded = round(ridge_new_data_preds_alt, 0) ) results_jalan <- subset(results, company_name_jalan == 1) results_rakuten <- subset(results, company_name_jalan == 0) write_csv(results_rakuten[, c(1, 3)], "results/predictions/rakuten_xgb_raw.csv") write_csv(results_rakuten[, c(1, 5)], "results/predictions/rakuten_xgb_rounded.csv") write_csv(results_rakuten[, c(1, 4)], "results/predictions/rakuten_alt_raw.csv") write_csv(results_rakuten[, c(1, 6)], "results/predictions/rakuten_alt_rounded.csv") write_csv(results_jalan[, c(1, 3)], "results/predictions/jalan_xgb_raw.csv") write_csv(results_jalan[, c(1, 5)], "results/predictions/jalan_xgb_rounded.csv") write_csv(results_jalan[, c(1, 4)], 
"results/predictions/jalan_alt_raw.csv") write_csv(results_jalan[, c(1, 6)], "results/predictions/jalan_alt_rounded.csv")
5935957f35ac93fe257d6b0ab2d1b1e46db33188
7975cb4f2e83c574a4412e65f9f804399e1eb5a0
/man/contributing_basins_at_geom.Rd
398d21ed66f0f9392f46862b0fe39330a76996ae
[]
no_license
jmigueldelgado/assimReservoirs
7ff70516b18e5eb60f1d29ebaf900eed61af8794
7218f36e07b6f1705d2b289dcd3bf7c0da80c5d2
refs/heads/master
2022-12-03T11:58:59.176677
2020-04-06T19:26:07
2020-04-06T19:26:07
null
0
0
null
null
null
null
UTF-8
R
false
true
623
rd
contributing_basins_at_geom.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ident_contributing_basins_gauges.R \name{contributing_basins_at_geom} \alias{contributing_basins_at_geom} \title{Identify contributing basins - sf} \usage{ contributing_basins_at_geom(geom = res_max[res_max$id_jrc == 25283, ]) } \arguments{ \item{geom}{a geometry from an sf object (WGS84, UTM zone=24 south) to identify its contributing basins, e.g. a reservoir from \code{data(res_max)}} } \value{ a geospatial dataframe of all contributing subbasins } \description{ This function identifies contributing basins of an sf geospatial dataframe }
7b1ff3176658098046a45f756e136a04267efe26
070bc33923e734dae0b4be22e1802c73f96e715c
/ifelse.R
74ac95f5fcfc4981b0e478fbb338abae699677ef
[]
no_license
siddarthansaravanan/R-Basics
94235aabb425cf7552cb7fc916c79a8b0c95f619
3d0866e4ed884d122d42a8cdcd7232d8b1584a7a
refs/heads/master
2021-01-21T20:06:56.943468
2017-06-12T08:24:42
2017-06-12T08:24:42
92,189,008
0
0
null
null
null
null
UTF-8
R
false
false
37
r
ifelse.R
# Element-wise flag column: for each row, stores the string "TRUE" when
# x$b exceeds a$c, otherwise the string "FALSE".
# NOTE(review): the result is a *character* column, not a logical one; if a
# logical flag is intended, drop the quotes (x$a <- x$b > a$c).
# NOTE(review): the right-hand side reads `a$c` from a separate object `a`
# while everything else uses `x` -- possibly a typo for x$c; confirm.
x$a<-ifelse(x$b>a$c,"TRUE","FALSE")
701f9b29350a3f91eb13aa87b6db5d8156ddc124
efd73fe53d598844d6e549912d5ba1f2fe602e34
/Parameters and distributions.R
f90f9c355876be6219051726a456da9fa933859e
[ "CC0-1.0" ]
permissive
yhjung1231/Laundry-QMRAproject-2022
ef0611cf2e6395ad672bf217c798f3f7fddc847b
adf2cc09d2e38b96382563662146bfb8dc9362aa
refs/heads/main
2023-07-23T00:19:21.669558
2023-07-08T00:32:17
2023-07-08T00:32:17
386,435,027
0
1
null
null
null
null
UTF-8
R
false
false
7,632
r
Parameters and distributions.R
#File 1 Parameters and distributions library (truncdist) library(triangle) iterations <- 50000 set.seed(100) #Total hand area (cm^2) T.handarea<-runif(iterations, min=445, max=535) #Surface area of laundry Surface.area.laundry<-runif(iterations, min=4.9*10^3, max=1.8*10^4) Item.laundry<-runif(iterations, min=14, max=18) #Fraction of hand surface(hand-surface, hand-face) (unitless) Frac.HS <-runif(iterations,min=0.13, max=0.25) Frac.HF <-runif(iterations, min=0.008, max=0.012) #Duration (min) / Contact time Dur.wash <- 50 Dur.dry <- 60 Contact.time.laundry<-runif(iterations, min=1/60, max=10/60) Contact.time.face.w<-runif(iterations, min=1/60, max=50) Contact.time.face.d<-runif(iterations, min=1/60, max=60) Contact.time.face.f<-runif(iterations, min=1/60, max=60) #Initial concentration on hand/face Conc.i.hand<-0 Conc.i.face<-0 if(organism == "SARS-CoV-2") { #Transfer efficiency TE.dry <-rtrunc(iterations,"norm", mean=0.0003, sd=0.0002, a=0, b=1) TE.wet <-rtriangle(n=iterations, a=0.00001, b=0.0001, c=0.00005) TE.face<-rtrunc(iterations,"norm", mean=0.3390, sd=0.1318, a=0, b=1) #Reduction, inactivation Reduc.wash <-rtrunc(iterations,"norm", mean=3.0, sd=0.02, a=0, b=5.0) Reduc.dry <-rtrunc(iterations, "norm", mean=3.0, sd=0.02, a=0, b=5.0) Reduc.hwash<-runif(iterations, min=2.03, max=5.0) Inact.h <-rtriangle(iterations, a=0.0056, b=0.0125, c=0.0084) Inact.s <-rtriangle(iterations, a=0.000075, b=0.000125, c=0.0001) #Initial concentration on laundry on Sunday Viralload <-runif(iterations,min=3.3*10^6, max=2.35*10^9) Freq.cough.day<-rtriangle(n=iterations, a=240, b=1872, c=864) Volume.cough<-5.964*10^(-3) Conversion.ratio<-runif(iterations, min=100, max=1000) Conc.onecloth<-(Viralload*Volume.cough*Freq.cough.day)/(Conversion.ratio*Surface.area.laundry) Conc.i.laundry<-(Conc.onecloth*exp(-Inact.s*6*24*60)+Conc.onecloth*exp(-Inact.s*5*24*60)+ Conc.onecloth*exp(-Inact.s*4*24*60)+Conc.onecloth*exp(-Inact.s*3*24*60)+ 
Conc.onecloth*exp(-Inact.s*2*24*60)+Conc.onecloth*exp(-Inact.s*1*24*60)+ Conc.onecloth)/Item.laundry } else if (organism == "Rotavirus") { #Transfer efficiency TE.dry <-rtrunc(iterations,"norm", mean=0.0003, sd=0.0002, a=0, b=1) TE.wet <-rtriangle(n=iterations, a=0.00001, b=0.0001, c=0.00005) TE.face<-rtrunc(iterations,"norm", mean=0.3390, sd=0.1318, a=0, b=1) #Reduction, inactivation Reduc.wash <-rtrunc(iterations,"norm", mean=5.0, sd=0.25, a=0, b=7.0) Reduc.dry <-rtrunc(iterations, "norm", mean=6.9, sd=0.06, a=0, b=9.0) Reduc.hwash<-runif(iterations, min=0.14, max=4.32) Inact.h <-rtrunc(iterations, "norm", mean=0.0045, sd=0.000258, a=0, b=1) Inact.s <-runif(iterations, min=0.0025, max=0.0042) #Initial concentration on laundry on Sunday Conc.feces <-runif(iterations,min=10^10, max=10^12) Mass.feces <-runif(iterations, min=0.01, max=1) Conc.onecloth<-Conc.feces*Mass.feces/(Surface.area.laundry) Conc.i.laundry<-(Conc.onecloth*exp(-Inact.s*6*24*60)+Conc.onecloth*exp(-Inact.s*5*24*60)+ Conc.onecloth*exp(-Inact.s*4*24*60)+Conc.onecloth*exp(-Inact.s*3*24*60)+ Conc.onecloth*exp(-Inact.s*2*24*60)+Conc.onecloth*exp(-Inact.s*1*24*60)+ Conc.onecloth)/Item.laundry #Dose response value alpha<-2.53E-01 N50<-6.17E+00 beta<-N50/(2^(1/alpha)-1) } else if(organism=="Norovirus"){ #Transfer efficiency TE.dry <-rtrunc(iterations,"norm", mean=0.0003, sd=0.0002, a=0, b=1) TE.wet <-rtriangle(n=iterations, a=0.00001, b=0.0001, c=0.00005) TE.face<-rtrunc(iterations,"norm", mean=0.3390, sd=0.1318, a=0, b=1) #Reduction, inactivation (cold, cotton) Reduc.wash <-rtrunc(iterations,"norm", mean=2.4, sd=0.62, a=0, b=4.0) Reduc.dry <-rtrunc(iterations, "norm", mean=6.1, sd=0.10, a=0, b=8.0) Reduc.hwash<-runif(iterations, min=0.91, max=3.78) Inact.h <-runif(iterations, min=0.0002, max=0.0118) Inact.s <-runif(iterations, min=0.0008, max=0.0016) #Initial concentration on laundry on Sunday Viralload <-runif(iterations,min=10^7.71, max=10^10.94) Mass.feces <-runif(iterations, min=0.01, max=1) 
Conversion.ratio<-runif(iterations, min=100, max=1000) Conc.onecloth<-Viralload*Mass.feces/(Surface.area.laundry*Conversion.ratio) Conc.i.laundry<-(Conc.onecloth*exp(-Inact.s*6*24*60)+Conc.onecloth*exp(-Inact.s*5*24*60)+ Conc.onecloth*exp(-Inact.s*4*24*60)+Conc.onecloth*exp(-Inact.s*3*24*60)+ Conc.onecloth*exp(-Inact.s*2*24*60)+Conc.onecloth*exp(-Inact.s*1*24*60)+ Conc.onecloth)/Item.laundry } else if(organism=="Salmonella"){ #Transfer efficiency TE.dry <-rtrunc(iterations,"norm", mean=0.068, sd=0.070, a=0, b=1) TE.wet <-rtriangle(n=iterations, a=0.00001, b=0.0001, c=0.00003) TE.face<-rtrunc(iterations,"norm", mean=0.3397, sd=0.1604, a=0, b=1) #Reduction, inactivation Reduc.wash <-rtrunc(iterations,"norm", mean=3.4, sd=0.39, a=0, b=5.0) Reduc.dry <-rtrunc(iterations, "norm", mean=7.3, sd=0.42, a=0, b=9.0) Reduc.hwash<-runif(iterations, min=0.6, max=5.8) Inact.h <-runif(iterations, min=0.0275, max=0.0533) Inact.s <-runif(iterations, min=0.0013, max=0.0015) #Initial concentration on laundry on Sunday Conc.feces <-runif(iterations,min=10^6, max=10^10) Mass.feces <-runif(iterations, min=0.01, max=1) Conc.onecloth<-Conc.feces*Mass.feces/Surface.area.laundry Conc.i.laundry<-(Conc.onecloth*exp(-Inact.s*6*24*60)+Conc.onecloth*exp(-Inact.s*5*24*60)+ Conc.onecloth*exp(-Inact.s*4*24*60)+Conc.onecloth*exp(-Inact.s*3*24*60)+ Conc.onecloth*exp(-Inact.s*2*24*60)+Conc.onecloth*exp(-Inact.s*1*24*60)+ Conc.onecloth)/Item.laundry #Dose response value alpha<-2.1E-01 N50<-4.98E+01 beta<-N50/(2^(1/alpha)-1) } else { #E.coli #Transfer efficiency TE.dry <-rtrunc(iterations,"norm", mean=0.068, sd=0.070, a=0, b=1) TE.wet <-rtriangle(n=iterations, a=0.00001, b=0.0001, c=0.00003) TE.face<-rtrunc(iterations,"norm", mean=0.3397, sd=0.1604, a=0, b=1) #Reduction, inactivation (cold, cotton) Reduc.wash <-rtrunc(iterations,"norm", mean=3.5, sd=0.96, a=0, b=6.0) Reduc.dry <-rtrunc(iterations, "norm", mean=8.0, sd=0.93, a=0, b=10.0) Reduc.hwash<-runif(iterations, min=0.6, max=5.8) Inact.h 
<-runif(iterations, min=0.05, max=0.0753) Inact.s <-runif(iterations, min=0.0015, max=0.023) #Reduction (various options) Reduc.wash.terry <-rtrunc(iterations, "norm", mean=2.1, sd=0.21, a=0, b=4) Reduc.wash.terry.w<- rtrunc(iterations, "norm", mean=2.1, sd=0.11, a=0, b=4.0) Reduc.dry.line<-rtrunc(iterations, "norm", mean=5.2, sd=0.55, a=0, b=7.0) #Initial concentration on laundry on Sunday Conc.feces <-runif(iterations,min=10^7, max=10^9) Mass.feces <-runif(iterations, min=0.01, max=1) Conc.onecloth<-Conc.feces*Mass.feces/Surface.area.laundry Conc.i.laundry<-(Conc.onecloth*exp(-Inact.s*6*24*60)+Conc.onecloth*exp(-Inact.s*5*24*60)+ Conc.onecloth*exp(-Inact.s*4*24*60)+Conc.onecloth*exp(-Inact.s*3*24*60)+ Conc.onecloth*exp(-Inact.s*2*24*60)+Conc.onecloth*exp(-Inact.s*1*24*60)+ Conc.onecloth)/Item.laundry #Dose response value alpha<-1.55E-01 N50<-2.11E+06 beta<-N50/(2^(1/alpha)-1) }
3e1960ddbb75ad6d9a87050ac2a6ab8ceef0b63d
3877ee02e7deec476c64901c474a24ad56dcd431
/man/listMetaGenomes.Rd
668cbc9fbfd6cc7c8e527665c329bb420e303790
[]
no_license
ropensci/biomartr
282d15b64b1d984e3ff8d7d0e4c32b981349f8ca
e82db6541f4132d28de11add75c61624644f6aa1
refs/heads/master
2023-09-04T09:40:15.481115
2023-08-28T15:56:25
2023-08-28T15:56:25
22,648,899
171
34
null
2023-09-14T12:28:02
2014-08-05T15:34:55
R
UTF-8
R
false
true
1,062
rd
listMetaGenomes.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/listMetaGenomes.R \name{listMetaGenomes} \alias{listMetaGenomes} \title{List available metagenomes on NCBI Genbank} \usage{ listMetaGenomes(details = FALSE) } \arguments{ \item{details}{a boolean value specifying whether only the scientific names of stored metagenomes shall be returned (\code{details = FALSE}) or all information such as "organism_name","bioproject", etc (\code{details = TRUE}).} } \description{ List available metagenomes on NCBI genbank. NCBI genbank allows users to download entire metagenomes of several metagenome projects. This function lists all available metagenomes that can then be downloaded via \code{\link{getMetaGenomes}}. } \examples{ \dontrun{ # retrieve available metagenome projects at NCBI Genbank listMetaGenomes() # retrieve detailed information on available metagenome projects # at NCBI Genbank listMetaGenomes(details = TRUE) } } \seealso{ \code{\link{getMetaGenomes}}, \code{\link{getMetaGenomeSummary}} } \author{ Hajk-Georg Drost }
675175004f006922566807d6866e7853584fd66d
e7a321655bd46d1d2b73d38f3e69e161d6d4e186
/man/approx.hessian.vector.product.Rd
ff9709997e42710718d4684c1c848cc68a1fa138
[]
no_license
mattdneal/GPLVM
0594aca5174ed7e0f0ff7f6c17cced54dce1cd14
bd46d9e563c7bdd482837d75442af2d0d23385d8
refs/heads/master
2021-01-21T10:46:26.478446
2019-01-19T09:22:51
2019-01-19T09:22:51
101,985,787
1
0
null
null
null
null
UTF-8
R
false
true
278
rd
approx.hessian.vector.product.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/LSA-BCSGPLVM.R \name{approx.hessian.vector.product} \alias{approx.hessian.vector.product} \title{Title} \usage{ approx.hessian.vector.product(g, x, v) } \arguments{ \item{r}{} } \description{ Title }
b36dfab4438848cbb46611c41fb169dd2b33e0fe
3280b33a933df0b9630ccf2899b11ab9cced792f
/man/get.decode.Rd
0a91d5d82d9ea832a40d94fab7c993bbb208d9fc
[]
no_license
cran/us.census.geoheader
92918a2764fdd8d55d9a650746d3e0a80c67619d
7f1e4f43cc7721890a1e417eb59ed362468fbc48
refs/heads/master
2022-11-12T09:47:56.846318
2020-06-25T09:20:02
2020-06-25T09:20:02
276,713,911
0
0
null
null
null
null
UTF-8
R
false
true
267
rd
get.decode.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/uscgh.R \name{get.decode} \alias{get.decode} \title{(internal) Return the Decode Table} \usage{ get.decode() } \value{ the decode table } \description{ (internal) Return the Decode Table }
2f59c7766d8b544b1de12255feb7e0fae1326044
074aa4a68f3ef87710eb48bc1fc904e1aba41da1
/R/helpers.R
51463c16c45598fbb8e597a197fba4aac2dea5fb
[]
no_license
AuckeBos/keywords-extraction-patient-reviews
bf771ed97e9ac9cfea25cf9720d1946d49d6e71c
aad9330c95333265191b18769938127ca8f167e4
refs/heads/main
2023-06-12T06:33:05.618847
2021-07-05T11:54:34
2021-07-05T11:54:34
383,117,833
0
0
null
null
null
null
UTF-8
R
false
false
1,605
r
helpers.R
# ==================================================================================================================== # # FUNCTIONALITY: # Provide helper functions used in other scripts. # - write_log writes a message to stdout, including timestamp # - start_eta and eta are used for timing long-running functions # ==================================================================================================================== # # Set when start_eta() is ran, used to compute etas during long-running processes timer <- Sys.time() #' Write a log to stdou #' @param msg: The msg write_log <- function(msg) { time <- format(Sys.time(), "%H:%M:%S") print(sprintf("[%s] - %s", time, msg)) } #' Start timer for ETA estimation start_eta <- function() { timer <<- Sys.time() } #' Calculdate ETA #' @param num_done: Number of items done #' @param num_total: Number of items to do in total #' @param message: If provided, write log "sprintf(message, time per entity, eta)". Thus should have 2 %s'es #' @return [eta, time_per_entity (s)]. Only if missing(message) eta <- function(num_done, num_total, message) { time_elapsed <- difftime(Sys.time(), timer, units = "secs") time_per_entity <- time_elapsed / num_done time_todo <- floor((num_total - num_done) * time_per_entity) estimation <- format(Sys.time() + time_todo, "%H:%M:%S") time_per_entity <- round(time_per_entity, 2) if(!missing(message)) { write_log(sprintf(paste("[%s/%s] -", message), num_done, num_total, time_per_entity, estimation)) } else { return(list(estimation, time_per_entity)) } }
e2b5a9d52b8a52fbe509d2d46b564fc9cef84bb5
03e0d13444bff1042ade6d03f6e5e8a35cff353f
/Archive/Main_Sections/Fermentables/fermentablesWeights.R
c2fe070dbc008163fdf9f3d4e376b587620a45f6
[]
no_license
BenjaminBearce/BK_Brew
02ef7bfb7abd8cdbe0a5c250bbbea149b959b1da
f83029854b6d4f78b6001d51bf32e9e91d841ddb
refs/heads/master
2021-01-17T07:10:17.746306
2016-12-11T19:18:53
2016-12-11T19:18:53
45,363,903
0
6
null
2015-12-31T01:19:29
2015-11-02T00:41:15
R
UTF-8
R
false
false
913
r
fermentablesWeights.R
#-----------------------------------------------------------# #---------------------- Grain Weights ----------------------# #-----------------------------------------------------------# fermentablesWeights <- function(grains = FALSE){ if(grains == FALSE){ cat("No beer selected") }else{ OG <- beerSelection$GravityRange %>% str_split("-") %>% unlist() %>% as.numeric() lowerRange <- (OG[1]-1)*1000 higherRange <- OG[2] OG <- mean(c(lowerRange,higherRange))/1000+1 totalGravity <- OG*5.5 #OG*Gal grains <- grains %>% mutate(IngredientGravity = Percentage*totalGravity) %>% mutate(lbsNeeded = IngredientGravity/(((PPG-1)*1000)*BrewHouseConstants$BrewHouseEfficiency_Percent/100)) return(grains) } }
714f47951cd4c576c830cc43cb5ffad67691e208
26039c7524e788d6ca0e1ae1e219dd22606fd6bc
/backends/mxnet/test/gen_r_json.R
91a84d3a1b55c99efc0747e52cc36c2629147f5d
[]
no_license
tomz/deepwater
9976521e5b7560b396aa7bebaf20db75e46a04cb
05b649da1b67fe0c2d01bb5554f6f6015253a98f
refs/heads/master
2020-04-06T06:18:40.843151
2016-08-25T20:14:02
2016-08-25T20:14:02
null
0
0
null
null
null
null
UTF-8
R
false
false
495
r
gen_r_json.R
library(mxnet) #name = "alexnet" #name = "googlenet" #name = "inception-bn" name = "inception-v3" #name = "lenet" #name = "mlp" #name = "resnet" #name = "vgg" source(paste("symbol_", name, ".R", sep = '')) network <- get_symbol(10) cat(network$as.json(), file = paste("symbol_", name, "-R.json", sep = ''), sep = "") name = "unet" source(paste("symbol_", name, ".R", sep = '')) network <- get_symbol() cat(network$as.json(), file = paste("symbol_", name, "-R.json", sep = ''), sep = "")
7de8c93c46183e3ca3e70bd61c6e9ce0f4d00d90
871378e379c6e31796ef961d1f70de2ad454939b
/R/msg.R
dfbb4ef39d44f61bb0a22bb6ed1569a54ccd6635
[]
no_license
cran/ESPRESSO
3cc192f231b3c3c98894714a25366b7eacd1e890
ef9c47486ce493980faa9662fd141a7e1f73f009
refs/heads/master
2021-01-23T03:53:31.983741
2011-04-01T00:00:00
2011-04-01T00:00:00
null
0
0
null
null
null
null
UTF-8
R
false
false
383
r
msg.R
.onAttach = function(libname, pkgname){ packageStartupMessage("\nWE ARE AWARE OF A BUG ON THE LAST VERSION OF THE TOOL AND ARE WORKING TO SORT IT OUT") packageStartupMessage("A NEW VERSION WILL BE AVAILABLE BY THE END OF DECEMBER 2013") packageStartupMessage("WE ARE ALSO WORKING ON A WEB-BASED VERSION THAT WILL BE AVAILABLE AT:\n www.espresso-research.org\n") }
bbbf537bc1f7a2e720237cb1bef9d20bbee95e54
f4e63cf535679b0240a0e05b2b3dcab9496c7f8c
/Machine Learning/machineLearning-Decision tree learning.R
99343577d2dac8b14842d22f7636da9ed8ed8402
[]
no_license
Philip-Abraham/MachineLearning_R
27e77f7c368a316ab43b800251b71a323c66936c
e931955c87a48a87485a25d0ffb59bf3d7b96641
refs/heads/master
2021-07-06T15:59:47.774905
2017-09-27T22:07:26
2017-09-27T22:07:26
105,074,120
0
0
null
null
null
null
UTF-8
R
false
false
3,251
r
machineLearning-Decision tree learning.R
library(titanic) data("titanic_train") titanic <- titanic_train[,c(2,3,5,6)] titanic <- titanic[complete.cases(titanic),] # First, you'll want to split(70/30) the dataset into train and test sets. You'll notice # that the titanic dataset is sorted on titanic$Survived , so you'll need to first # shuffle the dataset in order to have a fair distribution of the output variable # in each set. # Set random seed. Don't remove this line. set.seed(1) # Shuffle the dataset, call the result shuffled n <- nrow(titanic) shuffled <- titanic[sample(n),] # Split the data in train and test train_indices <- 1:round(0.7 * n) train <- shuffled[train_indices, ] test_indices <- (round(0.7 * n) + 1):n test <- shuffled[test_indices, ] # Print the structure of train and test str(train) str(test) # Build a decision tree using the rpart() function of the rpart package. It comes # up with possible feature tests and building a tree with the best of these tests. # Load the rpart, rattle, rpart.plot and RColorBrewer package library(rpart) library(rattle) library(rpart.plot) library(RColorBrewer) # The tree was trained with the Gini impurity criterion, which rpart() uses by default. # Build a tree model: tree tree_g <- rpart(Survived ~ ., data = train, method = "class") # Draw the decision tree fancyRpartPlot(tree_g) # Now you are going to classify the instances that are in the test set # Predict the values of the test set: pred pred_g <- predict(tree_g, test, type="class") # Construct the confusion matrix: conf conf_g <- table(test$Survived, pred_g) conf_g # Print out the accuracy acc_g <- sum(diag(conf_g))/sum(conf_g) acc_g # Around 80 percent of all test instances have been classified correctly. That's not bad! 
# Change the training criterion to use information gain as splitting criterion tree_i <- rpart(Survived ~ ., data = train, method = "class", parms = list(split = "information")) # Draw the decision tree fancyRpartPlot(tree_i) # Now you are going to classify the instances that are in the test set # Predict the values of the test set: pred pred_i <- predict(tree_i, test, type="class") # Construct the confusion matrix: conf conf_i <- table(test$Survived, pred_i) conf_i # Print out the accuracy acc_i <- sum(diag(conf_i))/sum(conf_i) acc_i # Around 80 percent of all test instances have been classified correctly. That's not bad! #Pruning a complex tree # Calculation of a complex tree set.seed(1) tree <- rpart(Survived ~ ., train, method = "class", control = rpart.control(cp=0.00001)) # Draw the complex tree fancyRpartPlot(tree) # Prune the tree: pruned # The cp argument to be 0.01. This is a complexity parameter. It basically tells # the algorithm to remove node splits that do not sufficiently decrease the impurity. pruned <- prune(tree, cp=0.01) # Draw pruned fancyRpartPlot(pruned) # Another way to check if you overfit your model is by comparing the accuracy on # the training set with the accuracy on the test set. You'd see that the difference # between those two is smaller for the simpler tree. You can also set the cp # argument while learning the tree with rpart() using rpart.control.
5184ad2755a341dd974fe47eb0677c8ca4fa0103
d97273424f84121eaf3a57479b00c8fc3de6bf31
/analyzeCodes/plotTradeRe.R
11148ef416961128bce49bbc95ef7b86953c3c7b
[]
no_license
dvaruas/stock-market-prediction
f05a3d32fd2ad6731d1734a81b3f44b6b4b49522
b7ad99aeb2d9fab6cb9bc3bbf6d9bebd35e2c1f5
refs/heads/master
2022-08-15T06:09:51.644096
2020-05-16T08:57:04
2020-05-16T08:57:04
64,368,402
1
2
null
null
null
null
UTF-8
R
false
false
1,287
r
plotTradeRe.R
getPlotTrade <- function() { ourMethodData <- read.csv('../stock_quotes/ourMethodReturn.csv') baseData <- read.csv('../stock_quotes/baseReturn.csv') dateValues <- as.Date(ourMethodData$date, format = '%d-%m-%y') ourValues <- as.numeric(ourMethodData$returnValue) baseValues <- as.numeric(baseData$returnValue) print(paste('Our Method --> Return rate : ', mean(ourValues), sep = '')) print(paste('Base Method --> Return rate : ', mean(baseValues), sep = '')) # plot(dateValues, ourValues, type = 'o', col = 'red', main = 'Trade Value Comparison', xlab = 'Date', ylab = 'Return Value', pch = 2, ylim = c(-500, 2000)) # par(new = TRUE) # plot(dateValues, baseValues, type = 'o', col = 'green', main = 'Trade Value Comparison', xlab = 'Date', ylab = 'Return Value', ylim = c(-500, 2000), pch = 1) # legend('topright', legend = c('Our Method', 'Hagneau Method'), col = c('red', 'green'), lty = 1, pch = c(2,1)) ## Prepare data for input to barplot breaks <- pretty(range(c(ourValues, baseValues)), n=20) D1 <- hist(ourValues, breaks=breaks, plot=FALSE)$counts D2 <- hist(baseValues, breaks=breaks, plot=FALSE)$counts dat <- rbind(D1, D2) colnames(dat) <- paste(breaks[-length(breaks)], breaks[-1], sep="-") ## Plot it barplot(dat, beside=TRUE, space=c(0, 2), las=2) }
4f970be49051bbc8766b7cb8d6c4c930841e626d
1482c0c2e994197d04c2149eb19ce2f313cd7a45
/R/clustering.R
f78b1f46466a35c455d96d2c05e6cbdb9415f164
[ "MIT" ]
permissive
alexloboda/SVDFunctions
4adffe4b7e101a68b5cf756d8fefee45610303c5
666dbc820f81a3ab03e706fea380deaeb1d6f4f5
refs/heads/master
2023-05-11T13:44:28.623205
2023-03-28T15:12:38
2023-03-28T15:12:38
153,036,232
6
1
null
2019-05-14T17:17:20
2018-10-15T01:28:35
C++
UTF-8
R
false
false
6,396
r
clustering.R
checkTree <- function(t, cs) { if (is.list(t)) { if (length(t) != 2) { stop("Tree must be binary") } cs <- checkTree(t[[1]], cs) checkTree(t[[2]], cs) } else { if (is.null(t) || is.na(t)) { stop("Node must not be null nor NA") } if (!(t %in% cs)) { stop(paste("Cluster in a tree must be mentioned exactly once:"), t) } cs[cs != t] } } deletionName <- "###____toDelete____$$$" cats <- function(file, depth, ...) { cat(spaces(depth), ..., file = file, sep = "") } writePopulationStructure <- function(tree, classes, fd, margin = 0) { node <- tree if (node$isRoot) { cats(fd, 0, "hierarchy:\n") writePopulationStructure(node$children[[1]], classes, fd, 1) return() } if (data.tree::isLeaf(node)) { cats(fd, margin, "cluster:\n") cats(fd, margin + 1 , "id: ", node$id, "\n") cats(fd, margin + 1, "name: ", node$name, "\n") } else { cats(fd, margin, "split:\n") cats(fd, margin + 1, "id: ", node$id, "\n") cats(fd, margin + 1, "left:\n") writePopulationStructure(node$children[[1]], classes, fd, margin + 2) cats(fd, margin + 1, "right:\n") writePopulationStructure(node$children[[2]], classes, fd, margin + 2) } } recTree <- function(tree, hier, classes) { if (length(hier) == 1) { leaf <- tree$AddChild(hier[[1]]) leaf$id <- which(classes == hier[[1]]) } else { node <- tree$AddChild("-#") node$id <- "" recTree(node, hier[[1]], classes) recTree(node, hier[[2]], classes) } } update.nodes <- function(cl) { i <- cl$hier$leafCount + 1 cl$hier$Do(function(node) { if (!node$isRoot && !node$isLeaf) { node$id <- i i <<- i + 1 } }) } #' Constructor for clustering object. #' @param classification named with sample names character vector of #' classes. #' @param hierarchy tree-like structure, a list containing cluster name(leafs) or #' two lists with the same structure. See examples. 
#' @examples #' samples <- c("C1", "C1", "C2", "C2", "C3", "C4") #' names(samples) <- paste0("sample", 1:6) #' hier <- list("C1", list(list("C2", "C4"), "C3")) #' clustering(samples, hier) #' @export clustering <- function(classification, hierarchy) { classes <- as.character(unique(classification)) map <- setNames(1:length(classes), classes) classification <- setNames(map[as.character(classification)], names(classification)) left <- checkTree(hierarchy, classes) if(length(left) != 0) { stop(paste0("Following classes are not mentioned in herarchy: ", left)) } tree <- data.tree::Node$new("Population structure") recTree(tree, hierarchy, classes) obj <- structure(list(classes = classes, samples = classification, hier = tree), class = "clustering") update.nodes(obj) obj } normalizeClustering <- function(clustering) { ids <- clustering$hier$Get(function(x) x$id, filterFun = data.tree::isLeaf) ids <- ids[order(ids)] map <- list() for (i in 1:length(ids)) { map[ids[i]] <- i } clustering$classes <- names(ids) samples <- names(clustering$samples) clustering$samples <- unlist(map[clustering$samples]) names(clustering$samples) <- samples clustering$hier$Do(function(node) node$id = map[[node$id]], filterFun = data.tree::isLeaf) update.nodes(clustering) clustering } clusterID <- function(clustering, cluster) { ret <- which(clustering$classes == cluster) if (length(ret) == 0) { stop("No such cluster") } ret[1] } findNode <- function(tree, id) { ret <- tree$Get(function(node) { node }, filterFun = function(node) { node$isLeaf && node$id == id }) ret[[1]] } #' Remove cluster from clusutering object with all samples in it. #' @param clustering clustering object. #' @param cluster a name of the cluster to delete or its id. 
#' @export removeCluster <- function(clustering, cluster) { clustering$hier <- data.tree::Clone(clustering$hier) id <- clusterID(clustering, cluster) tree <- clustering$hier node <- findNode(tree, id) if (node$parent$isRoot) { stop("Can't delete the root of hierarchy tree") } samples <- node$Get(function(node) node$id, filterFun = data.tree::isLeaf) clustering$samples <- clustering$samples[!(clustering$samples %in% samples)] sibling <- node$siblings[[1]] p <- node$parent p$name <- deletionName gp <- p$parent p$AddSiblingNode(sibling) gp$RemoveChild(deletionName) gp$id <- NULL clustering$samples <- clustering$samples[clustering$samples != id] normalizeClustering(clustering) } #' Merge a cluster with its sibling in a tree. #' @param clustering clustering object. #' @param cluster a name of the cluster or its id. #' @export mergeCluster <- function(clustering, cluster) { clustering$hier <- data.tree::Clone(clustering$hier) id <- clusterID(clustering, cluster) node <- findNode(clustering$hier, id) if (node$parent$isRoot) { stop("Can't merge the root of hierarchy tree") } p <- node$parent clusters <- p$Get(function(node) node$id, filterFun = data.tree::isLeaf) clusterNames <- p$Get(function(node) node$name, filterFun = data.tree::isLeaf) id <- clusters[1] clustering$samples[clustering$samples %in% clusters] <- id gp <- p$parent p$name <- deletionName newNode <- data.tree::Node$new(paste(clusterNames, collapse = " | ")) newNode$id <- id p$AddSiblingNode(newNode) gp$RemoveChild(deletionName) gp$id <- "" normalizeClustering(clustering) } #' @export print.clustering <- function(x, ...) 
{ cat("Classes: ", paste(x$classes, collapse = ", "), "\n\n") print(x$hier, "id") } #' @export `[<-.clustering` <- function(x, i = NULL, j = NULL, value) { if (is.null(i)) { if (length(value) != length(x$classes)) { stop("number of items to replace is not a multiple of replacement length") } for (j in 1:length(value)) { x[j] <- value[j] } return(x) } if (i %in% x$classes) { x$classes[x$classes == i] <- value x$hier$Do(function(node) { node$name <- value }, filterFun = function(node) { node$name == i }) return(x) } if (i >= 1 && i <= length(x$classes)) { x$classes[i] <- value x$hier$Do(function(node) { node$name <- value }, filterFun = function(node) { data.tree::isLeaf(node) && node$id == i }) return(x) } stop(paste0("No such cluster: ", i)) }
cb8fdb606e9400e1076c1881021cae979759f480
a8751ed8f4113510037204fb0f03964235fa2250
/man/AgeTrans.Rd
01bbcf0ac04bb7788e14771e54328797d9725f15
[]
no_license
al00014/Biograph
83ed141195adfec30634576e6a99c4ed54cbf0a3
15b46e3416f83964aab1baeaa58d1d92fa4e7e2b
refs/heads/master
2023-04-23T19:58:23.499789
2016-03-31T17:50:43
2016-03-31T17:50:43
null
0
0
null
null
null
null
UTF-8
R
false
false
748
rd
AgeTrans.Rd
\name{AgeTrans} \alias{AgeTrans} \title{Ages at transition} \description{Converts dates at transition to ages at transition} \usage{AgeTrans(Bdata)} \arguments{ \item{Bdata}{Biograph object: data in Biograph format} } \value{ \item{ages}{ages at transition} \item{ageentry}{ages at entry into observation} \item{agecens}{ages at end of observation (censoring)} \item{st_entry}{states occupied at entry into observation} \item{st_censoring}{states occupied at censoring} } \details{ Ages are in years, even when dates are in CMC. } \seealso{YearTrans} \author{Frans Willekens} \note{The sequence of transitions in the component 'ages' is same as in the Biograph object} \examples{ data(GLHS) agetrans <- AgeTrans(Bdata=GLHS) }
d7670ec7bf3db2fab14df1bf5414e3b5e20b0180
00bf0bbb222c10aae4625b0ed5046d4b8b0e7c37
/refm/api/src/forwardable.rd
80e64eff400316ab28cb7d8080ed2fa4fe7a9657
[]
no_license
foomin10/doctree
fe6a7097d544104fe71678121e6764d36a4b717a
a95789a60802c8f932c0a3e9ea21a4fc2058beb8
refs/heads/master
2023-02-18T20:00:32.001583
2023-02-05T00:49:18
2023-02-05T00:49:18
32,222,138
1
0
null
2015-03-14T16:54:52
2015-03-14T16:54:51
null
UTF-8
R
false
false
7,131
rd
forwardable.rd
category DesignPattern クラスやオブジェクトに、メソッドの委譲機能を追加するためのライブラリです。 #@#以下のモジュールが定義されます。 #@# * [[c:Forwardable]] #@# * [[c:SingleForwardable]] #@#詳細は [[unknown:"ruby-src:doc/forwardable.rd.ja"]] を参照してください。 === 参考 * Rubyist Magazine 0012 号 標準添付ライブラリ紹介【第 6 回】委譲 ([[url:https://magazine.rubyist.net/articles/0012/0012-BundledLibraries.html]]) = module Forwardable クラスに対し、メソッドの委譲機能を定義するモジュールです。 === 使い方 クラスに対して [[m:Object#extend]] して使います。[[m:Module#include]] でないところに注意して下さい。 例: require 'forwardable' class Foo extend Forwardable def_delegators("@out", "printf", "print") def_delegators(:@in, :gets) def_delegator(:@contents, :[], "content_at") end f = Foo.new f.printf ... f.gets f.content_at(1) == Singleton Methods --- debug -> bool 委譲部分をバックトレースに含めるかどうかの状態を返します。 バックトレースを含める設定となっている時、真を返します。 デフォルトは含めない設定となっています。 --- debug= -> bool 委譲部分をバックトレースに含めるかどうかの状態を設定します。 == Instance Methods --- def_instance_delegators(accessor, *methods) -> () --- def_delegators(accessor, *methods) -> () メソッドの委譲先をまとめて設定します。 @param accessor 委譲先のオブジェクト @param methods 委譲するメソッドのリスト 委譲元のオブジェクトで methods のそれぞれのメソッドが呼び出された場合に、 委譲先のオブジェクトの同名のメソッドへ処理が委譲されるようになります。 def_delegators は def_instance_delegators の別名になります。 また、以下の 2 つの例は同じ意味です。 def_delegators :@records, :size, :<<, :map def_delegator :@records, :size def_delegator :@records, :<< def_delegator :@records, :map @see [[m:Forwardable#def_delegator]] --- def_instance_delegator(accessor, method, ali = method) -> () --- def_delegator(accessor, method, ali = method) -> () メソッドの委譲先を設定します。 @param accessor 委譲先のオブジェクト @param method 委譲先のメソッド @param ali 委譲元のメソッド 委譲元のオブジェクトで ali が呼び出された場合に、 委譲先のオブジェクトの method へ処理が委譲されるようになります。 委譲元と委譲先のメソッド名が同じ場合は, ali を省略することが可能です。 def_delegator は def_instance_delegator の別名になります。 例: require 'forwardable' class MyQueue extend Forwardable attr_reader :queue def initialize @queue = [] end def_delegator :@queue, :push, :mypush end q = MyQueue.new q.mypush 42 q.queue # => [42] q.push 23 # => NoMethodError @see [[m:Forwardable#def_delegators]] #@since 
1.9.1 --- instance_delegate(hash) -> () --- delegate(hash) -> () メソッドの委譲先を設定します。 @param hash 委譲先のメソッドがキー、委譲先のオブジェクトが値の [[c:Hash]] を指定します。キーは [[c:Symbol]]、 [[c:String]] かその配列で指定します。 #@# ruby-core:05899 のパッチに付いてたテストコードより。 例: require 'forwardable' class Zap extend Forwardable delegate :length => :@str delegate [:first, :last] => :@arr def initialize @arr = %w/foo bar baz/ @str = "world" end end zap = Zap.new zap.length # => 5 zap.first # => "foo" zap.last # => "baz" == Constants --- FORWARDABLE_VERSION -> String [[lib:forwardable]] ライブラリのバージョンを返します。 #@end = module SingleForwardable オブジェクトに対し、メソッドの委譲機能を定義するモジュールです。 === 使い方 オブジェクトに対して extend して使います。 例: require 'forwardable' g = Goo.new g.extend SingleForwardable g.def_delegator("@out", :puts) g.puts ... また、[[c:SingleForwardable]] はクラスやモジュールに対して以下のようにする事もできます。 require 'forwardable' class Implementation def self.service puts "serviced!" end end module Facade extend SingleForwardable def_delegator :Implementation, :service end Facade.service # => serviced! 
もし [[c:Forwardable]] と [[c:SingleForwardable]] の両方を使いたい場合、 #@since 1.9.1 def_instance_delegator と def_single_delegator メソッドの方を呼び出して ください。 #@else def_instance_delegator と def_singleton_delegator メソッドの方を呼び出 してください。 #@end == Instance Methods #@since 1.9.1 --- def_single_delegators(accessor, *methods) -> () #@else --- def_singleton_delegators(accessor, *methods) -> () #@end --- def_delegators(accessor, *methods) -> () メソッドの委譲先をまとめて設定します。 @param accessor 委譲先のオブジェクト @param methods 委譲するメソッドのリスト 委譲元のオブジェクトで methods のそれぞれのメソッドが呼び出された場合に、 委譲先のオブジェクトの同名のメソッドへ処理が委譲されるようになります。 def_delegators は def_singleton_delegators の別名になります。 また、以下の 2 つの例は同じ意味です。 def_delegators :@records, :size, :<<, :map def_delegator :@records, :size def_delegator :@records, :<< def_delegator :@records, :map @see [[m:SingleForwardable#def_delegator]] #@since 1.9.1 --- def_single_delegator(accessor, method, ali = method) -> () #@else --- def_singleton_delegator(accessor, method, ali = method) -> () #@end --- def_delegator(accessor, method, ali = method) -> () メソッドの委譲先を設定します。 @param accessor 委譲先のオブジェクト @param method 委譲先のメソッド @param ali 委譲元のメソッド 委譲元のオブジェクトで ali が呼び出された場合に、 委譲先のオブジェクトの method へ処理が委譲されるようになります。 委譲元と委譲先のメソッド名が同じ場合は, ali を省略することが可能です。 def_delegator は def_singleton_delegator の別名になります。 @see [[m:SingleForwardable#def_delegators]] #@since 1.9.1 --- single_delegate(hash) -> () --- delegate(hash) -> () メソッドの委譲先を設定します。 @param hash 委譲先のメソッドがキー、委譲先のオブジェクトが値の [[c:Hash]] を指定します。キーは [[c:Symbol]]、 [[c:String]] かその配列で指定します。 @see [[m:Forwardable#delegate]] #@end
e6a26c8639d8935b0ad36f2676d9dfaf11d47b1c
431860954259d02f7768dd02e6554badbf6faacc
/man/getindexcat.Rd
8bd713957bd6e6737c1a9af2b048df3c15dc998b
[]
no_license
nicolas-robette/GDAtools
5e6a7d4454d5edac3fab9bfa202f96ceddadfc66
4708925717cb4d0cd957faa46fd813dfcd860c41
refs/heads/master
2023-07-07T02:54:15.110104
2023-06-29T18:58:32
2023-06-29T18:58:32
214,293,710
5
3
null
2021-06-11T08:41:34
2019-10-10T22:04:12
R
UTF-8
R
false
false
794
rd
getindexcat.Rd
\name{getindexcat}
\alias{getindexcat}
\title{Names of the categories in a data frame}
\description{Returns a vector of names corresponding to the categories in a data frame exclusively composed of categorical variables.}
\usage{getindexcat(data)}
\arguments{
  \item{data}{data frame of categorical variables}
}
\details{This function may be useful prior to a specific MCA, to identify the indexes of the 'junk' categories to exclude.}
\value{Returns a character vector with the names of the categories of the variables in the data frame}
\author{Nicolas Robette}
\seealso{ \code{\link{ijunk}}, \code{\link{speMCA}}, \code{\link{csMCA}} }
\examples{
data(Music)
getindexcat(Music[,1:5])
mca <- speMCA(Music[,1:5], excl = c(3,6,9,12,15))
}
\keyword{ multivariate }
\keyword{ misc }
d272b514c99774e124da82f6e84e7720324bd9c6
5702c21b14d615d637b7d75ee696d55e5034be43
/man/layout_1d_graphics.Rd
3c03b172504d0a27533965fe3a3b7484c07c4d42
[]
no_license
great-northern-diver/zenplots
90219a66099df5bd7c934d36cd202d250801bdc5
2f5431dc2ba3318e8b23349804996757ea81910a
refs/heads/master
2023-09-05T04:09:22.049308
2023-08-25T17:12:01
2023-08-25T17:12:01
198,439,000
3
2
null
2023-08-25T17:12:03
2019-07-23T13:43:12
R
UTF-8
R
false
true
459
rd
layout_1d_graphics.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot1dgraphics.R \name{layout_1d_graphics} \alias{layout_1d_graphics} \title{Layout plot in 1d} \usage{ layout_1d_graphics(zargs, ...) } \arguments{ \item{zargs}{argument list as passed from \code{\link{zenplot}()}} \item{...}{additional arguments passed to label_1d_graphics()} } \value{ invisible() } \description{ Layout plot in 1d } \author{ Marius Hofert and Wayne Oldford }
09252cff48df8ee06694fffa50c75424bfa46266
1eeb158bacf26f51928087935110bff8148a88fb
/man/HTestimator.Rd
def6b5774dbe66ee5534ff54598f1ef166321ae1
[]
no_license
cran/sampling
5a97b7b5b14a7c27c7fd64bf68c72531ccb50128
2f974546de2b26dc5d71b44e3c56f8cc4e6bd696
refs/heads/master
2021-06-05T21:02:00.219517
2021-01-13T10:50:05
2021-01-13T10:50:05
17,699,455
3
2
null
null
null
null
UTF-8
R
false
false
829
rd
HTestimator.Rd
\name{HTestimator} \alias{HTestimator} \title{The Horvitz-Thompson estimator} \description{Computes the Horvitz-Thompson estimator of the population total.} \usage{HTestimator(y,pik)} \arguments{ \item{y}{vector of the variable of interest; its length is equal to n, the sample size.} \item{pik}{vector of the first-order inclusion probabilities; its length is equal to n, the sample size.} } \seealso{ \code{\link{UPtille}} } \examples{ data(belgianmunicipalities) attach(belgianmunicipalities) # Computes the inclusion probabilities pik=inclusionprobabilities(Tot04,200) N=length(pik) n=sum(pik) # Defines the variable of interest y=TaxableIncome # Draws a Poisson sample of expected size 200 s=UPpoisson(pik) # Computes the Horvitz-Thompson estimator HTestimator(y[s==1],pik[s==1]) } \keyword{survey}
cfb2474d30a29f7b42290a6fdb031336bf1e4d5b
aa4b646ee65be2bc6a5903b4cbc31f99dd0ea18f
/R/03_Plotting_Results.R
2ccb552dfba44a558c4aafab566617519e1803fd
[]
no_license
aaronweinstock/mls-preseason18-simulation
2793429c7a233eb37a8f7e6ab77021ac5f48542a
10ec389c50cbc871302b886c5fc288f5ff414c5a
refs/heads/master
2020-04-10T21:28:02.921337
2018-12-11T09:07:40
2018-12-11T09:07:40
161,297,204
0
0
null
null
null
null
UTF-8
R
false
false
2,232
r
03_Plotting_Results.R
# install.packages("ggplot2")
# install.packages("gridExtra")
library(ggplot2)
library(grid)
library(gridExtra)

# Read in simulation probabilities: one row per team with columns
# team, playoff (P(make playoffs)), bye (P(win division)), conf.
prob = readRDS("Data/R_Data/Example_Probabilities.rds")

# Draw a pair of heat maps (one per conference) of each team's simulated
# playoff and bye probabilities, arranged side by side under one title.
#
# Args:
#   prob: data frame with columns team, playoff, bye, conf.
plot_sim_results = function(prob){
  n_teams <- nrow(prob)  # generalized: was hard-coded to 23 teams

  # Reorganize simulated probabilities into long format for ggplot:
  # one row per (team, probability type).
  # BUG FIX: `times = 2` previously sat outside rep() as a separate
  # data.frame argument, which silently created a spurious `times` column
  # instead of repeating the conference labels.
  oddsplot = data.frame(team = rep(prob$team, times = 2),
                        odds = c(prob$playoff, prob$bye),
                        type = rep(c("playoff", "bye"), each = n_teams),
                        conf = rep(prob$conf, times = 2))

  # Order teams by playoff odds (bye odds as tiebreaker) so heat-map rows
  # run from least to most likely.
  oddsplot$team = factor(oddsplot$team,
                         levels = unique(oddsplot$team[order(oddsplot$odds[oddsplot$type=="playoff"],
                                                             oddsplot$odds[oddsplot$type=="bye"])]))
  oddsplot$type = factor(oddsplot$type, levels = c("playoff", "bye"))

  # Create one tile plot per conference.
  plots_by_conf = lapply(c("East","West"), function(x){
    ggplot(data = oddsplot[oddsplot$conf == x,]) +
      geom_tile(aes(x = type, y = team, fill = odds), color = "white") +
      geom_text(aes(x = type, y = team),
                label = scales::percent(oddsplot$odds[oddsplot$conf == x]),
                size = 3) +
      scale_x_discrete(labels = c("Make Playoffs", "Win Division", "Get Wild Card"),
                       name = "", expand = c(0,0), position = "top") +
      scale_y_discrete(name = "", expand = c(0,0)) +
      scale_fill_gradient(low = "#FFFFFF", high = "#FF0000", limits = c(0,1)) +
      labs(title = paste(x, "Playoff Odds")) +
      theme(axis.ticks.x = element_blank(),
            axis.ticks.y = element_blank(),
            axis.title.x = element_blank(),
            axis.title.y = element_blank(),
            plot.title = element_text(hjust = 0.5, face = "bold"),
            legend.position = "none")
  })

  # Add overall title and arrange the two conference panels side by side.
  t = textGrob("Simulated Preseason MLS 2018 Playoff Likelihoods",
               hjust = 0.35, gp = gpar(fontsize = 18, font = 2))
  grid.arrange(plots_by_conf[[1]], plots_by_conf[[2]], ncol = 2, top = t)
}

plot_sim_results(prob)
756364fd1e79231cdef25a1dd777a288ae5b0f5e
59fb03eed32d0fec98930e6f937263e5ce696d9b
/R/checkargs.R
dc914ee13d869c441d3f555b3c4db63bf7fdbb89
[]
no_license
vathymut/forestError
8a66469e65509f867e6993a18c7ca11ff046dc27
5d9c4b2d01e2ad65bdedf1bdd499ba342e27be8d
refs/heads/master
2023-07-11T13:59:58.474788
2021-08-10T19:22:23
2021-08-10T19:22:23
null
0
0
null
null
null
null
UTF-8
R
false
false
4,263
r
checkargs.R
# Argument-validation helpers for the forestError package. Each check*()
# function either returns NULL (argument OK) or stops with an informative
# error message.

# check forest argument for problems
checkForest <- function(forest) {
  if (typeof(forest) != "list") {
    stop("'forest' is not of the correct type")
  } else if (!any(c("randomForest", "ranger", "rfsrc", "quantregForest") %in% class(forest))) {
    stop("'forest' is not of the correct class")
  } else if (is.null(forest$inbag)) {
    # in-bag counts are required downstream to identify out-of-bag cohorts
    stop("'forest' does not have record of which training observations are in bag for each tree. Re-fit the random forest with argument keep.inbag = TRUE")
  }
}

# check training and test covariate arguments for problems
checkXtrainXtest <- function(X.train, X.test) {
  if (length(dim(X.train)) != 2) {
    stop("'X.train' must be a matrix or data.frame of dimension 2")
  } else if (length(dim(X.test)) != 2) {
    stop("'X.test' must be a matrix or data.frame of dimension 2")
  } else if (ncol(X.train) != ncol(X.test)) {
    stop("'X.train' and 'X.test' must have the same predictor variables")
  }
}

# check training response argument for problems; only needed for ranger
# objects, which do not store a copy of the training responses
checkYtrain <- function(forest, Y.train, n.train) {
  if ("ranger" %in% class(forest)) {
    if (is.null(Y.train)) {
      stop("You must supply the training responses (Y.train)")
    } else if (length(Y.train) != n.train) {
      stop("Number of training responses does not match number of training observations")
    }
  }
}

# check type-I error rate argument for problems
checkAlpha <- function(alpha) {
  # NOTE: typeof() == "double" deliberately rejects integer input
  if (typeof(alpha) != "double") {
    stop("'alpha' must be of type double")
  } else if (any(alpha <= 0 | alpha >= 1)) {
    stop("'alpha' must be in (0, 1)")
  }
}

# check index argument in perror and qerror functions for problems
checkxs <- function(xs, n.test) {
  # max()/min() are scalar here, so use short-circuiting || in scalar context
  if (max(xs) > n.test || min(xs) < 1) {
    stop("Test indices are out of bounds")
  } else if (any(xs %% 1 != 0)) {
    stop("Test indices must be whole numbers")
  }
}

# check probability argument in qerror for problems
checkps <- function(p) {
  if (max(p) > 1 || min(p) < 0) {
    stop("Probabilities must be between 0 and 1")
  }
}

# check core argument for problems
checkcores <- function(n.cores) {
  if (is.null(n.cores)) {
    stop("Number of cores must be specified")
  } else if (n.cores < 1) {
    stop("Number of cores must be at least 1")
  } else if (n.cores %% 1 != 0) {
    stop("Number of cores must be integer")
  }
}

# Helper: classify the forest type. Returns TRUE for a classification
# forest, FALSE for a regression forest, and NA if the class of `forest`
# is not one of the four supported random-forest classes. The dispatch
# order (quantregForest/randomForest, then rfsrc, then ranger) mirrors
# the original duplicated checks.
isClassificationForest <- function(forest) {
  if (inherits(forest, "quantregForest") || inherits(forest, "randomForest")) {
    forest$type == "classification"
  } else if (inherits(forest, "rfsrc")) {
    forest$family == "class"
  } else if (inherits(forest, "ranger")) {
    forest$treetype == "Classification"
  } else {
    NA
  }
}

# check requested parameters
checkwhat <- function(what, forest) {
  if (is.null(what)) {
    stop("Please specify the parameters to be estimated")
  } else if ("mcr" %in% what && length(what) > 2) {
    stop("Misclassification rate cannot be estimated for real-valued responses")
  } else if ("mcr" %in% what) {
    # misclassification rate is defined only for classification forests
    cls <- isClassificationForest(forest)
    if (!is.na(cls) && !cls) {
      stop("Misclassification rate can be estimated only for classification random forests")
    }
  } else if (any(c("mspe", "bias", "interval", "p.error", "q.error") %in% what)) {
    # the regression-style parameters are undefined for classification forests
    cls <- isClassificationForest(forest)
    if (!is.na(cls) && cls) {
      stop("Requested parameters cannot be estimated for classification random forests")
    }
  }
}
b58514fdd16e1d37f4ec8e6d2c79965a304e52ee
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
/codeml_files/newick_trees_processed_and_cleaned/10055_0/rinput.R
a7b5fc7101ec55dc6678c08848845dc377db5a67
[]
no_license
DaniBoo/cyanobacteria_project
6a816bb0ccf285842b61bfd3612c176f5877a1fb
be08ff723284b0c38f9c758d3e250c664bbfbf3b
refs/heads/master
2021-01-25T05:28:00.686474
2013-03-23T15:09:39
2013-03-23T15:09:39
null
0
0
null
null
null
null
UTF-8
R
false
false
137
r
rinput.R
# Unroot a phylogenetic tree for codeml input.
# Reads the Newick tree in 10055_0.txt, removes the root with ape::unroot(),
# and writes the unrooted tree to 10055_0_unrooted.txt.
library(ape)
testtree <- read.tree("10055_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="10055_0_unrooted.txt")
ad7f57bea3e442c91fecb4160190e8c0402f93a3
7212c1fea0fd1e286ca10e978d24c9cc4ef05af7
/TLC.R
aa9320e4082417233f13a292369b7d7163590a52
[]
no_license
maps16/Estadistica
92635e96f254946572dd4b8c0d82dcb4c028bd3a
c1bfd6c4123378903fc8cb8e83824c85e906f1e2
refs/heads/master
2021-01-10T07:11:55.266656
2016-04-04T17:02:46
2016-04-04T17:02:46
52,023,966
0
0
null
null
null
null
UTF-8
R
false
false
600
r
TLC.R
# Central Limit Theorem demonstration (TLC = Teorema del Limite Central).
# For M simulated samples of size n, compute the standardized sample mean
#   sqrt(n) * (mean(y) - mu) / sigma
# and compare its histogram with the standard normal density.

# Simulation scenario: Normal(media0, sigma0)
n = 30
media0 = 10
sigma0 = 1
M = 20000

# Compute the standardized sample mean for each replicate
# (original comment said "coverage frequency"; what is actually computed
# is the CLT statistic)
z = c()
for(i in 1:M){
  y = rnorm(n, media0, sigma0)
  z[i] = (sqrt(n)*(mean(y)-media0))/sigma0
}
x = z
# Histogram of the simulated statistics with the N(0,1) density overlaid
hist(x, freq = FALSE, col = blues9)
curve(dnorm(x,0,1),col=2, add=TRUE)

# Simulation scenario: Exponential with mean lamda
# (for Exp(rate = 1/lamda), both the mean and the sd equal lamda)
n = 30
lamda=100
media0 = lamda
sigma0 = lamda
M = 10000
# Compute the standardized sample mean for each replicate
z = c()
for(i in 1:M){
  y = rexp(n, 1/lamda)
  z[i] = (sqrt(n)*(mean(y)-media0))/sigma0
}
x = z
# Even with skewed exponential data, the histogram approaches N(0,1)
hist(x, freq = FALSE, col = blues9)
curve(dnorm(x,0,1),col=2, add=TRUE)
e055e32f77978525d47d42faca676222c21aa356
8740aaffc97232135206f322898ac0295af678f9
/4. Data Manipulation with dplyr/arrange1.R
e7b7c70c1bbd187490aba4738c40c9df4376294f
[]
no_license
ThanitsornMsr/Datacamp
9b79f56a04eb4aaa30ee8e846b7d52a26d2bbcb4
fa22e61edebf12b6a8b47df95f1b221fc9a693f1
refs/heads/master
2022-08-03T08:32:51.554392
2020-05-22T11:35:19
2020-05-22T11:35:19
266,045,917
0
0
null
null
null
null
UTF-8
R
false
false
224
r
arrange1.R
# DataCamp exercise (Data Manipulation with dplyr).
# Assumes dplyr is loaded and a `counties` data frame exists in the
# workspace; keeps a few employment-related columns.
counties_selected <- counties %>%
  select(state, county, population, private_work, public_work, self_employed)

# Add a verb to sort in descending order of public_work
counties_selected %>%
  arrange(desc(public_work))
274439d2fdda3980bc3515a686dbce596a468e06
95d4670a6eee58e1530f38bd430f2f1c77d5660b
/plot4.R
b6074f2c583c60a966996419330ab5d6a684f38b
[]
no_license
Kostasstam/ExData_Plotting1
797c948e7ecb30018d9b71812d82f5d14474db2a
14c95f4fa200f1ca7a3a84b956b023b81649d27a
refs/heads/master
2020-05-23T11:14:51.233412
2017-01-29T20:49:54
2017-01-29T20:49:54
80,368,653
0
0
null
2017-01-29T19:57:39
2017-01-29T19:57:39
null
UTF-8
R
false
false
1,348
r
plot4.R
# Exploratory Data Analysis course project, plot 4: a 2x2 panel of
# time-series plots of household power consumption for 1-2 Feb 2007.
# Requires dplyr and household_power_consumption.txt in the working
# directory; writes plot4.png.

#Read table
library(dplyr)
fulldata <- read.table("household_power_consumption.txt", header=TRUE, sep=";",
                       stringsAsFactors=FALSE, dec=".", as.is = TRUE)
str(fulldata)
# Keep only the two days of interest (dates are day/month/year strings).
data <- filter(fulldata, Date=="1/2/2007" | Date=="2/2/2007")
dim(data)
head(data)
str(data)
# Convert measurement columns to numeric and build timestamps for the x axis.
data$Global_active_power <- as.numeric(data$Global_active_power)
# BUG FIX: Global_reactive_power is plotted in the fourth panel below, so
# it must be converted to numeric as well (previously it was never
# converted and the panel re-plotted Global_active_power under the
# "Global_reactive_power" label).
data$Global_reactive_power <- as.numeric(data$Global_reactive_power)
dateandtime <- strptime(paste(data$Date, data$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
data$Sub_metering_1 <- as.numeric(data$Sub_metering_1)
data$Sub_metering_2 <- as.numeric(data$Sub_metering_2)
data$Sub_metering_3 <- as.numeric(data$Sub_metering_3)
data$Voltage <- as.numeric(data$Voltage)
str(data)

#Plot 4
png("plot4.png", width=480, height=480, units = "px")
par(mfrow = c(2, 2))  # 2x2 grid of panels, filled row by row

# Top-left: global active power.
plot(dateandtime, data$Global_active_power, type="l", xlab="",
     ylab="Global Active Power", cex=0.2)
# Top-right: voltage.
plot(dateandtime, data$Voltage, type="l", xlab="datetime", ylab="Voltage")
# Bottom-left: the three sub-metering series overlaid.
plot(dateandtime, data$Sub_metering_1, type="l", ylab="Energy Submetering", xlab="")
lines(dateandtime, data$Sub_metering_2, type="l", col="red")
lines(dateandtime, data$Sub_metering_3, type="l", col="blue")
# BUG FIX: lty was previously given as an empty argument (`lty=,`);
# lty=1 draws solid line samples in the legend.
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty=1, lwd=2.5, col=c("black", "red", "blue"), bty="o")
# Bottom-right: global reactive power (now matches the axis label).
plot(dateandtime, data$Global_reactive_power, type="l", xlab="datetime",
     ylab="Global_reactive_power")
dev.off()
1e0cf149c5524926a23769992c6830157c7e5af9
023f423499a21441ba85e2a7f646229b0592ead8
/analysis/runScript.R
93a52165337e4ee019935cf9481a18e346dd12a4
[]
no_license
YanqiangLi/lincp21
a7c25782423e7575525d499fc41a434263efb23b
0c020fd8a433b86a7b5f9b5ad7fb6ea58c6ea3dd
refs/heads/master
2021-01-02T22:36:54.650584
2016-06-30T18:15:50
2016-06-30T18:15:50
null
0
0
null
null
null
null
UTF-8
R
false
false
592
r
runScript.R
# Batch driver: knit the lincp21 strain report template once per row of
# autoanalysisInfo.csv. The row index comes from the first command-line
# argument, so the script can be fanned out across cluster jobs.
dat<-read.csv("autoanalysisInfo.csv",header=TRUE,stringsAsFactors=FALSE)
library(knitr)
# Row of the info table to process (1-based), from the command line.
i<-as.numeric((commandArgs(TRUE)[1]))
filename<-dat$filename[i]
print(filename)
print(dat$strain[i])
# Work inside a per-sample directory named after the file.
dir.create(filename)
setwd(filename)
# These variables are read by the knitted template from this environment.
strain<-dat$strain[i]
timepoint<-dat$sample[i]
tissue<-dat$sample[i]
alpha<-0.05
dir<-dat$dir[i]
#/n/rinn_data2/users/agroff/seq/OtherMice/Lincp21/analysis/SupplementalFileX_Lincp21_StrainTemplate.Rmd
# Render the shared template to <filename>.md inside the sample directory.
knit2html('/n/rinn_data2/users/agroff/seq/OtherMice/Lincp21/analysis/SupplementalFileX_Lincp21_StrainTemplate.Rmd',output=paste(filename,".md",sep=""), quiet=TRUE)
e791929514a7db3aed9a3104dfe1c311e97632c5
d075e77fbe941830535e4df7121694305b42938a
/evaluation.R
7b1c0e1388cc2f7e9df209160a956d29c3c290fc
[]
no_license
skyler120/MSGL
c4493ea8cc62e0d8d9464cc8ec036b07f5ecec31
e39432f80424802b932429b4b0876b1d1e81a4f8
refs/heads/master
2021-01-22T04:14:25.441575
2017-05-26T06:03:48
2017-05-26T06:03:48
92,442,088
0
0
null
null
null
null
UTF-8
R
false
false
278
r
evaluation.R
library(pROC)

# Compare an estimated coefficient vector with the truth and score the
# associated predictions.
#
# Args:
#   beta:      estimated coefficients.
#   beta_true: true coefficients.
#   y_hat:     predicted scores for the observations.
#   y:         observed (binary) outcomes.
#   tol:       absolute tolerance for counting a coefficient as recovered.
# Returns:
#   A list with the proportion of coefficients recovered to within `tol`
#   (`prop`) and the ROC sensitivities, specificities and AUC of the
#   predictions (`sens`, `spec`, `auc`).
evaluate_beta <- function(beta, beta_true, y_hat, y, tol = 1e-6) {
  # Fraction of coefficients whose estimate lies within tol of the truth.
  n_recovered <- sum(abs(beta - beta_true) < tol)
  prop_recovered <- n_recovered / length(beta)

  # ROC analysis of the predicted scores against the observed outcomes.
  roc_fit <- roc(y, y_hat)

  list(prop = prop_recovered,
       sens = roc_fit$sensitivities,
       spec = roc_fit$specificities,
       auc  = roc_fit$auc)
}
54d2794c74fd88148fec767dde17464d991422c9
a9a9af4f010a883720f70391d2af66f437cb15c3
/man/retrieve_abc_experiment_for_plotting.Rd
82bc3fb3815c6a5073495e456b9eb9efc782e215
[]
no_license
kalden/spartanDB
ad4162c78ef54170c21c08a8a7a822fafc457636
bc698715cdce55f593e806ac0c537c3f2d59ac7a
refs/heads/master
2020-03-26T23:32:14.724243
2019-02-20T11:05:17
2019-02-20T11:05:17
145,549,860
0
0
null
null
null
null
UTF-8
R
false
true
934
rd
retrieve_abc_experiment_for_plotting.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/emulated_abc_to_db.R \name{retrieve_abc_experiment_for_plotting} \alias{retrieve_abc_experiment_for_plotting} \title{Retrieve posteriors from database and produce density plots} \usage{ retrieve_abc_experiment_for_plotting(dblink, parameters, experiment_id = NULL, experiment_description = NULL, experiment_date = Sys.Date()) } \arguments{ \item{dblink}{A link to the database in which this table is being created} \item{parameters}{Simulation parameters being examined} \item{experiment_id}{Experiment ID for the ABC results. May be NULL if description and date specified} \item{experiment_description}{A description of this ABC experiment. May be NULL if adding by experiment ID} \item{experiment_date}{Date experiment created. May be NULL if adding by experiment ID} } \description{ Retrieve posteriors from database and produce density plots }
545d45959cd63efbe2fe48deca9ec907ae433bf7
aa4105b401fbe639e71dbf5093615317dfe47139
/RProgramming/cacheMatrix.R
2aa46a0cc5494f30cf73295e0d5ce7250975097b
[]
no_license
cchmusso/datasciencecoursera
0d02b6d0c8f4c1f8b3475ef3cb644ab4275ac7ae
8787faf3a5cda90b6c73a191fded7ab4084aeee0
refs/heads/gh-pages
2021-01-10T13:25:55.420461
2018-01-21T18:33:57
2018-01-21T18:33:57
46,266,815
0
0
null
2018-01-21T18:33:58
2015-11-16T10:07:38
HTML
UTF-8
R
false
false
1,682
r
cacheMatrix.R
# Matrix inversion is computationally expensive. In order to eliminate
# redundant evaluation, the functions in this module allow the user to
# create a matrix wrapper that, upon first calculation, caches its inverse
# and returns that inverse on subsequent queries until the original matrix
# changes (invalidating the cached inverse).

# makeCacheMatrix returns a list of four accessor functions closing over a
# matrix `x` and its lazily computed inverse `inv`.
#
# Args:
#   x: the matrix to wrap (defaults to an empty 1x1 matrix).
# Returns:
#   A list with elements set, get, setinv, getinv.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL                                  # cached inverse, NULL until computed
  set <- function(y) {
    inv <<- NULL                               # new matrix invalidates cached inverse
    x <<- y                                    # save matrix in this closure
  }
  get <- function() x                          # return wrapped matrix
  setinv <- function(inverse) inv <<- inverse  # cache the inverse
  getinv <- function() inv                     # return cached inverse, if any
  # return list of 4 functions
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}

# cacheSolve returns the inverse of the matrix wrapped by `x` (an object
# created by makeCacheMatrix). The first call computes the inverse with
# solve() and caches it; subsequent calls return the cached value.
#
# Args:
#   x:   a cache-matrix object created by makeCacheMatrix.
#   ...: further arguments passed on to solve().
# Returns:
#   A matrix that is the inverse of x$get().
cacheSolve <- function(x, ...) {
  inv <- x$getinv()
  if(!is.null(inv)) {          # cache hit: skip the computation
    message("getting cached data")
    return(inv)
  } else {                     # no cached inverse,
    data <- x$get()            # extract the underlying data
    inv <- solve(data, ...)    # calculate its inverse
    # BUG FIX: previously this called x$set(inv), which overwrote the
    # wrapped matrix with its own inverse and wiped the cache, so the
    # inverse was never actually cached. setinv() stores it correctly.
    x$setinv(inv)
    return(inv)
  }
}
c8ef9d75550cce401b2fa464358b2c115698037a
a702380ea7f842b78885777855dada88ac329f0d
/man/PLS4jack.Rd
cbb1863729d60a2bb418192b820dfc1edea69053
[]
no_license
HerveAbdi/data4PCCAR
bc077569605bca5119814bc7ca8b155c3fcc4141
78478d1ad5b3b1eeb88a3448d417694129bc70d8
refs/heads/master
2022-09-14T05:05:24.686219
2022-09-04T21:24:43
2022-09-04T21:24:43
129,653,026
8
2
null
null
null
null
UTF-8
R
false
true
1,948
rd
PLS4jack.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/PLS_jack_svds_HA.R \name{PLS4jack} \alias{PLS4jack} \title{in PLS regression (PLSR) compute a supplementary projection for a jackknifed estimation of one supplementary element. The prediction is performed for 1 to \code{nfactor} latent variables.} \usage{ PLS4jack(X, Y, xsup, nfactor) } \arguments{ \item{X}{the \strong{X} matrix of predictors in the PLSR model.} \item{Y}{the \strong{Y} matrix to be predicted by tge PLSR model.} \item{xsup}{the supplementary elements whose \strong{Y} values are to be predicted.} \item{nfactor}{number of factors of the model.} } \value{ \code{Yhatsup} the matrix of the predicted values. } \description{ in PLS regression (PLSR),\code{PLS4jack}: computes a supplementary projection for a jackknifed estimation of one supplementary element. The prediction is computed for 1 to \code{nfactor} latent variables. \code{PLS4jack} is mainly used by \code{\link{PLSR_SVD}} for computing the random effect prediction of jackknifed observations in PLSR, but it can also be used to project supplementary observation in PLSR. } \details{ see Abdi (2010) for details and examples. } \seealso{ \code{\link{PLSR_SVD}} } \author{ Hervé Abdi, Lei Xuan #' @references (see also \code{https://personal.utdallas.edu/~herve/}) \enumerate{ \item Abdi, H. (2010). Partial least square regression, projection on latent structure regression, PLS-Regression. \emph{Wiley Interdisciplinary Reviews: Computational Statistics, 2}, 97-106. \item Abdi, H. (2007). Partial least square regression (PLS regression). In N.J. Salkind (Ed.): \emph{Encyclopedia of Measurement and Statistics}. Thousand Oaks (CA): Sage. pp. 740-744. \item Abdi. H. (2003). Partial least squares regression (PLS-regression). In M. Lewis-Beck, A. Bryman, T. Futing (Eds): \emph{Encyclopedia for Research Methods for the Social Sciences}. Thousand Oaks (CA): Sage. pp. 792-795. } }
ca0e41ef93839618815b33d7faae4c51d3a832be
6f32382cf98a130b7e7f518f0d6e760a7006e09d
/Paper1/Figure5/1.visualize_PCAs.R
2f0aeb6265674367626917ac4e968bc4ec4bc16a
[]
no_license
DanChitwood/PassifloraLeaves
0df07ca52193932aa4b107a4e4cfaa7a9fe9e8e9
493c28d04f0a43bb4d7f3d0b90dfcab6ec83622c
refs/heads/master
2021-01-01T03:44:51.938914
2017-12-03T19:12:32
2017-12-03T19:12:32
56,823,168
6
0
null
null
null
null
UTF-8
R
false
false
1,729
r
1.visualize_PCAs.R
# Figure 5: scatterplots of morphometric PCA scores for Passiflora leaves.
# Two shape-analysis methods (landmarks and Elliptical Fourier
# Descriptors), each visualized by species class and by heteroblastic
# leaf number. Requires ggplot2 and the tab-delimited input file below.

#Read in ggplot2
library(ggplot2)

#Read in data
data <- read.table("./0.classes_and_heteroblasty.txt", header=TRUE)

#Visualize landmark PCA by species class
p <- ggplot(data=data, aes(land_pc1, land_pc2, colour=class))
p + geom_point(size=3, alpha=0.6) + theme_bw() + scale_colour_brewer(type="qual", palette=2)

p <- ggplot(data=data, aes(land_pc3, land_pc4, colour=class))
p + geom_point(size=3, alpha=0.6) + theme_bw() + scale_colour_brewer(type="qual", palette=2)

#Visualize landmark PCA by heteroblasty (continuous colour scale)
p <- ggplot(data=data, aes(land_pc1, land_pc2, colour=cont_hetero))
p + geom_point(size=3, alpha=0.6) + theme_bw() + scale_colour_gradient2(low="black",mid="dodgerblue",high="yellow", midpoint=6.5)

p <- ggplot(data=data, aes(land_pc3, land_pc4, colour=cont_hetero))
p + geom_point(size=3, alpha=0.6) + theme_bw() + scale_colour_gradient2(low="black",mid="dodgerblue",high="yellow", midpoint=6.5)

#Visualize Elliptical Fourier Descriptor PCA by species class
p <- ggplot(data, aes(efd_pc1, efd_pc2, colour=class))
p + geom_point(size=3, alpha=0.6) + theme_bw() + scale_colour_brewer(type="qual", palette=2)

p <- ggplot(data, aes(efd_pc3, efd_pc4, colour=class))
p + geom_point(size=3, alpha=0.6) + theme_bw() + scale_colour_brewer(type="qual", palette=2)

#Visualize Elliptical Fourier Descriptor PCA by heteroblasty
p <- ggplot(data=data, aes(efd_pc1, efd_pc2, colour=cont_hetero))
p + geom_point(size=3, alpha=0.6) + theme_bw() + scale_colour_gradient2(low="black",mid="dodgerblue",high="yellow", midpoint=6.5)

p <- ggplot(data=data, aes(efd_pc3, efd_pc4, colour=cont_hetero))
p + geom_point(size=3, alpha=0.6) + theme_bw() + scale_colour_gradient2(low="black",mid="dodgerblue",high="yellow", midpoint=6.5)
17f31d96a744adfc39479bc16c19a948c966b9cd
d75b7bc015b47d94254bcc9334ba15972d3ec9a1
/1. FIRST YEAR/Introduction to Computing/Exercices_Laura/exercici82.R
4c1406c79c362b95a78a94be3b2bbd776b16358a
[]
no_license
laurajuliamelis/BachelorDegree_Statistics
a0dcfec518ef70d4510936685672933c54dcee80
2294e3f417833a4f3cdc60141b549b50098d2cb1
refs/heads/master
2022-04-22T23:55:29.102206
2020-04-22T14:14:23
2020-04-22T14:14:23
257,890,534
0
0
null
null
null
null
UTF-8
R
false
false
232
r
exercici82.R
# SumaVectors: return the sum of all elements of two numeric vectors.
#
# Args:
#   v1, v2: numeric vectors (either may be empty).
# Returns:
#   A single number equal to sum(v1) + sum(v2).
SumaVectors <- function (v1,v2){
  s1 <- 0
  s2 <- 0
  # BUG FIX: the original loops iterated over 1:length(v), which is
  # c(1, 0) for an empty vector and made the result NA; seq_along()
  # handles the empty case correctly.
  for (i in seq_along(v1)){
    s1 <- s1 + v1[i]
  }
  for (i in seq_along(v2)){
    s2 <- s2 + v2[i]
  }
  return(s1+s2)
}

# Example inputs.
v1 <- c(1,1,1,1)
v2 <- c(1,1,1,1,5,6)
c6f26490f4e15ef2dbedf8e3699c0ccd8523ae4c
8da9024b102ccfde5f2bbc999114adb82fbc39f1
/man/makeMovie.Rd
e6c779489774b22663ee268c4978c0441b0d392a
[]
no_license
pmur002/director
4747d2a823cdaa56472f4692db2d5823819d9b7b
59202fe01cfe7c2677e5160614716e31a81f314f
refs/heads/master
2021-06-23T21:25:25.459912
2021-02-17T02:08:01
2021-02-17T02:08:01
75,572,092
2
0
null
null
null
null
UTF-8
R
false
false
3,033
rd
makeMovie.Rd
\name{makeMovie}
\alias{makeMovie}
\title{ Make a Movie }
\description{ Make a movie from an XML script file. }
\usage{
makeMovie(filename, wd=paste0(gsub("[.]xml$", "", filename), "-movie"),
          TTS=espeakTTS(), world=realWorld, validate=TRUE, clean=FALSE)
}
\arguments{
  \item{filename}{The name of an XML script file.}
  \item{wd}{A path to a working directory, where all files will be created.}
  \item{TTS}{An object that can turn text into audio; see \code{\link{TTS}}.}
  \item{world}{The world that the movie will be set in; see \code{\link{realWorld}}.}
  \item{validate}{Should the script be validated against a DTD.}
  \item{clean}{A logical indicating whether to erase the working directory before beginning recording.}
}
\details{
  The XML file is assumed to have the following structure:
  \itemize{
    \item{A root element called \code{script}, containing ...}
    \item{A single \code{setting} element (optional).}
    \item{A single \code{stage} element with \code{width} and \code{height} attributes.}
    \item{One or more \code{scene} elements (with optional \code{id} and \code{record} attributes), containing ...}
    \itemize{
      \item{One or more \code{shot} elements (with optional \code{duration}, \code{location} and \code{id} attributes), containing ...}
      \itemize{
        \item{Zero or one \code{keyaction} or \code{pointeraction} elements, zero or one \code{location} elements, and zero or one \code{dialogue} elements.

          The \code{keyaction} element can have \code{keydelay} and
          \code{linedelay} attributes.

          The content of the \code{keyaction} element will be
          fed as key events to the location that is identified by the
          \code{location} attribute of the \code{keyaction} element.
          If the \code{location} is \code{"backstage"}, the code within
          the \code{keyaction} is just run in a shell.

          Each \code{location} must have an \code{id} attribute
          and can have
          \code{x}, \code{y}, \code{width}, and \code{height} attributes.
The \code{location} corresponds to the window that is generated by the \code{keyaction} or \code{pointeraction} (if any). A \code{dialogue} element contains either text or a \code{speak} element that describes dialogue using Speech Synthesis Markup Language (SSML) \url{https://www.w3.org/TR/speech-synthesis11/}. } } } } The package contains a DTD for the script structure (see the \code{DTD} directory in the installed package) and, if \code{validate} is \code{TRUE}, the script is validated against that DTD. } \value{ A list containing the path to the complete final video, plus paths to individual scene video files. } \author{ Paul Murrell } \seealso{ \code{\link{TTS}}, \code{\link{realWorld}}. } \keyword{ utilities }
ff36010222582f92f40e5e4317644b5986c58ba4
5d0ae5bb914a6c9d05d0fcb3b8ebb28f84d81851
/R/custom_model.R
7d8cbaee11ee8ba70b1c6684b1e2d70ae7d53960
[]
no_license
yangxhcaf/Master_Thesis
57678f3b347d35bab021345a9b97dbdedcba155e
e14ee6cbe4c74c13aa1faf4f5cdb4ff380e6498c
refs/heads/master
2022-06-21T14:16:57.963992
2020-05-12T11:14:48
2020-05-12T11:14:48
null
0
0
null
null
null
null
UTF-8
R
false
false
6,919
r
custom_model.R
#' Custom Model
#'
#' Build a customized, vgg16 and unet based Keras model: a four-level
#' encoder (contracting) path of paired 3x3 convolutions with max pooling,
#' a dropout bottleneck, and a four-level decoder (expanding) path whose
#' stages are concatenated with the matching encoder stages (skip
#' connections) before a 1x1 sigmoid classification head.
#'
#' @source Partially based on the work of Christian Knoth at https://github.com/DaChro/cannons_at_marmots
#'
#' @param input_shape Dimensions of input. Standard: 128*128 resolution, 1 channel greyscale; for RGB use 3 channels
#' @param num_classes Number of classes. Standard: 2 for binary classification. Example: Tree vs. not tree
#'
#' @return Returns a Keras model
#'
#' @examples \dontrun{model <- build_custom_model()}
build_custom_model <- function(input_shape = c(128, 128, 3), num_classes = 2) {

  #---Input------------------------------------------------------------------
  model_input <- layer_input(name = "input_1", shape = input_shape)

  #---Encoder (downsampling path)--------------------------------------------
  # Stage 1: two 64-filter convolutions.
  # NOTE: the hard-coded input_shape on the first convolution is retained
  # from the original; Keras ignores it because the layer follows
  # layer_input().
  enc1 <- model_input %>%
    layer_conv_2d(name = "down1_conv1", filters = 64, kernel_size = 3,
                  input_shape = c(128, 128, 1), padding = "same",
                  data_format = "channels_last", activation = "relu",
                  kernel_initializer = "VarianceScaling") %>%
    layer_conv_2d(name = "down1_conv2", filters = 64, kernel_size = 3,
                  padding = "same", data_format = "channels_last",
                  activation = "relu",
                  kernel_initializer = "VarianceScaling")
  enc1_pooled <- enc1 %>%
    layer_max_pooling_2d(name = "down1_pool", pool_size = c(2, 2),
                         strides = c(2, 2))

  # Stage 2: two 128-filter convolutions; pooling followed by dropout.
  enc2 <- enc1_pooled %>%
    layer_conv_2d(name = "down2_conv1", filters = 128, kernel_size = 3,
                  padding = "same", data_format = "channels_last",
                  activation = "relu",
                  kernel_initializer = "VarianceScaling") %>%
    layer_conv_2d(name = "down2_conv2", filters = 128, kernel_size = 3,
                  padding = "same", data_format = "channels_last",
                  activation = "relu",
                  kernel_initializer = "VarianceScaling")
  enc2_pooled <- enc2 %>%
    layer_max_pooling_2d(name = "down2_pool", pool_size = c(2, 2),
                         strides = c(2, 2)) %>%
    layer_dropout(0.2)

  # Stage 3: two 256-filter convolutions.
  enc3 <- enc2_pooled %>%
    layer_conv_2d(name = "down3_conv1", filters = 256, kernel_size = 3,
                  padding = "same", data_format = "channels_last",
                  activation = "relu",
                  kernel_initializer = "VarianceScaling") %>%
    layer_conv_2d(name = "down3_conv2", filters = 256, kernel_size = 3,
                  padding = "same", data_format = "channels_last",
                  activation = "relu",
                  kernel_initializer = "VarianceScaling")
  enc3_pooled <- enc3 %>%
    layer_max_pooling_2d(name = "down3_pool", pool_size = c(2, 2),
                         strides = c(2, 2))

  # Stage 4: two 512-filter convolutions.
  enc4 <- enc3_pooled %>%
    layer_conv_2d(name = "down4_conv1", filters = 512, kernel_size = 3,
                  padding = "same", data_format = "channels_last",
                  activation = "relu",
                  kernel_initializer = "VarianceScaling") %>%
    layer_conv_2d(name = "down4_conv2", filters = 512, kernel_size = 3,
                  padding = "same", data_format = "channels_last",
                  activation = "relu",
                  kernel_initializer = "VarianceScaling")
  enc4_pooled <- enc4 %>%
    layer_max_pooling_2d(name = "down4_pool", pool_size = c(2, 2),
                         strides = c(2, 2))

  #---Bottleneck-------------------------------------------------------------
  bottleneck <- enc4_pooled %>%
    layer_dropout(0.2)

  #---Decoder (upsampling path)----------------------------------------------
  # Each stage: two convolutions, batch norm, transposed convolution to
  # double the spatial resolution, then concatenation with the matching
  # encoder stage (skip connection).
  dec4 <- bottleneck %>%
    layer_conv_2d(name = "up4_conv1", filters = 512, kernel_size = 3,
                  padding = "same", data_format = "channels_last",
                  activation = "relu",
                  kernel_initializer = "VarianceScaling") %>%
    layer_conv_2d(name = "up4_conv2", filters = 512, kernel_size = 3,
                  padding = "same", data_format = "channels_last",
                  activation = "relu",
                  kernel_initializer = "VarianceScaling") %>%
    layer_batch_normalization() %>%
    layer_conv_2d_transpose(name = "up4_upconv_1", filters = 128,
                            kernel_size = 2, strides = c(2, 2),
                            padding = "same",
                            data_format = "channels_last",
                            activation = "linear") %>%
    {layer_concatenate(name = "up4_conc1", inputs = list(enc4, .))}

  dec3 <- dec4 %>%
    layer_conv_2d(name = "up3_conv1", filters = 256, kernel_size = 3,
                  padding = "same", data_format = "channels_last",
                  activation = "relu",
                  kernel_initializer = "VarianceScaling") %>%
    layer_conv_2d(name = "up3_conv2", filters = 256, kernel_size = 3,
                  padding = "same", data_format = "channels_last",
                  activation = "relu",
                  kernel_initializer = "VarianceScaling") %>%
    layer_batch_normalization() %>%
    layer_conv_2d_transpose(name = "up3_upconv_1", filters = 128,
                            kernel_size = 2, strides = c(2, 2),
                            padding = "same",
                            data_format = "channels_last",
                            activation = "linear") %>%
    {layer_concatenate(name = "up3_conc1", inputs = list(enc3, .))}

  # Stage 2 of the decoder additionally applies dropout after the skip
  # connection, mirroring the encoder's stage-2 dropout.
  dec2 <- dec3 %>%
    layer_conv_2d(name = "up2_conv1", filters = 128, kernel_size = 3,
                  padding = "same", data_format = "channels_last",
                  activation = "relu",
                  kernel_initializer = "VarianceScaling") %>%
    layer_conv_2d(name = "up2_conv2", filters = 128, kernel_size = 3,
                  padding = "same", data_format = "channels_last",
                  activation = "relu",
                  kernel_initializer = "VarianceScaling") %>%
    layer_batch_normalization() %>%
    layer_conv_2d_transpose(name = "up2_upconv1", filters = 128,
                            kernel_size = 2, strides = c(2, 2),
                            padding = "same",
                            data_format = "channels_last",
                            activation = "linear") %>%
    {layer_concatenate(name = "up2_conc1", inputs = list(enc2, .))} %>%
    layer_dropout(0.2)

  dec1 <- dec2 %>%
    layer_conv_2d(name = "up1_conv1", filters = 64, kernel_size = 3,
                  padding = "same", data_format = "channels_last",
                  activation = "relu",
                  kernel_initializer = "VarianceScaling") %>%
    layer_conv_2d(name = "up1_conv2", filters = 64, kernel_size = 3,
                  padding = "same", data_format = "channels_last",
                  activation = "relu",
                  kernel_initializer = "VarianceScaling") %>%
    layer_batch_normalization() %>%
    layer_conv_2d_transpose(name = "up1_upconv1", filters = 128,
                            kernel_size = 2, strides = c(2, 2),
                            padding = "same",
                            data_format = "channels_last",
                            activation = "linear") %>%
    {layer_concatenate(name = "up1_conc1", inputs = list(enc1, .))}

  #---Classification/Output--------------------------------------------------
  # 1x1 convolution with sigmoid activation produces per-pixel class scores.
  output_head <- layer_conv_2d(dec1, filters = num_classes,
                               kernel_size = c(1, 1),
                               activation = "sigmoid")

  # Assemble and return the full model.
  unet <- keras_model(
    inputs = model_input,
    outputs = output_head
  )
  return(unet)
}
33e35c72587296a32e2d9549008adef43c255ed4
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
/cran/paws.machine.learning/man/polly_get_speech_synthesis_task.Rd
cf4a101336d5aaab09c082c9f6b1e74e7cbb6203
[ "Apache-2.0" ]
permissive
paws-r/paws
196d42a2b9aca0e551a51ea5e6f34daca739591b
a689da2aee079391e100060524f6b973130f4e40
refs/heads/main
2023-08-18T00:33:48.538539
2023-08-09T09:31:24
2023-08-09T09:31:24
154,419,943
293
45
NOASSERTION
2023-09-14T15:31:32
2018-10-24T01:28:47
R
UTF-8
R
false
true
782
rd
polly_get_speech_synthesis_task.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/polly_operations.R \name{polly_get_speech_synthesis_task} \alias{polly_get_speech_synthesis_task} \title{Retrieves a specific SpeechSynthesisTask object based on its TaskID} \usage{ polly_get_speech_synthesis_task(TaskId) } \arguments{ \item{TaskId}{[required] The Amazon Polly generated identifier for a speech synthesis task.} } \description{ Retrieves a specific SpeechSynthesisTask object based on its TaskID. This object contains information about the given speech synthesis task, including the status of the task, and a link to the S3 bucket containing the output of the task. See \url{https://www.paws-r-sdk.com/docs/polly_get_speech_synthesis_task/} for full documentation. } \keyword{internal}
ac4af16e5a89d04c7af302cd55b783a4d19068bd
2b3d3b4f510d250b196607ec7c78095711bd2aef
/devel/alligator.R
6ad818f0cdd6bd4154012ad6a1ff5a8c65ccd7fc
[ "MIT" ]
permissive
cjgeyer/glmbb
df80553cc60d3fb9a636530f89668843d7a331b5
34d7ee54ce5ae6476319020c41d233cb060e9da5
refs/heads/master
2021-08-06T18:12:04.643391
2021-01-26T22:43:48
2021-01-26T22:43:48
55,895,366
0
0
null
null
null
null
UTF-8
R
false
false
1,190
r
alligator.R
library(Matrix) library(CatDataAnalysis) data(table_8.1) d <- transform(table_8.1, lake = factor(lake, labels = c("Hancock", "Oklawaha", "Trafford", "George")), gender = factor(gender, labels = c("Male", "Female")), size = factor(size, labels = c("<=2.3", ">2.3")), food = factor(food, labels = c("Fish", "Invertebrate", "Reptile", "Bird", "Other"))) # models that give warnings are # # count ~ lake * gender * size + food * lake * (gender + size) # count ~ lake * gender * size + food * (lake + gender + size)^2 # # demo that gout <- glm(count ~ lake * gender * size + food * lake * (gender + size), family = poisson, data = d) gout <- glm(count ~ lake * gender * size + food * (lake + gender + size)^2, family = poisson, data = d) # now for GDOR formula <- count ~ food * (lake + gender + size)^2 conditioning <- ~ lake * gender * size modmat.formula <- sparse.model.matrix(formula, data = d) modmat.conditioning <- sparse.model.matrix(conditioning, data = d) modmat.qr <- qr(modmat.conditioning) foo <- qr.resid(modmat.qr, modmat.formula) bar <- apply(foo^2, 2, sum) names(bar)[bar < 1e-6] min(bar[bar >= 1e-6]) names(bar)[bar >= 1e-6]
0a49c26cba2b0b64c81864f89121463442d38e3e
2ca5918b1a1f74b8e59fe034af4e9f3a917a5454
/code/2020/2020_34_PlantsInDanger.R
bc38be58e86787c69fa37f3faaab62d1247ea597
[]
no_license
bonschorno/TidyTuesday
9300738ccf845baaf152a668ff30fcd23a5ee09a
e485dc9f8915084848abd9bfdd8c5970942bbb81
refs/heads/master
2023-02-18T22:53:31.666381
2021-01-20T07:58:45
2021-01-20T07:58:45
286,958,495
2
0
null
null
null
null
UTF-8
R
false
false
2,880
r
2020_34_PlantsInDanger.R
#Week 34: Plants in Danger library(tidyverse) library(ggalluvial) library(hrbrthemes) library(ggsci) plants <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-08-18/plants.csv') #data wrangling plants_clean <- plants %>% filter(group == "Flowering Plant", threat_NA == 0) %>% select(year_last_seen, contains("threat")) %>% pivot_longer(-c(year_last_seen), names_to = "threat", values_to = "number") #preparing data for plotting plant_alluvial <- plants_clean %>% group_by(year_last_seen, threat) %>% summarise(total = sum(number)) %>% group_by(year_last_seen) %>% mutate(slot_total = sum(total), total_per = total/slot_total) %>% group_by(threat) %>% mutate(group = group_indices()) %>% drop_na(year_last_seen) %>% filter(threat != "threat_NA") %>% mutate(threat = str_remove(threat, "threat_")) %>% mutate(threat = case_when(threat == "AA" ~ "Agriculture & Aquaculture", threat == "BRU" ~ "Biological Resource Use", threat == "CC" ~ "Climate Change", threat == "EPM" ~ "Energy Production", threat == "GE" ~ "Geological Events", threat == "HID" ~ "Human Intrusions", threat == "ISGD" ~ "Invasive Species", threat == "NSM" ~ "Natural System Modifications", threat == "P" ~ "Pollution", threat == "RCD" ~ "Commercial Development", threat == "TS" ~ "Transportation Corridor")) %>% filter(threat != "Geological Events", threat != "Pollution") #reordering factors fct_relevel(plant_alluvial$year_last_seen) plant_alluvial$year_last_seen <- fct_relevel(plant_alluvial$year_last_seen, "Before 1900") #plot ggplot(plant_alluvial, aes(x = year_last_seen, stratum = threat, alluvium = group, y = total, fill = threat, label = threat)) + geom_flow(alpha = 1, width = 0, curve_type = "linear") + scale_fill_uchicago() + facet_wrap(threat ~ .) 
+ labs(title = "Plants in Danger\n", x = "", y = "Number of threatened species\n", fill = "", caption = "\n\nSource: IUCN Red list of Threatened Species | Graphic: @bonschorno") + theme_ft_rc(base_family = "IBM Plex Sans Medium") + theme(legend.position = "none", panel.grid.major.x = element_blank(), panel.grid.major.y = element_line(linetype = "dotted"), panel.grid.minor = element_blank(), strip.text = element_text(hjust = 0.5, face = "bold", color = "white", size = 9), axis.text.x = element_text(size = 7, angle = 45, vjust = -.05), axis.text.y = element_text(size = 6), plot.title = element_text(size = 30, hjust = 0.5)) ggsave("ExtinctPlants.png", height = 20, width = 20, units = "cm", dpi = 500)
448232206eb13c2e35161f2dc52735ff258e81e8
839de161296bcbb4593fe20f282518607866b346
/R/loplot_v1.R
ae767ce963fe76a697ba80a5f9113f883a436d44
[]
no_license
cran/statTarget
1a37156aabc3b664674fedf4a225bab2cf9eb828
befb9ebe688584ccdab61941c635356dc3fbb828
refs/heads/master
2021-01-20T18:33:26.720214
2016-07-20T09:46:08
2016-07-20T09:46:08
63,609,082
1
2
null
null
null
null
UTF-8
R
false
false
1,552
r
loplot_v1.R
#' loplot provide the visible figure of QC-RLS correction. #' @param x the file before QC-RLS correction. #' @param z the file after QC-RLS correction. #' @param i a index for the name of variable. #' @export loplot <- function(x,z,i){ # x is the loess cn <- colnames(x) qcid <- grep("QC",cn) RSD30_CV=paste(rownames(x)[i],"_", i,".pdf", sep="") dirout.loplot <- paste(getwd(), "/statTarget/shiftCor/After_shiftCor/loplot", sep="") dir.create(dirout.loplot) pdf(paste(dirout.loplot,RSD30_CV,sep="/"),width = 6,height = 6) graphics::layout(matrix(1:2,nrow=2)) numY <- 1:dim(x)[2] graphics::plot(numY,x[i,],pch=19,col="yellow",ylab = c("Intensity"), xlab = c("Injection Order"), main = "Raw Peak") points(qcid,x[i,qcid],pch=19,col="blue") legend("top", c("Sample", "QC"),col=c('yellow', 'blue'), lty=1,pch= 19,bty="n", cex=0.75,horiz = TRUE) #lines(qcid,x[i,qcid],col=rgb(0,0,0,0.3),lwd=4) loe <- loess(x[i,qcid]~qcid) points(numY,predict(loe,numY),type='l',col=rgb(0,0,0,0.3),lwd=4) graphics::plot(numY,z[i,],pch=19,col="yellow",ylab = c("Intensity"), xlab = c("Injection Order"),main = "Corrected Peak") points(qcid,z[i,qcid],pch=19,col="blue") #abline(h = 1, type='l',col=rgb(0,0,0,0.3),lwd=4) #lines(qcid,z[i,qcid],col=rgb(0,0,0,0.3),lwd=4) #loe_n <- loess(z[i,qcid]~qcid) #points(numY,predict(loe_n,numY),type='l',col=rgb(0,0,0,0.3),lwd=4) legend("top", c("Sample", "QC"),col=c('yellow', 'blue'), lty=1,pch= 19,bty="n", cex=0.75,horiz = TRUE) dev.off() }
69790ef6673ac594a162915babd1bccde4bbd727
630a70ffc25834bfe3aa9623708f47e0245a461f
/RExamples/api_examples.R
a17f383baab0a6604a41460b75d0c8bd62357411
[]
no_license
mjiapalucci/MC
be2214ebdc3842f63025f5498219061d39ec819e
58f0f2d2f5f9352c721385f5593f5d03747cc1b6
refs/heads/main
2023-04-05T10:56:11.335397
2021-03-31T21:19:23
2021-03-31T21:19:23
303,212,939
0
0
null
null
null
null
UTF-8
R
false
false
412
r
api_examples.R
library(httr) library(jsonlite) library(lubridate) library(tidyverse) r <- GET("https://data.montgomerycountymd.gov/resource/xhwt-7h2h") glimpse(r) content(r, "text") jsonRespText <- content(r, as="text") jsonRespParsed <- content(r, as="parsed") jsonRespParsed df <- fromJSON(jsonRespText) head(df) class(df) df1 <- as_tibble(df) df1$arrest_date <- ymd_hms(df1$arrest_date, tz="EST") df1$arrest_date df1
3820d35b352c3b465da6ed0b32946aeabceceaad
13895420920703501ab66c28a3927089a2de042e
/R/simGene.R
6c1ad2774001e83bfcf22219b2bd1cbaba078183
[]
no_license
cran/psych
3349b3d562221bb8284c45a3cdd239f54c0348a7
ee72f0cc2aa7c85a844e3ef63c8629096f22c35d
refs/heads/master
2023-07-06T08:33:13.414758
2023-06-21T15:50:02
2023-06-21T15:50:02
17,698,795
43
42
null
2023-06-29T05:31:57
2014-03-13T05:54:20
R
UTF-8
R
false
false
1,036
r
simGene.R
"simGene" <- function(ng=10,traits=1,n.obs=1000,dom=TRUE) { X <- array(sample(2,ng*n.obs*traits*3,replace=TRUE),dim=c(n.obs,ng,traits,3)) MZ <- DZ <- array(NA,dim=c(n.obs,ng,traits)) MZt <- DZt <- matrix(NA,n.obs,traits) for(t in 1:traits) { if(dom) { MZ[,1:ng,t] <- X[,1:ng,t,1] * X[,1:ng,t,2] #the allele values are mulitplied DZ[,1:ng,t] <- X[,1:ng,t,1] * X[,1:ng,t,3]} else { MZ[,1:ng,t] <- X[,1:ng,t,1] + X[,1:ng,t,2] #the allele values are added DZ[,1:ng,t] <- X[,1:ng,t,1] + X[,1:ng,t,3]} MZt[,t] <- rowMeans(MZ[,,t]) #the trait values DZt[,t] <- rowMeans(DZ[,,t]) } X.df <- data.frame(genes=X[,1:ng,1:traits,sample(2,1,replace=TRUE)],MZ=MZt,DZ=DZt) return(X.df)} test.simGene <- function(x, ng=10) { t1 <-rowMeans(x[1:ng]) t2 <- rowMeans(x[(ng+1):(ng*2)]) t11 <-rowMeans(x[1:(ng/2)]) t12 <-rowMeans(x[(ng/2 +1):ng]) t21 <-rowMeans(x[(ng+1):(ng/2 + ng)]) t22 <- rowMeans(x[(ng/2 + ng+1):(ng*2)]) scores <- data.frame(t1=t1,t2=t2,t11=t11,t12=t12,t21 = t21,t22=t22,traits=x[(ng*2 +1):(ng*2+4)]) }
3d0c6ec490c4180fde020a862b3ae79975dba5ed
0e0c93c587aedbcb71c8aeae3427a7b17c6422b0
/AND2.R
e2f1cb860ba9017c56b43882b9caf65a902d350c
[]
no_license
sekersse/QIT
fb5ae76871274020ab5d9076bcd8be616352db20
1becaf3286dfdb2456f88b44097a859427dd6eb4
refs/heads/master
2020-05-31T11:00:36.345434
2019-06-04T18:04:07
2019-06-04T18:04:07
190,253,206
0
1
null
null
null
null
UTF-8
R
false
false
1,407
r
AND2.R
library(neuralnet) set.seed(7896129) ######################## ### Ergebnisvektor ######################## AND <- c(0,0,0,1) ##### Input und Ergebnis als data.frame data.frame(Var1=c(0,1,0,1), Var2=c(0,0,0,1), AND) and.data <- data.frame(expand.grid(c(0,1), c(0,1)), AND) and.data ##### Neuronales Netz berechnen ## Zielfunktion festlegen f<- AND~Var1+Var2 ## Netz berechnen hiddenlayers=0, Wiederholungen eine, net.and <- neuralnet( f, and.data, hidden=0, rep=1, lifesign="minimal") #Textausgabe des KNN print(net.and) #plot den KNN plot(net.and, rep="best") ######################## ## Netz testen ######################## res<-compute(net.and, rep=1, and.data[,1:2]) res resrounded<-round(res$net.result[,1],0) resrounded #AND= (0,0,0,1) #resrounded= (0,0,0,1) table(AND, resrounded) ######################## ## Weitere Experimente ######################## net.and <- neuralnet( AND~Var1+Var2, and.data, hidden=2, rep=10, lifesign="minimal") net.and <- neuralnet( AND~Var1+Var2, and.data, hidden=1, rep=10, lifesign="minimal") net.and <- neuralnet( AND~Var1+Var2, and.data, hidden=3, rep=10, lifesign="minimal") net.and <- neuralnet( AND~Var1+Var2, and.data, hidden=c(2,1), rep=10, lifesign="minimal") net.and <- neuralnet( AND~Var1+Var2, and.data, hidden=c(2,2), rep=10, lifesign="minimal") net.and <- neuralnet( AND~Var1+Var2, and.data, hidden=c(2,2), rep=100, lifesign="minimal")
735d23ddaeb1272cd23d11129df9bb2a8f1a5411
8f460306738b0e454489b44e25970f52f1b236d3
/classification.R
5f95141cbc4c68c9dcb32688676ebb7d0af1fc2c
[]
no_license
mattymo18/TCGA_Gene_Expression
1343a6ec48dcc8b8c3b161fa01736bbced200d3f
6eae8f1ff59d79536f1fc5877e31304c4973ecbe
refs/heads/master
2023-03-24T09:29:42.205171
2021-03-23T00:58:30
2021-03-23T00:58:30
342,007,463
0
2
null
null
null
null
UTF-8
R
false
false
1,200
r
classification.R
# data and libs library(tidyverse) library(class) library(knitr) library(kableExtra) webshot::install_phantomjs() DF.Center <- read.csv("derived_data/TCGA.centered.csv") %>% select(-X) #set seed set.seed(315) # Break into train and test sets. #Sample the rows for the training set index <-sample(1:nrow(DF.Center),round(nrow(DF.Center)*0.8)) DF_train<-DF.Center[index,-1] #Training set train_class<-as.factor(DF.Center[index,1]) #classifications of training set DF_test<-DF.Center[-index,-1] #Test set test_class<-as.factor(DF.Center[-index,1]) #Classifications of test set # Run classification algorithm. CL_train=knn(DF_train, DF_train, train_class, k=3) # Compute accuracy on test set. CL_test=knn(DF_train, DF_test, train_class, k=3) # Find classes where the algorithm performed best / worst. confusion.mat <- as.matrix(table(Actual = test_class, Predicted = CL_test)) Confusion <- kable(confusion.mat) %>% kable_styling() %>% save_kable(file = "derived_graphics/Confusion.Table.png", zoom = 1) Confusion <- kable(confusion.mat) %>% kable_styling() %>% save_kable(file = "README_graphics/Confusion.Table.png", zoom = 1) saveRDS(CL_test, "derived_models/Test.Knn.Mod.rds")
969359d1f797394cde42aed15ed6d2fdc396ecbf
6a28ba69be875841ddc9e71ca6af5956110efcb2
/Elementary_Number_Theory_by_David_M._Burton/CH10/EX10.8/Ex10_8.R
d624315341452dbd96592a91f31b5afa3eb9b1c5
[]
permissive
FOSSEE/R_TBC_Uploads
1ea929010b46babb1842b3efe0ed34be0deea3c0
8ab94daf80307aee399c246682cb79ccf6e9c282
refs/heads/master
2023-04-15T04:36:13.331525
2023-03-15T18:39:42
2023-03-15T18:39:42
212,745,783
0
3
MIT
2019-10-04T06:57:33
2019-10-04T05:57:19
null
UTF-8
R
false
false
250
r
Ex10_8.R
#page 215 p <- 113 r <- 3 k <- 37 two <- (r ^ 2) %% p four <- (two ^ 2) %% p eight <- (four ^ 2) %% p sixteen <- (eight ^ 2) %% p thirty_two <- (sixteen ^ 2) %% p a <- (r * four * thirty_two) %% p public_key <- c(p, r, a) print(public_key)
60e9dba832247241cd73ed41d3bfd206ce42b717
38c84e91ec840f606b0c8a4b8d1a45a6144a091a
/man/SedimentWater.Rd
57305d341747c985197d9a4d69b4ae57c7cd6ab2
[]
no_license
zhenglei-gao/SedimentWater
d0ee6d9e5304bee5aa29f1cc87a3fc6f76995cfd
ab5b84e617dad9ffced2111a92dfd156c0d3aedf
refs/heads/master
2021-01-10T01:49:05.716956
2015-09-29T12:35:44
2015-09-29T12:35:44
43,310,233
0
0
null
2015-09-29T12:35:44
2015-09-28T15:44:28
R
UTF-8
R
false
false
256
rd
SedimentWater.Rd
% Generated by roxygen2 (4.1.0): do not edit by hand % Please edit documentation in R/SedimentWater-package.r \docType{package} \name{SedimentWater} \alias{SedimentWater} \alias{SedimentWater-package} \title{SedimentWater.} \description{ SedimentWater. }
e7d8c64c9fde3c640c6e1cdac614da81e5507ad4
29585dff702209dd446c0ab52ceea046c58e384e
/tmap/R/split_tm.R
d4ed1ea59c2c978793ae782c918862d36cf40122
[]
no_license
ingted/R-Examples
825440ce468ce608c4d73e2af4c0a0213b81c0fe
d0917dbaf698cb8bc0789db0c3ab07453016eab9
refs/heads/master
2020-04-14T12:29:22.336088
2016-07-21T14:01:14
2016-07-21T14:01:14
null
0
0
null
null
null
null
UTF-8
R
false
false
1,341
r
split_tm.R
split_tm <- function(gp, nx, order_by) { gpnx <- lapply(1:nx, function(i){ g <- mapply(function(x, o) { oid <- if(is.null(o)) NULL else o[[i]] mapply(get_i, x, names(x), MoreArgs = list(i=i, n=x$npol, oid=oid), SIMPLIFY=FALSE) }, gp, order_by, SIMPLIFY=FALSE) }) names(gpnx) <- paste0("plot", 1:nx) gpnx } get_i <- function(x, xname, i, n, oid) { if (is.null(oid) && is.matrix(x)) oid <- 1:nrow(x) if (is.null(x)) { NULL } else if (is.matrix(x)) { if (ncol(x)>=i) x[oid,i] else x[oid,1] } else if (is.data.frame(x)) { stop("is.data.frame") if (ncol(x)>=i) x[oid,i] else x[oid,1] } else if(is.list(x)) { ncx <- nchar(xname) if (xname %in% c("varnames", "idnames")) { x } else if (substr(xname, ncx-4, ncx) ==".misc") { # these are lists themselves mapply(get_i, x, names(x), MoreArgs = list(i=i, n=n, oid=NULL), SIMPLIFY=FALSE) } else { if (length(x)>=i) x[[i]] else x[[1]] } } else { ncx <- nchar(xname) # split variables that consist of one value, but may differ across small multiples if (xname %in% c("bubble.size.legend.palette", "bubble.max.size", "line.lwd.legend.palette", "line.legend.lwd", "text.max.size", "text.size.legend.palette") || substr(xname, ncx-11, ncx)=="legend.title") { if (length(x)>=i) x[i] else x[1] } else x } }
b350b0ae0e54b11ae2bd54366f9417f76d7cc6a1
a48793031ba127d1635bb0a80070896f89a48a40
/ANLY_506_1_5Rscript.R
d90f77a4126ec284ee242272e337d9f7d7073e40
[]
no_license
EAMMensah/ANLY_506_Code_Portfolio
c83f363d9762d83f72a1e66e57a8cd0b11bd10cc
59755a8e7df8981ae07ad826a5f2765e003e90bf
refs/heads/master
2020-06-25T15:18:11.094568
2019-07-29T00:27:48
2019-07-29T00:27:48
199,350,825
0
0
null
null
null
null
UTF-8
R
false
false
22,957
r
ANLY_506_1_5Rscript.R
# week 2 - Base plots # plotting a histogram library(datasets) hist(airquality$Ozone) # plotting a boxplot airquality <- transform(airquality, Month = factor(Month)) boxplot(Ozone ~ Month, airquality, xlab = "Month", ylab = "Ozone (ppb)") #plotting a scatterplot with(airquality, plot(Wind, Ozone)) # testing deafult parameters par("lty") par("col") par("pch") par("bg") par("mar") par("mfrow") # Base plots with title library(datasets) # Make the initial plot with(airquality, plot(Wind, Ozone)) # Add a title title(main = "Ozone and Wind in New York City") # Adding colour to plots with(airquality, plot(Wind, Ozone, main = "Ozone and Wind in New York City")) with(subset(airquality, Month == 5), points(Wind, Ozone, col = "blue")) # Adding legends to plots with(airquality, plot(Wind, Ozone, main = "Ozone and Wind in New York City", type = "n")) with(subset(airquality, Month == 5), points(Wind, Ozone, col = "blue")) with(subset(airquality, Month != 5), points(Wind, Ozone, col = "red")) legend("topright", pch = 1, col = c("blue", "red"), legend = c("May", "Other Months")) # Adding regression lines to plot with(airquality, plot(Wind, Ozone, main = "Ozone and Wind in New York City", pch = 20)) # Fit a simple linear regression model model <- lm(Ozone ~ Wind, airquality) # Draw regression line on plot abline(model, lwd = 2) # Plotting multiple base plots, panel plot with two plots par(mfrow = c(1, 2)) with(airquality, { plot(Wind, Ozone, main = "Ozone and Wind") plot(Solar.R, Ozone, main = "Ozone and Solar Radiation")}) # Panel plot with three plots with(airquality, { plot(Wind, Ozone, main = "Ozone and Wind") plot(Solar.R, Ozone, main = "Ozone and Solar Radiation") plot(Temp, Ozone, main = "Ozone and Temperature") mtext("Ozone and Weather in New York City", outer = TRUE) }) # Week 3 # reading the data into r using readr library(readr) data_EPA <- read_csv("C:/Users/mawufemor/Desktop/US EPA data 2017.csv") # rewriting the names of the columns to remove spaces names(data_EPA) <- 
make.names(names(data_EPA)) # checking the number of rows nrow(data_EPA) # checking the number of columns ncol(data_EPA) # reviewing the dataset str(data_EPA) # looking at the top of the data head(data_EPA) # looking at the botton of the data tail(data_EPA) # looking at only a few columns of the top of data head(data_EPA[,c(6:7, 10)]) # looking at only a few columns of the tail of data tail(data_EPA[,c(6:7, 10)]) # looking at the time measurements were taken table(data_EPA$X1st.NO.Max.DateTime) # installing dplyr library library(dplyr) #filter(data_EPA, X1st.NO.Max.DateTime == "15:00") %>% # select(state.name, County.name, Date.Local, # Time.Local, Sample.Measurement) # #filter(ozone, State.Code == "36" # & County.Code == "033" # & Date.Local == "2014-09-30") %>% # select(Date.Local, Time.Local, # Sample.Measurement) %>% # as.data.frame #Date.Local Time.Local Sample.Measurement # reviewing uniques State names unique(data_EPA$State.Name) # validating with a dat source summary(data_EPA$Parameter.Code) # getting more detail on the distribution quantile(data_EPA$Parameter.Code, seq(0, 1, 0.1)) # Identiyfing each county using a combination of state name and county name rank_EPA <- group_by(data_EPA, State.Name, County.Name) %>% summarize(data_EPA = mean(X1st.Max.DateTime)) %>% as.data.frame %>% arrange(desc(data_EPA)) rank_EPA # looking at the top 10 counties in this ranking head(rank_EPA, 10) # looking at the botton 10 counties in this ranking tail(rank_EPA, 10) # reviewing number of observations for Alaska #filter(data_EPA, State.Name == "California" & County.Name == "Riverside") %>% # mutate(month = factor(months(Date.Local), levels = month.name)) %>% # group_by(month) %>% # summarize(data_EPA = mean(Pollutant.Standard)) # converting the date variable into a Date class # reviewing one o the counties filter(data_EPA, state.name == "California" & County.Name == "Riverside") %>%nrow # Randomizing the data set.seed(10234) N_EPA <- nrow(data_EPA) id_EPA <- sample(N_EPA, 
N_EPA, replace = TRUE) data_EPA2 <- data_EPA[id_EPA, ] # reconstructing the rankings based on resampled data rank_EPA2 <- group_by(data_EPA2, State.Name, County.Code) %>% summarize(data_EPA= mean(X1st.Max.DateTime)) %>% as.data.frame %>% arrange(desc(data_EPA)) # reviewing top 10 ranking based on resampled data cbind(head(rank_EPA, 10), head(rank_EPA2, 10)) # reviewing bottom 10 ranking based on resampled data cbind(tail(rank_EPA, 10), tail(rank_EPA2, 10)) # week 4 # 1. combining all the information collected into one datarame pirate_info <- data.frame("Name" = c("Astrid", "Lea", "Sarina","Remon","Letizia","Babice","Jonas","Wendy","Niveditha","Gioia"), "sex" = c("F","F","F","M","F","F","M","F","F","F"), "Age" = c(30,25,25,29,22,22,35,19,32,21), "Superhero" = c("Batman","Superman","Batman","Spiderman","Batman","Antman","Batman","Superman","Maggot","Superman"), "Tatoos" = c(11,15,12,5,65,3,9,13,900,0)) pirate_info # 2. finding the median age of 10 pirates median(pirate_info$Age) # 3. finding the mean age of female and male pirates separately #mean(pirate_info$Age[pirate_info$sex=="F"]) with(pirate_info,mean(pirate_info$Age[sex=="F"])) with(pirate_info,mean(pirate_info$Age[sex=="M"])) # 4. finding the most number of tattoos owned by a male pirate with(pirate_info,max(pirate_info$Tatoos[sex=="M"])) # 5. percentage of female pirates under the age of 32 with(subset(pirate_info,pirate_info$sex == "F"), mean(pirate_info$Age<32)) # 6. percentage of pirates under the age of 32 with(subset(pirate_info),mean(pirate_info$Age<32)) # 7. adding a new tattoos.per.year to the dataframe pirate_info$tattoos.per.year <- c(12,19,100,53,24,1,2,0,9,5) pirate_info # 8. pirate with the most number pf tattoos per year with(pirate_info, Name[tattoos.per.year == max(tattoos.per.year)]) # 9. names of female pirates whose favorite superhero is superman subset(x=pirate_info, subset = sex == "F" & Superhero == "Superman", select = Name) # 10. 
median number of tattoos of pirates over the age of 20 whose favorite superhero is Spiderman with(pirate_info, sum(Age > 20 & Superhero == "Spiderman")) ## PRACTICE 2 - week4 library(tidyverse) typeof(letters) typeof(1:10) x <- list("a", "b", 1:10) length(x) 1:10 %% 3 == 0 c(TRUE, TRUE, FALSE, NA) typeof(1) typeof(1L) 1.5L x <- sqrt(2) ^ 2 x x -2 c(-1, 0, 1)/0 is.finite(0) is.infinite(Inf) is.na(NA) is.nan(NaN) pryr::object_size(x) x <- "This is a reasonably long string" pryr::object_size(x) y <- rep(x, 1000) pryr::object_size(y) NA NA_integer_ NA_real_ NA_character_ # 1. Describe the difference between is.finite(x) and !is.infinite(x) # Both functions accomplish the same goal. Mathematically, !is.infinite is a negation of is.finite is.finite() tests whether a value is not infinite # whereas !is.infinite() tests whether a values is infinite. # 2. Read the source code for dplyr::near() (Hint: to see the source code, drop the ()). How does it work? # dplyr::near() calculates the tolerance level for differnce between two values. # 3. A logical vector can take 3 possible values. How many possible values can an integer vector take? How many values can a double take? # Integer vector can take one value , NA. Whereas double can take four values: -Inf, Inf, NaN and NA # 4. Brainstorm at least four functions that allow you to convert a double to an integer # a) as.integer() - returns the integer value # b) floor() - rounds down to the nearest integer # c) round 0.5 up, round 0.5 down # d) ceil () # 5. what functions from teh readr package allow you to turn a string into logical, integer, and double vector ? 
# parse() x <- sample(20, 100, replace = TRUE) y <- x > 10 sum(y) mean(y) if(length(x)) { } typeof(c(TRUE, 1L)) typeof(c(1L, 1.5)) typeof(c(1.5, "a")) sample(10)+100 runif(10) > 0.5 1:10+1:2 1:10+1:3 kl <- 1:10+1:3 rep(kl) tibble(x= 1:4, y=1:2) tibble(x= 1:4, y= rep(1:2, 2)) c(x=1, y=2, z=4) set_names(1:3, c("a", "b", "c")) x <- c("one", "two", "three", "four", "five") x[c(3,2,5)] x[c(1,1,5,5,5,2)] x[c(-1, -3, -5)] x[c(1,-1)] x[0] x <- c(10,3,NA,5,8,1,NA) x[!is.na(x)] x[x %%2 == 0] x <- c(abc = 1, def = 2, xyz = 5) x[c("xyz","def")] # Q1. What does mean(is.na(x)) tell you about a vector x? What about sum(!is.finite(x))? # Mean(is.na(x)) gives us the proportion of elements in the vector that are missing values. # Sum(!is.finite(x)) gives us the number of elements in the vector that are not finite values # Q2. Carefully read the documentation of is.vector(). What does it actually test for? Why does is.atomic() not agree with the definition of atomic vectors above? # is.vector() tests whether x is vector os the specified mode. is.atomic() returns TRUE is x is an atomic value, this is also TRUE for NULL values # Q3. Compare and contrast setNames() with purrr::set_names() # setNames() chnages the name of the data frame or table by reference and returns the object # puyrr::Set_names() is used to set the names of objects in a pipeline # Q4. Create functions that take a vector as input and returns: # 1. The last value. Should you use [ or [[? - get_last <- function(x){x[[length(x)]]}: we use [[ to retrieve justa single value # 2. The elements at even numbered positions. - get_even_pos <- function(x){x[c(FALSE, TRUE)]}: both elements will be recycled to the full length of x # 3. Every element except the last value - drop_last <- x[-length(x)]: drops the last value # 4. Only even numbers (and no missing values).- get_even_val <- function(x){s[!is.na(x) & x %% 2 == 0]} # Q5.Why is x[-which(x > 0)] not the same as x[x <= 0]? # # Q6. 
What happens when you subset with a positive integer that's bigger than the length of the vector? What happens when you subset with a name that doesn't exist? # Returns N/A for all values beyond the length of the vector. Also NA for non-existing names x <- list(1,2,3) x str(x) x_named <- list(a=1, b=2, c=3) str(x_named) y <- list("a", 1L, 1.5, TRUE) str(y) z <- list(list(1,2), list(3,4)) str(z) x1 <- list(c(1,2), c(3,4)) x2 <- list(list(1,2), list(3,4)) x3 <- list(1, list(2, list(3))) a <- list(a=1:3, b="a string", c=pi, d=list(-1,-5)) str(a[1:2]) str(a[4]) str(a[[1]]) str(a[[4]]) a$a a[["a"]] # Q1. Draw the following lists # list(a,b,list(c,d),list(e,f)) # list(list(list(list(list(list(a)))))) # Q2. What happens if you subset a tibble as if you're subsetting a list? What are the key differences between a list and a tibble? # x_list <- list(a = c(1,2,3), b = c("d","e","f"), c = 1:5]) # x_tibble <- tibble(a = c(1,2,3), b = c("d","e","f"), c = 1:5) # A tibble is similar to a list. Subsetting a tibble produces a tibble unless [[]] is used which produces a vector # Augmented Vectors # Q1. What does hms::hms(3600) return? How does it print? What primitive type is the augmented vector built on top of? What attributes does it use? # This produces the time in hours, minutes and seconds. Its atrributes are units (seconds) and class (minutes) # Q2. Try and make a tibble that has columns with different lengths. What happens? tib <- tibble(x=c("a","b","c"), y=c(1:5)) # The function throws an error because columns must be of equal size in a tibble. This is not the case in the fucntions above # Q3. Based on the definition above, is it ok to have a list as a column of a tibble? 
# It is possible in cases where the vectors all have the same column length # Week 5 install.packages("tidyverse") install.packages("ggplot2") install.packages("labeling") install.packages("magrittr") install.packages("dplyr") library(magrittr) library(dplyr) library(labeling) library(ggplot2) library(tidyverse) table1 table2 table3 # Spread across 2 tibbles table4a # cases table4b # population # compute rate per 10,000 table1 %>% mutate(rate = cases / population * 10000) # compute cases per year table1 %>% count(year, wt = cases) # visualize changes over time ggplot(table1, aes(year, cases)) + geom_line(aes(group = country), colour = "grey50") + geom_point(aes(colour = country)) # description # Exercise 12.2.1 # Q1. Using prose, describe how the variables and observations are organised in each of the sample tables. # In Table 1, year and country identiy each row as observations, the variables are represented in the cases and population columns # In Table 2, each row indicates country, variable and year. Variables count and population are identified on the columns # In Table 3, each row is represnted by country and year. The column rate contains variables count and population in a string format # In Table 4, we can see two distinct tables. Table 4a contains the cases and 4b represents population counts. For both tables, each row contains # values for country and year # Exercise 12.2.2 # creating separate tables for cases and populations table2_cases <- filter(table2, type == "cases") %>% rename(cases = count) %>% arrange(country, year) table2_population <- filter(table2, type == "population") %>% rename(population = count) %>% arrange(country, year) table2_cases table2_population # creating a new dataframe with cases and population columns. 
Calculating the cases per capita in a new column t2cases_per_capita <- tibble( year = table2_cases$year, country = table2_cases$country, cases = table2_cases$cases, population = table2_population$population )%>% mutate(cases_per_capita = (cases / population) * 10000) %>% select(country, year, cases_per_capita) t2cases_per_capita <- t2cases_per_capita %>% mutate(type = "cases_per_Capita") %>% rename(count = cases_per_capita) bind_rows(table2, t2cases_per_capita) %>% arrange(country, year, type, count) table4c <- tibble( country = table4a$country, `1999` = table4a[["1999"]] / table4b[["1999"]] * 10000, `2000` = table4a[["2000"]] / table4b[["2000"]] * 10000 ) table4c # Table 2 is slight easier to work with in this case beacuse we are able to filter without too much work. # Table 4 was already split into two distinct tables so it was realtively easy to divide cases by population, however this step needed to # be reapeated to each row. The mutate function made is easy to do all the necessary work despite the arrangement of the tables table2 %>% filter(type =="cases") %>% ggplot(aes(year, count)) + geom_line(aes(group = country), colour = "grey50") + geom_point(aes(colour = country)) + scale_x_continuous(breaks = unique(table2$year)) + ylab("cases") tidy4a <- table4a %>% gather(`1999`, `2000`, key = "year", value = "cases") tidy4b <- table4b %>% gather(`1999`, `2000`, key = "year", value = "population") # Joining two tables, table4a and table4b with left_join left_join(tidy4a,tidy4b) table2 table2 %>% spread(key = type, value = count) stocks <- tibble( year = c(2015, 2015, 2016, 2016), half = c(1, 2, 1, 2), return = c(1.88, 0.59, 0.92, 0.17) ) stocks %>% spread(year, return) %>% gather(`2015`:`2016`, key = "year", value = "return") # spread() and gather() are not perfectly symmetrical because column type information is not available. Gather() discards the original column types # by gathering all the variables and coercing them into a single type. 
The use of spread() on the same dataframe after the use # of gather does not produce similar results since it does not know the original datatypes of the variables. Using the type.convert() function helps to solve this # problem and makes gather() and spread() symmetrical stocks %>% spread(year, return) stocks %>% spread(year, return) %>% gather(`2015`:`2016`, key = "year", value = "return", convert = TRUE) table4a %>% gather(1999, 2000, key = "year", value = "cases") # This code fails because the gather() function in tidyverse views '1999' and '2000' as column numbers/variables instead off column headers. # The solution to this problem is to put quotation marks around the values so they are recognized as character values rather than column variables which do not exist # see code below table4a %>% gather(`1999`, `2000`, key = "year", value = "cases") table4b %>% gather(`1999`,`2000`,key = "year", value = "population") people <- tribble( ~name, ~key, ~value ) glimpse(people) spread(people, key, value) # In this case, spreading the dataframe fails because there are no unique identifiers in this dataframe. This can be resolved by adding unique row counts for each row # in order to serve as a unique identifier for each possible combinations of values before spreading the dataframe. 
For example, Philip Woods has two different values for age people_again <- people %>% group_by(name,key) %>% mutate(obs = row_number()) people_again spread(people_again, key, value) people %>% distinct(name, key, .keep_all = TRUE) %>% spread(key, value) preg <- tribble( ~pregnant, ~male, ~female, "yes", NA, 10, "no", 20, 12 ) sex("female", "male") pregnant ("yes", "no") count preg_tidy <- preg%>% gather(male, female, key = "sex", value = "count") preg_tidy preg_tidy2 <- preg %>% gather(male, female, key = "sex", value = "count", na.rm = TRUE) preg_tidy2 preg_tidy3 <- preg_tidy2 %>% mutate( female = sex == "female", pregnant = pregnant == "yes" ) %>% select(female, pregnant, count) preg_tidy3 filter(preg_tidy2, sex =="female", pregnant == "no") filter(preg_tidy3, female, !pregnant) # Separating and Uniting # separate table3 table3 %>% separate(rate, into = c("cases", "population")) table3 %>% separate(rate, into = c("cases", "population"), sep ="/") table3 %>% separate(rate, into = c("cases", "population"), convert = TRUE) table3 %>% separate(year, into = c("century","year"), sep = 2) #unite table5 %>% unite(new, century, year) table5 %>% unite(new, century, year, sep = "") # what do the extra and fill arguments do in separate()? Experiment with # the various options for the following two toy datasets tibble(x = c("a,b,c", "d,e,f,g", "h,i,j")) %>% separate(x, c("one", "two", "three")) tibble(x = c("a,b,c","d,e", "f,g,i")) %>% separate(x, c("one","two","three")) # when we run the two sets of data, we get error warning. 
Experiemnting with fill and extra tibble(x = c("a,b,c", "d,e,f,g", "h,i,j")) %>% separate(x, c("one", "two", "three"), extra = "warn") tibble(x = c("a,b,c", "d,e,f,g", "h,i,j")) %>% separate(x, c("one", "two", "three"), extra = "merge") tibble(x = c("a,b,c", "d,e,f,g", "h,i,j")) %>% separate(x, c("one", "two", "three"), extra = "drop") tibble(x = c("a,b,c","d,e", "f,g,i")) %>% separate(x, c("one","two","three"), fill = "right") tibble(x = c("a,b,c","d,e", "f,g,i")) %>% separate(x, c("one","two","three"), fill = "warn") tibble(x = c("a,b,c","d,e", "f,g,i")) %>% separate(x, c("one","two","three"), fill = "left") # Both unite() and separate() have remove argument. waht does it do? why would you # set it to FALSE # compare and contrast separate () and extract (). why are there three variations of separation # (by position, by separator, and with groups), but by only one unite? # missing values # Explicitly, i.e. flagged with NA. # Implicitly, i.e. simply not present in the data. stocks <- tibble ( year = c(2015,2015,2015,2015,2016,2016,2016), qtr = c(1, 2, 3, 4, 2, 3, 4), return = c(1.88,0.59,NA,0.92,0.17,2.66) ) stocks %>% spread(year, return) stocks %>% spread(year, return) %>% gather(year, return, `2015`:`2016`, na.rm = TRUE) stocks %>% complete(year, qtr) treatment <- tribble( ~ person, ~ treatment, ~response, "Derrick Whitmore",1, 7, NA, 2, 10, NA, 3, 9, "Katherine Burke", 1, 4 ) treatment %>% fill(person) # compare and contrast the fill() arguments to spread() and complete() # what does the direction argument to fill() do # case study who # gathering together all teh columns from new_sp_m014 to newrel_f65. 
# we laso use na.rm so we can focus on he values that are present who1 <- who %>% gather(new_sp_m014:newrel_f65, key = "key", value ="cases", na.rm = TRUE) who1 # counting all the columns who1 %>% count(key) # formatting the column names to make all the variables consistent who2 <- who1 %>% mutate(key = stringr::str_replace(key, "newrel", "new_rel")) who2 # separating the values with separate who3 <- who2 %>% separate(key, c("new", "type","sexage"), sep = "_") who3 # counting all the columns and dropping columns iso2, iso3 since they are redundant who3 %>% count(new) who4 <- who3 %>% select(-new, -iso2, -iso3) # separate the sexage into sex and age by splitting after the first character who5 <- who4 %>% separate(sexage, c("sex", "age"), sep =1) who5 # combining all the different pieces above together to create one code who %>% gather(key, value, new_sp_m014:newrel_f65, na.rm = TRUE) %>% mutate(key = stringr::str_replace(key, "newrel", "new_rel")) %>% separate(key, c("new", "var", "sexage")) %>% select(-new, -iso2, -iso3) %>% separate(sexage, c("sex","age"), sep = 1) # 1. In this case study I set na.rm = TRUE just to make it easier to check that we had the correct values. Is this reasonable? # Think about how missing values are represented in this dataset. Are there implicit missing values? What's the difference between an NA and zero? # NA represents missing values whereas 0 is for cases that are not present # 2. What happens if you neglect the mutate() step? (mutate(key = stringr::str_replace(key, "newrel", "new_rel"))) # When we neglect the mutate() step, we are unable to identify newrel observations/values # 3. I claimed that iso2 and iso3 were redundant with country. Confirm this claim. who %>% count(country, iso2, iso3) # 4. For each country, year, and sex compute the total number of cases of TB. Make an informative visualisation of the data. 
t_who %>% group_by(country) %>% mutate(cases_per_country = sum(cases)) %>% group_by(country, year, sex) %>% filter(cases_per_country > 1000000, year > 1995) %>% count(wt = caes) %>% ggplot(aes(year,n)) + geom_line(aes(color = country)) + facet_wrap(~sex, nrow = 2)
953db8f934af43374cc72e3ea8e658b864f38d9c
66e04f24259a07363ad8da7cd47872f75abbaea0
/Joining Data in R with data.table/Chapter 4-Concatenating and Reshaping data.tables/2.R
41120205891e9c5955217b46c20a580e1e5a4603
[ "MIT" ]
permissive
artileda/Datacamp-Data-Scientist-with-R-2019
19d64729a691880228f5a18994ad7b58d3e7b40e
a8b3f8f64cc5756add7ec5cae0e332101cb00bd9
refs/heads/master
2022-02-24T04:18:28.860980
2019-08-28T04:35:32
2019-08-28T04:35:32
325,043,594
1
0
null
null
null
null
UTF-8
R
false
false
1,315
r
2.R
# Concatenating a list of data.tables # A list of data.tables has been loaded into your R session: gdp. Its elements contain a data.table for each continent, each data.table containing the gross domestic product (gdp) in the year 2000 for the countries in each continent (data sourced from the Gapminder foundation). Your goal is to build a new data.table containing the observations from all data.tables in the gdp list # # Instructions 1/3 # 35 XP # 1 # 2 # 3 # Concatenate all data.tables in the gdp list, saving the result to gdp_all_1. # # # Concatenate its data.tables gdp_all_1 <- rbindlist(gdp) # Modify the code so that a new column, "continent" is created in the result stored in gdp_all_2, which contains the continent(s) each country is located in. # Inspect the result in your console. # Concatenate its data.tables gdp_all_2 <- rbindlist(gdp, idcol = "continent") # In your console run the code to check the result of your previous call to rbindlist(). Can you see any problems? # Modify your call to rbindlist() to fix any problems you have found, saving the result in gdp_all_3 # Run this code to inspect gdp_all_2 gdp_all_2 <- rbindlist(gdp, idcol = "continent") str(gdp_all_2) gdp_all_2[95:105] # Fix the problem gdp_all_3 <- rbindlist(gdp, idcol = "continent", use.names = TRUE) gdp_all_3
8c432ec07ab667d6e6745ea2176be6102a96a283
9f2df28e9a44cf50c0249030121621f6b4172f16
/man/FAMEoutliers.Rd
5108cc02401fd7e43e6d0fdfda1ad8258083f10f
[]
no_license
acinostroza/TargetSearch
7246e8c473403a72cd295ce6b6924c2b1766ae41
31ffae96fcefdeb8cd73138efd789fe28b511ca4
refs/heads/master
2023-07-05T20:46:15.736431
2023-06-26T13:10:43
2023-06-26T13:10:43
126,149,138
4
0
null
2019-08-19T09:46:32
2018-03-21T08:47:25
R
UTF-8
R
false
false
2,180
rd
FAMEoutliers.Rd
\name{FAMEoutliers} \alias{FAMEoutliers} \title{ FAME outlier detection } \description{ A function to detect retention time marker (FAME) outliers. } \usage{ FAMEoutliers(samples, RImatrix, pdffile = NA, startDay = NA, endDay = NA, threshold = 3, group.threshold = 0.05) } \arguments{ \item{samples}{ A \code{tsSample} object created by \code{ImportSamples} function. } \item{RImatrix}{ A retention time matrix of the found retention time markers. } \item{pdffile}{ A character string naming a PDF file where the FAMEs report will be saved. } \item{startDay}{ A numeric vector with the starting days of your day groups. } \item{endDay}{ A numeric vector with the ending days of your day groups. } \item{threshold}{ A standard deviations cutoff to detect outliers. } \item{group.threshold}{ A numeric cutoff to detect day groups based on hierarchical clustering. Must be between \code{0..1}.} } \details{ If no \code{pdffile} argument is given, the report will be saved on a file called \code{"TargetSearch-YYYY-MM-DD.FAME-report.pdf"}, where \code{YYYY-MM-DD} is a date. If both \code{startDay} and \code{endDay} are not given, the function will try to detect day groups using a hierarchical clustering approach by cutting the tree using \code{group.threshold} as cutoff height. Retention time markers that deviate more than \code{threshold} standard deviations from the mean of their day group will be identified as outliers. } \value{ A logical matrix of the same size of \code{RImatrix}. A \code{TRUE} value indicates that the retention time marker in that particular sample is an outlier. 
} \examples{ # load pre-calculated example data and objects data(TSExample) # find the retention marker outliers of the example data and save it in "outlier.pdf" outliers <- FAMEoutliers(sampleDescription, RImatrix, pdffile = "outlier.pdf") # find the outliers (although they are reported in the output PDF file) apply(outliers, 1, which) } \author{Alvaro Cuadros-Inostroza, Matthew Hannah, Henning Redestig } \seealso{ \code{\link{RIcorrect}}, \code{\link{ImportSamples}}, \code{\link{TSExample}} }
a109e1e6c1d7ce3068e4ca37a7c26ddbf6742635
0e5948d9b3bfe27ebc00cbb97c7095a68b10c29d
/data.prep.R
290227beb645e941434d2fccccf06aa72a69fb55
[]
no_license
asherstnev/MDSP.final.project
4a33c992a2138ec3d8a609c7aa5efae2e7b57532
46b95ff3b5b0058381d58b382b1ed56b8e733d05
refs/heads/master
2021-01-13T04:45:29.352922
2017-01-23T20:26:47
2017-01-23T20:26:47
79,108,969
0
0
null
null
null
null
UTF-8
R
false
false
6,498
r
data.prep.R
library(data.table) library(compare) ###################################################################################### # load raw data load.raw.data <- function(in.file) { raw.data <- fread(in.file) # rename some columns setnames(raw.data, "Loan ID", "loan.id") setnames(raw.data, "Customer ID", "customer.id") setnames(raw.data, "Loan Status", "status.str") setnames(raw.data, "Current Loan Amount", "amount.str") setnames(raw.data, "Credit Score", "credit.score") setnames(raw.data, "Years in current job", "Years.in.current.job") setnames(raw.data, "Home Ownership", "Home.Ownership") setnames(raw.data, "Annual Income", "income.str") setnames(raw.data, "Monthly Debt", "debt.str") setnames(raw.data, "Years of Credit History", "Years.of.Credit.History") setnames(raw.data, "Months since last delinquent", "Months.since.last.delinquent") setnames(raw.data, "Number of Open Accounts", "Number.of.Open.Accounts") setnames(raw.data, "Number of Credit Problems", "Number.of.Credit.Problems") setnames(raw.data, "Current Credit Balance", "Current.Credit.Balance") setnames(raw.data, "Maximum Open Credit", "Maximum.Open.Credit") setnames(raw.data, "Tax Liens", "Tax.Liens") # convert charachter status into numeric # Charged Off (BAD!) -> 0 # Fully Paid (GOOD!) 
-> 1 raw.data[, status := 0] raw.data[status.str == "Fully Paid", status := 1] raw.data } raw.data <- load.raw.data("data/LoansTrainingSetV2_fixed.csv") new.raw.data <- load.raw.data("data/big.LoansTrainingSetV2_fixed.csv") ###################################################################################### # data cleansing function data.cleansing <- function(in.data) { # convert potentially character feature into numeric in.data[, debt := as.numeric(debt.str)] in.data[, income := as.numeric(income.str)] in.data[, amount := as.numeric(amount.str)] # fix features with $ or £ if (0 < nrow(in.data[is.na(debt)])) { in.data[is.na(debt), debt.str := gsub("[$£]","", debt.str)] in.data[is.na(debt), debt := as.numeric(debt.str)] in.data[, debt.str := NULL] } #in.data[is.na(income), income.str := gsub("[$£]","", income.str)] #in.data[is.na(income), income := as.numeric(income.str)] in.data[, income.str := NULL] #in.data[is.na(amount), amount.str := gsub("[$£]","", amount.str)] #in.data[is.na(amount), amount := as.numeric(amount.str)] in.data[, amount.str := NULL] # remove pure duplicates sel.data <- in.data[!duplicated(in.data)] # remove records with wrong amount, income, or credit.score values label.wrong.records <- function(amount, income, credit.score) { if (1 == length(amount)) return(TRUE) flag <- rep(TRUE, length(amount)) flag[amount == 99999999 | is.na(income) | is.na(credit.score)] <- FALSE return(flag) } sel.data[, keep.record := label.wrong.records(amount, income, credit.score), by=.(customer.id)] sel.data <- sel.data[T == keep.record] sel.data[, keep.record := NULL] sel.data[, N.records := .N, by=.(loan.id)] if (0) { # number of Loan/Customer ID sel.data[, N.customers := .N, by=.(loan.id)] sel.data[, N.loan.ids := .N, by=.(customer.id)] table(sel.data$N.customers) table(sel.data$N.loan.ids) sel.data[, N.customers := NULL] sel.data[, N.loan.ids := NULL] } # convert Years in current job into numerical feature sel.data[, job.years := -99] 
sel.data[Years.in.current.job == "n/a", job.years := -1] sel.data[Years.in.current.job == "< 1 year", job.years := 0] sel.data[Years.in.current.job == "10+ years", job.years := 99] f <- function(c){as.numeric(strsplit(c, split = " ")[1][[1]][1])} sel.data[job.years == -99, job.years := sapply(Years.in.current.job, f)] sel.data[, Years.in.current.job := NULL] # convert int features into numeric ones sel.data[, Months.since.last.delinquent := as.numeric(Months.since.last.delinquent)] sel.data[, Number.of.Open.Accounts := as.numeric(Number.of.Open.Accounts)] sel.data[, Number.of.Credit.Problems := as.numeric(Number.of.Credit.Problems)] sel.data[, Current.Credit.Balance := as.numeric(Current.Credit.Balance)] sel.data[, Maximum.Open.Credit := as.numeric(Maximum.Open.Credit)] sel.data[, Bankruptcies := as.numeric(Bankruptcies)] sel.data[, credit.score := as.numeric(credit.score)] # remove one id sel.data[, customer.id := NULL] sel.data } # apply the data cleansing function system.time(sel.data <- data.cleansing(copy(raw.data))) # fix problem with credit.score >1000 sel.data[credit.score > 1000, credit.score := credit.score / 10] if (0) { system.time(new.sel.data <- data.cleansing(copy(new.raw.data))) test1.data <- fread("data/attept.1.csv") new.sel.data[, old.rec := 0] new.sel.data[loan.id %in% sel.data$loan.id, old.rec := 1] new.sel.data[, test1.rec := 0] new.sel.data[loan.id %in% test1.data$`Loan ID`, test1.rec := 1] used.for.test <- new.sel.data[, test1.rec == 1] } # look at the data summary(sel.data) ###################################################################################### # check with dummy model if (1) { dummy.model <- function(d) { d[, status.pred := 1] d[credit.score > 1000, status.pred := 0] d } # apply dummy model dd <- dummy.model(raw.data) the.acc <- nrow(dd[status == status.pred])/nrow(dd) print(paste0("Dummy model on raw data: acc = ", round(100*the.acc, 1), "%")) dd <- dummy.model(sel.data) the.acc <- nrow(dd[status == status.pred])/nrow(dd) 
print(paste0("Dummy model on clean data: acc = ", round(100 * the.acc, 1), "%")) dd <- dummy.model(new.sel.data) the.acc <- nrow(dd[status == status.pred])/nrow(dd) print(paste0("Dummy model on big clean data: acc = ", round(100 * the.acc, 1), "%")) rm(dd) dd <- dummy.model(new.sel.data[old.rec == 0]) the.acc <- nrow(dd[status == status.pred])/nrow(dd) print(paste0("Dummy model on big clean data (excluding real training dat): acc = ", round(100 * the.acc, 1), "%")) rm(dd) } ###################################################################################### # save clean data sel.data[, status.pred := NULL] write.table(sel.data, file="data/sel.data.tsv", sep="\t", row.names = F)
9359d82167a6fb7171416d2ff2b9923abc058895
bf1e79e2ee906acf1a6b5b414f3e78dc16d1e437
/man/source_han.Rd
7b73f32673f031a2b9cdbdb68ecc5996f0fbd108
[]
no_license
cran/showtextdb
496a08b240ee3cdd76823085ced5c7d622b0bf5e
b1e4f2d1da68de68c1ade7d550d5334dcb62f510
refs/heads/master
2021-08-06T09:13:47.076173
2020-06-04T07:10:02
2020-06-04T07:10:02
31,942,397
0
0
null
null
null
null
UTF-8
R
false
true
1,048
rd
source_han.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/source_han.R \name{source_han} \alias{source_han} \alias{source_han_sans} \alias{source_han_serif} \title{Meta-information for the Source Han Sans/Serif Fonts} \usage{ source_han_sans(lang = c("CN", "TW", "JP", "KR")) source_han_serif(lang = c("CN", "TW", "JP", "KR")) } \arguments{ \item{lang}{Language of the font. "CN" for simplified Chinese, "TW" for traditional Chinese, "JP" for Japanese, and "KR" for Korean.} } \description{ These functions provide information of the Source Han Sans/Serif fonts that can be used in the \code{\link{font_install}()} function. Source Han Sans/Serif fonts provide complete support for the CJK (\strong{C}hinese, \strong{J}apanese, and \strong{K}orean) characters. } \examples{ \dontrun{ ## Install Source Han Sans font (by default Simplified Chinese) ## to the showtexdb package font_install(source_han_sans()) ## Source Han Serif Japanese font_install(source_han_serif("JP")) } } \author{ Yixuan Qiu <\url{https://statr.me/}> }
401b1b2ce83c6fd0ede102f93b578f43189af07d
147706932a9deff1d9b12fed0783047110ba2191
/plot3.R
8cd48c211f7f44cc79c0fed441d75dea94431d57
[]
no_license
ok-datascience/ExData_Plotting1
023755f6b8ce302da85d27a775b986af67cd3364
8f5d63de3275aede7afb3d5d29e39339317c60a9
refs/heads/master
2021-01-09T07:31:00.187201
2015-01-09T13:40:41
2015-01-09T13:40:41
28,893,617
0
0
null
2015-01-07T02:08:16
2015-01-07T02:08:15
null
UTF-8
R
false
false
931
r
plot3.R
# read sample data allData <- read.csv('../exdata1/household_power_consumption.txt', sep = ';',header = T, na.strings="?") # convert separate date and time fields to POSIXlt date type allData$Date <- strptime(paste(allData$Date, allData$Time), "%d/%m/%Y %H:%M:%S") # subset sample data plotData <- allData[(as.Date(allData$Date) == '2007-02-01' | as.Date(allData$Date) =='2007-02-02'),] # make a plot plot(plotData$Date, plotData$Sub_metering_1, type="n", xlab="", ylab = "Energy sub metering") lines(plotData$Date, plotData$Sub_metering_1) lines(plotData$Date, plotData$Sub_metering_2, col="red") lines(plotData$Date, plotData$Sub_metering_3, col="blue") legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col=c("black","red", "blue"), lty=c(1, 1, 1), cex=0.8, y.intersp=0.5, text.width = strwidth("Sub_metering_1")) # save plot to the file in PNG image format dev.copy(png, 'plot3.png') dev.off()
48d11d776782c27a8889b08811d6a3cce6f74726
2975fba6bf359214c55e7d936f896a5a4be3d8f5
/man/FGR.Rd
e2953ddf5ab5936fa9c3907af4f5afa8f347d06b
[]
no_license
tagteam/riskRegression
6bf6166f098bbdc25135f77de60122e75e54e103
fde7de8ca8d4224d3a92dffeccf590a786b16941
refs/heads/master
2023-08-08T03:11:29.465567
2023-07-26T12:58:04
2023-07-26T12:58:04
36,596,081
38
14
null
2023-05-17T13:36:27
2015-05-31T09:22:16
R
UTF-8
R
false
true
3,109
rd
FGR.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/FGR.R \name{FGR} \alias{FGR} \title{Formula wrapper for crr from cmprsk} \usage{ FGR(formula, data, cause = 1, y = TRUE, ...) } \arguments{ \item{formula}{A formula whose left hand side is a \code{Hist} object -- see \code{\link{Hist}}. The right hand side specifies (a linear combination of) the covariates. See examples below.} \item{data}{A data.frame in which all the variables of \code{formula} can be interpreted.} \item{cause}{The failure type of interest. Defaults to \code{1}.} \item{y}{logical value: if \code{TRUE}, the response vector is returned in component \code{response}.} \item{\dots}{...} } \value{ See \code{crr}. } \description{ Formula interface for Fine-Gray regression competing risk models. } \details{ Formula interface for the function \code{crr} from the \code{cmprsk} package. The function \code{crr} allows to multiply some covariates by time before they enter the linear predictor. This can be achieved with the formula interface, however, the code becomes a little cumbersome. See the examples. Note that FGR does not allow for delayed entry (left-truncation). The assumed value for indicating censored observations in the event variable is \code{0}. The function \code{Hist} has an argument \code{cens.code} which can change this (if you do not want to change the event variable). 
} \examples{ library(prodlim) library(survival) library(cmprsk) library(lava) d <- prodlim::SimCompRisk(100) f1 <- FGR(Hist(time,cause)~X1+X2,data=d) print(f1) ## crr allows that some covariates are multiplied by ## a function of time (see argument tf of crr) ## by FGR uses the identity matrix f2 <- FGR(Hist(time,cause)~cov2(X1)+X2,data=d) print(f2) ## same thing, but more explicit: f3 <- FGR(Hist(time,cause)~cov2(X1)+cov1(X2),data=d) print(f3) ## both variables can enter cov2: f4 <- FGR(Hist(time,cause)~cov2(X1)+cov2(X2),data=d) print(f4) ## change the function of time qFun <- function(x){x^2} noFun <- function(x){x} sqFun <- function(x){x^0.5} ## multiply X1 by time^2 and X2 by time: f5 <- FGR(Hist(time,cause)~cov2(X1,tf=qFun)+cov2(X2),data=d) print(f5) print(f5$crrFit) ## same results as crr with(d,crr(ftime=time, fstatus=cause, cov2=d[,c("X1","X2")], tf=function(time){cbind(qFun(time),time)})) ## still same result, but more explicit f5a <- FGR(Hist(time,cause)~cov2(X1,tf=qFun)+cov2(X2,tf=noFun),data=d) f5a$crrFit ## multiply X1 by time^2 and X2 by sqrt(time) f5b <- FGR(Hist(time,cause)~cov2(X1,tf=qFun)+cov2(X2,tf=sqFun),data=d,cause=1) ## additional arguments for crr f6<- FGR(Hist(time,cause)~X1+X2,data=d, cause=1,gtol=1e-5) f6 f6a<- FGR(Hist(time,cause)~X1+X2,data=d, cause=1,gtol=0.1) f6a } \references{ Gerds, TA and Scheike, T and Andersen, PK (2011) Absolute risk regression for competing risks: interpretation, link functions and prediction Research report 11/7. Department of Biostatistics, University of Copenhagen } \seealso{ \code{\link{riskRegression}} } \author{ Thomas Alexander Gerds \email{tag@biostat.ku.dk} } \keyword{survival}
1d4eb10c55c65858a58863438538809528c3cc8b
e573bc7fd968068a52a5144a3854d184bbe4cda8
/Recommended/survival/R/frailty.gammacon.R
6be48fb433864ec947dc4959d6d489b410ef6bcd
[]
no_license
lukaszdaniel/ivory
ef2a0f5fe2bc87952bf4471aa79f1bca193d56f9
0a50f94ce645c17cb1caa6aa1ecdd493e9195ca0
refs/heads/master
2021-11-18T17:15:11.773836
2021-10-13T21:07:24
2021-10-13T21:07:24
32,650,353
5
1
null
2018-03-26T14:59:37
2015-03-21T21:18:11
R
UTF-8
R
false
false
768
r
frailty.gammacon.R
# $Id: frailty.gammacon.S 11166 2008-11-24 22:10:34Z therneau $ # Correct the loglik for a gamma frailty # Term2 is the hard one, discussed in section 3.5 of the report # The penalty function only adds \vu \sum(w_j) to the CoxPL, so this # does a bit more than equation 15. # frailty.gammacon <- function(d, nu) { maxd <- max(d) if (nu > 1e7*maxd) term1 <- sum(d*d)/nu #second order Taylor series else term1 <- sum(d + nu*log(nu/(nu+d))) #easy part tbl <- table(factor(d[d>0], levels = seq_len(maxd))) ctbl<- rev(cumsum(rev(tbl))) dlev<- seq_len(maxd) term2.numerator <- nu + rep(dlev-1, ctbl) term2.denom <- nu + rep(dlev, tbl*dlev) term2 <- sum(log(term2.numerator/term2.denom)) term1 + term2 }
7159a21b55f9fbce8bbe0e25c94e44224e9f0e02
4ae65ee9e98ab3e9b279c5af16cf15bbc364d34e
/years 2015-18.R
67f2b47519449d999a6df58965bb57a322957f90
[]
no_license
emcbride09/Are-points-the-most-important-NBA-Championship-factor
d5fd5ce601036e867553eaf0d59d04d0598ce7d0
26004c28a1a61a65694a6ef5b7282bd8e2f237fb
refs/heads/master
2023-01-31T12:09:01.517622
2020-12-15T13:56:20
2020-12-15T13:56:20
158,214,059
0
0
null
null
null
null
UTF-8
R
false
false
5,346
r
years 2015-18.R
require(tidyverse) url <- "https://www.teamrankings.com/nba/stat/points-per-game?date=2016-06-20" fifteenstat <- read_html(url) %>% html_nodes("table") %>% .[[1]] %>% html_table() fifteen <- fifteenstat %>% select(Team, `2014`, `2015`) url_17 <- 'https://www.teamrankings.com/nba/stat/points-per-game?date=2018-06-09' seventeen <- read_html(url_17) %>% html_nodes("table") %>% .[[1]] %>% html_table() %>% select(Team, `2016`, `2017`) join_3 <- full_join(seventeen, fifteenstat) url_18 <- "https://www.teamrankings.com/nba/stat/points-per-game?date=2018-11-18" eighteen <- read_html(url_18) %>% html_nodes("table") %>% .[[1]] %>% html_table() %>% select(Team, `2018`) last_4 <- full_join(join_3, eighteen) last_4 <- last_4 %>% select(Team, `2014`, `2015`, `2016`, `2017`, `2018`) last_4 <- last_4 %>% gather(key = 'year_id', value = 'av_point', `2014` : `2018`)%>% group_by(year_id) %>% mutate(rank = dense_rank(desc(av_point))) ###get champs url_champs <- "https://en.wikipedia.org/wiki/List_of_NBA_champions#Champions" champs_list <- read_html(url_champs) %>% html_nodes(".hlist ul , #NBA_champions") %>% html_text() champs_list <- champs_list[[6]] champs_list <- champs_list %>% str_replace_all("Trail Blazers", "Trailblazers") #parse list to df champs_df <- data.frame(do.call('rbind',(strsplit(champs_list, "\n", fixed = TRUE)))) #transpose easy champs_t <- as.data.frame(t(champs_df)) champs_t$V1 <- as.character(champs_t$V1) year_champs <- as.data.frame(str_split_fixed(champs_t$V1, ":", 2)) year_champs <- year_champs %>% rename(year_id = V1, team = V2) last_4$Team[last_4$Team == 'Golden State Warriors'] <- " Golden State Warriors" last_4$Team[last_4$Team == 'Warriors'] <- " Golden State Warriors" last_4$Team[last_4$Team == 'Golden State'] <- " Golden State Warriors" last_4$Team[last_4$Team == 'San Antonio Spurs'] <- " San Antonio Spurs" last_4$Team[last_4$Team == ' Cleveland Cavaliers'] <- " Cleveland Cavaliers" four_final <- full_join(last_4, year_champs, by = 'year_id') 
four_final$team <- as.character(four_final$team) four_final2 <- four_final %>% mutate(result = ifelse(Team == team, "champs", "DNQ")) final_four_3 <- four_final2 %>% filter(year_id > 2013) final_table_3 <- rename(final_four_3, fran_id = Team, finish = result) final_table_3$year_id <- as.numeric(final_table_3$year_id) final_table_4 <- left_join(clean_rank, final_table_3) clean2 <- select(clean_rank, year_id, fran_id, rank, av_point, result, finish) final_table_5 <- rbind(clean2, final_table_3) final_table_5 <- unique(final_table_5) %>% select(-result) #learn how to parse JS at some point champ_plot2 <- ggplot(final_table_5 %>% group_by(year_id), aes(year_id, av_point, color = finish, alpha = finish)) + labs(title = "Defence Wins Championships", subtitle = "Times the NBA Champions have also averaged \nthe most points per season", x = "Season (year ending)", y = "Average Points Per Season") + geom_point(aes(size = 12, shape = finish)) + geom_text_repel(data = filter(final_table_5, finish == "champs" & rank == 1), aes(year_id, av_point, size = 12, label = paste("",fran_id,",",year_id,"")), show.legend = FALSE, nudge_x = -7) + scale_shape_manual(values = c(20, 21, 6)) + scale_alpha_manual(values = c(0.8, 0.3)) + scale_color_discrete(name = "NBA Champions", labels = c("Champions", "DNQ", "Playoffs")) champ_plot2 <- champ_plot2 + scale_x_continuous(breaks = seq(min(1940), max(2019), by = 5)) + theme(plot.title = element_text(size = 32, family = "Arial", colour = "white", face = "bold"), plot.subtitle = element_text(size = 20, family = "Arial", colour = "white", face = "italic"), plot.background = element_rect(fill = "black"), panel.background = element_rect(fill = "black"), legend.background = element_blank(), axis.text.x = element_text(colour = "white", size = 13), axis.text.y = element_text(colour = "white", size = 13), axis.title.x = element_text(colour = "white", size = 20, face = 'bold'), axis.title.y = element_text(colour = "white", size = 20, face = 'bold'), 
legend.text = element_text(colour = "white", size = 16), panel.grid.major.y = element_blank(), panel.grid.minor.y = element_blank(), panel.grid.major.x = element_line(color = "grey25"), panel.grid.minor.x = element_line(color = "grey18"), legend.title = element_text(color = 'white', family = "Arial", size = 22), legend.key = element_blank()) + guides(size = FALSE, shape = FALSE, result = FALSE, alpha = FALSE) + annotate("label", x = 1983, y = 65, label = "There have been 3 instances in the History of the NBA where the highest average scoring team has won the championship \n two of which have occurred in the past 3 years", color = "grey60", fill = 'black') + scale_shape_manual(values = c(20, 21, 7, 7)) + scale_alpha_manual(values = c( 1, 0.6, 0.8, 0.8)) + scale_color_discrete(name = "NBA Champions", labels = c("Champions", "DNQ")) champ_plot2 ggsave("champs_av_points.png", plot = champ_plot2, width = 300, height =175, units = "mm", dpi = 400)
d3c4e7ed9cb88d0d778e929389586e3b9c55f0a6
af7d0c29242eb096032eddbdeeceeb702b0f34b7
/Code 4.R
a8e2857af96b68d84d2f517cfd26ae2775c420cf
[]
no_license
SoylabSingh/PATRIOT
b749746819283c18caf41135bbdaa16c240b5c8f
3e0a605b93c911980d38fc3444d41d73517436a5
refs/heads/master
2023-08-01T08:45:13.264077
2021-09-09T17:38:39
2021-09-09T17:38:39
291,137,928
1
0
null
null
null
null
UTF-8
R
false
false
9,453
r
Code 4.R
#Install packages needed install.packages("tidyr") install.packages("stringi") install.packages("stringr") install.packages("plyr") install.packages("dplyr") install.packages('rrBLUP') install.packages('data.table') library(tidyr) library(stringi) library(data.table) library(rrBLUP) library(plyr) library(dplyr) library(stringr) #####Loop through GP##### #####Initiate the environment with marker files##### setwd('/Users/User/Desktop/PATRIOT') A <- read.csv('imputeexample.csv') A2 <- read.csv('SoyNAMv2markersnumeric.csv') #Read in Pedigree file. Progeny name in first column, female parent in second, male parent in third.# Z <- read.csv('SoyNAMpedigrees.csv',header = T) #Create list of all possible parents from pedigree# Unique <- unique(c(Z[,2],Z[,3])) #Create list of possible parent combinations from pedigree# Predup <- unique(Z[,2:3]) #Coerce parental combinations to same format as Pedigree Tracing.R# Dupe <- vector(mode="list",length=nrow(Predup)) for(j in 1:nrow(Predup)){ Dupe[j] <- paste(Predup[j,1],",",Predup[j,2])} Dupe <- gsub(" ","",Dupe) #Set rownames rownames(A) <- A$SNP #Remove Chr, Pos, MarkerName columns from 'traced' file A$SNP <- NULL A$CHROM <- NULL A$POS <- NULL #Transpose for use in calculating allele effect estimates A1 <- data.frame(t(A)) #Set rownames rownames(A2) <- A2$SNP #Remove Chr, Pos, MarkerName columns from raw marker file (0,1,2 format) A2$SNP <- NULL A2$CHROM <- NULL A2$POS <- NULL #Transpose A3 <- data.frame(t(A2)) #Read in phenotypic records- in this case, multi-trait, multi-environment. df <- read.csv('NAMFullPheno.csv') #Generate list of environments enviros=unique(df$Env) #Clear space rm(A2,A) #Remove NAM parents from consideration in GS model A3 <- A3[41:5189,] #Initiate df to store correlations in Recordcorrelations <- data.frame() #Garbage collection gc() gc() gc() #We care about A1,A3,df,enviros. Others are superfluous. #Loop through environments if multi-environment data. 
# --- Main genomic-prediction loop -------------------------------------------
# For each environment: subset phenotypes/genotypes to common lines, split
# 80/20 into training/testing, estimate allele effect estimates (AEEs) per
# parent and parent pair from the traced markers, then fit and compare
# PATRIOT-rrBLUP (AEE-coded markers) against standard rrBLUP (0/1/2 markers).
# Side effects per iteration: scratch files "train.temp"/"test.temp" and
# "aeematrix1.csv" (overwritten each iteration); on completion the
# correlation summary is written to CSV.
for (q in seq_along(enviros)) {
  # Phenotypic records for the q-th environment, indexed by line name.
  p <- enviros[q]
  s <- df[which(df$Env == p), ]
  rownames(s) <- s$CorrectedStrain
  s$CorrectedStrain <- NULL
  ###### Change based on trait #####
  s <- s %>% drop_na(Yldkgha)

  # Keep only lines present in BOTH the phenotype records and each marker set.
  df1 <- s[which(rownames(s) %in% rownames(A1)), ]    # phenos matching traced markers
  A2 <- A1[which(rownames(A1) %in% rownames(df1)), ]  # traced markers with phenos
  df2 <- s[which(rownames(s) %in% rownames(A3)), ]    # phenos matching numeric markers
  A4 <- A3[which(rownames(A3) %in% rownames(df2)), ]  # numeric markers with phenos

  # 80% of the u usable lines train the model; the rest are held out.
  u <- nrow(df1)
  Pheno <- df1 %>% select(Yldkgha)  # change based on trait
  training_entries <- as.matrix(sample(seq_len(u), floor(u * 0.8)))
  testing_entries <- setdiff(seq_len(u), training_entries)
  Pheno_training_data <- as.matrix(Pheno[training_entries, ])
  # Fixed column name so downstream steps can rely on it.
  colnames(Pheno_training_data) <- "aeematrix"
  Pheno_testing_data <- as.matrix(Pheno[testing_entries, ])
  SNP_training_data <- as.matrix(A2[training_entries, ])    # traced markers
  SNP_testing_data <- as.matrix(A2[testing_entries, ])
  SNP_training_data2 <- as.matrix(A4[training_entries, ])   # numeric 0/1/2 markers
  SNP_testing_data2 <- as.matrix(A4[testing_entries, ])

  # Attach the phenotype as the final column of the traced training matrix.
  B <- cbind(SNP_training_data, as.data.frame(Pheno_training_data))

  # AEE: for each marker and each known parent, the mean phenotype of
  # training lines whose allele traces to that parent, as a deviation from
  # the overall training mean.
  aeematrix <- data.frame()
  n_markers <- ncol(B) - 1
  for (a in seq_len(n_markers)) {
    for (h in seq_along(Unique)) {
      aeematrix[h, a] <- mean(B[, ncol(B)][which(B[, a] == Unique[h])]) - mean(B[, ncol(B)])
    }
  }
  rownames(aeematrix) <- Unique
  colnames(aeematrix) <- colnames(B)[seq_len(n_markers)]
  # Parents with no lines at a marker yield NaN means; zero them out
  # (is.na() is TRUE for NaN as well as NA).
  aeematrix[is.na(aeematrix)] <- 0
  PostParents <- nrow(aeematrix)

  # Extend the AEE table with one row per parent pair: a pair's AEE is the
  # mean of its two parents' AEEs (used when the allele is untraced).
  for (b in seq_len(n_markers)) {
    for (c in seq_along(Dupe)) {
      # Split "parent1,parent2" back into its two parent names.
      TwoParents <- unlist(strsplit(Dupe[[c]], ','))
      FirstPar <- TwoParents[[1]]
      SecondPar <- TwoParents[[2]]
      rose <- aeematrix[FirstPar, b]
      rose2 <- aeematrix[SecondPar, b]
      aeematrix[c + PostParents, b] <- 0.5 * (rose + rose2)
    }
  }
  FullRowNameList <- c(Unique, Dupe)
  rownames(aeematrix) <- FullRowNameList

  # Replace each traced marker class (parent or parent-pair label) with its
  # AEE in both the training and testing marker matrices.
  for (Zed in seq_len(ncol(aeematrix))) {
    # BUG FIX: build the lookup from the CURRENT marker's AEE column
    # (aeematrix[, Zed]); the original used the stale index `a` left over
    # from the earlier loop, so every marker was replaced with the LAST
    # marker's AEEs.
    replacewith <- as.data.frame(aeematrix[, Zed])
    replacewith$Lookup <- rownames(aeematrix)
    colnames(replacewith) <- c("AEE", "Lookup")
    # Reverse row order so the parent-pair labels (appended last) are
    # replaced before single-parent names, which are substrings of them.
    replacement <- replacewith[seq(dim(replacewith)[1], 1), ]
    SNP_training_data[, Zed] <- stri_replace_all_fixed(SNP_training_data[, Zed],
                                                       pattern = replacement$Lookup,
                                                       replacement = replacement$AEE,
                                                       vectorize_all = FALSE)
    SNP_testing_data[, Zed] <- stri_replace_all_fixed(SNP_testing_data[, Zed],
                                                      pattern = replacement$Lookup,
                                                      replacement = replacement$AEE,
                                                      vectorize_all = FALSE)
  }
  SNP_testing_data <- data.frame(SNP_testing_data)
  SNP_training_data <- data.frame(SNP_training_data)

  # Round-trip through disk to coerce the AEE strings into numeric columns
  # in a format suitable for rrBLUP.
  fwrite(SNP_training_data, "train.temp")
  b1 <- fread("train.temp", colClasses = "numeric")
  fwrite(SNP_testing_data, "test.temp")
  b2 <- fread("test.temp", colClasses = "numeric")

  # PATRIOT-rrBLUP: mixed model with AEEs replacing raw marker data.
  trained_model <- mixed.solve(y = Pheno_training_data, Z = b1)
  marker_effects <- as.matrix(trained_model$u)
  BLUE <- as.vector(trained_model$beta)
  # Predictions = sum of marker effects plus the BLUE intercept.
  predicted_train <- as.matrix(b1) %*% marker_effects
  predicted_test <- as.matrix(b2) %*% marker_effects
  predicted_train_result <- as.vector(predicted_train[, 1] + BLUE)
  predicted_test_result <- as.vector(predicted_test[, 1] + BLUE)

  # Standard rrBLUP on the raw 0/1/2 marker data, for comparison.
  trained_model2 <- mixed.solve(y = Pheno_training_data, Z = SNP_training_data2)
  marker_effects2 <- as.matrix(trained_model2$u)
  BLUE2 <- as.vector(trained_model2$beta)
  predicted_train2 <- as.matrix(SNP_training_data2) %*% marker_effects2
  predicted_test2 <- as.matrix(SNP_testing_data2) %*% marker_effects2
  predicted_train_result2 <- as.vector(predicted_train2[, 1] + BLUE2)
  predicted_test_result2 <- as.vector(predicted_test2[, 1] + BLUE2)

  # Predicted-vs-observed correlations:
  # col 1 environment, col 2 AEE testing, col 3 AEE training,
  # col 4 standard testing, col 5 standard training.
  Recordcorrelations[q, 1] <- p
  Recordcorrelations[q, 2] <- cor(as.vector(Pheno_testing_data), predicted_test_result, use = "complete")
  Recordcorrelations[q, 3] <- cor(as.vector(Pheno_training_data), predicted_train_result, use = "complete")
  Recordcorrelations[q, 4] <- cor(as.vector(Pheno_testing_data), predicted_test_result2, use = "complete")
  Recordcorrelations[q, 5] <- cor(as.vector(Pheno_training_data), predicted_train_result2, use = "complete")

  # Optionally, save the AEE matrix (overwritten each iteration).
  write.csv(aeematrix, "aeematrix1.csv")
}

# Label columns to keep track of which column refers to which correlation.
colnames(Recordcorrelations) <- c("Environment", "TestingAEE", "TrainingAEE",
                                  "TestingStandard", "TrainingStandard")
write.csv(Recordcorrelations, 'Standard vs AEEYld21.csv')
305093f5954c67991fdf336598fb683d6b810e37
7058b908bbb57fa425c6a0829ff08c2f3159fd7c
/cachematrix.R
fe7f4af743a0998a742cfadca71a8b6e35eb780e
[]
no_license
defaultersan/ProgrammingAssignment2
2dad32fdb1ec7288430f181031933e081b9bb45e
f5e69ed2a9953b840933384f3e51dd8b63290c41
refs/heads/master
2020-12-15T03:08:32.525439
2014-05-25T22:33:15
2014-05-25T22:33:15
null
0
0
null
null
null
null
UTF-8
R
false
false
1,667
r
cachematrix.R
## makeCacheMatric function is in turn exposing various functions to ## get/set the value of a matrix ## get/set the inverse of a square matrix ## cacheSolve function computes the inverse of a matrix. ## If the inverse matrix has already been calculated (and the matrix has not changed), ## then retrieves the inverse from the cache ## Creates and exposes following functions ## set function sets the value of the matrix ## get function gets the value of the matrix ## setinverse function sets the inverse of the square matrix ## getinverse function gets the inverse of the square matrix makeCacheMatrix <- function(x = matrix()) { m <- NULL set <- function(y) { x <<- y m <<- NULL } get <- function() x ## Sets the cache with the inverted matrix setinverse <- function(solve) m <<- solve ## Get the inverse matrix getinverse <- function() m ## Expose all functions list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } ## Computes the inverse of a square matrix that was passed in cacheSolve <- function(x, ...) { m <- x$getinverse() ## Check if inverse has already been computed if(!is.null(m)) { ## if yes, then return the cached information message("getting cached data") return(m) } ## Inverse has not been processed yet, so go ahead with the processing data <- x$get() m <- solve(data, ...) ## Set the cache indicating inverse computed x$setinverse(m) ## Return a matrix that is the inverse of 'x' m }
b368a89c983c9a83368f17189173d1075a20f6d7
85b80782f63b3482d8aa6317934c704b626387e4
/Modelos/Lufthansa_LHA_DE/Luf_10a19.R
c9908a53d513f2ffb29a07b738b559da93431dc0
[]
no_license
MathNog/UndergraduateResearchProject
23ab0a6bb6d53ba54353d6bf1c4980ea11a500da
404de409d2746006fb19316f4c84becaab2e33c7
refs/heads/master
2023-07-13T03:23:28.074473
2021-08-31T01:04:31
2021-08-31T01:04:31
286,582,042
0
0
null
null
null
null
ISO-8859-1
R
false
false
4,427
r
Luf_10a19.R
library(forecast)
library(tseries)
library(FitARMA)

# Set the working directory for all input/output files.
setwd("C:/Users/Matheus/Desktop/PUC/IC/CiasAereas/Modelos/Lufthansa_LHA_DE")

# Read the Lufthansa price data (2010-2019) and keep only column 6, the
# adjusted close; the other columns can be ignored.
arquivo <- read.csv(file = "LHA_DE2010a2019.csv")
AdjClose <- arquivo[c(6)]

# Coerce to a numeric vector and strip NA/NaN entries.
AdjClose_vec <- as.numeric(t(AdjClose))
AdjClose_vec <- AdjClose_vec[!is.na(AdjClose_vec)]
AdjClose_vec <- AdjClose_vec[!is.nan(AdjClose_vec)]

# Plot the raw adjusted-close prices to file.
jpeg(file = "C:/Users/Matheus/Desktop/PUC/IC/CiasAereas/Modelos/Lufthansa_LHA_DE/Dados_10a19/AdjClose_Luft_10a19.jpeg", width = 658, height = 553)
plot(AdjClose_vec, main = "AdjClose Lufthansa 2010-2019", type = "l")
dev.off()

# Log-returns: r_t = ln(P_t) - ln(P_{t-1}), i.e. the first difference of
# the log prices, which is exactly what diff(log(...), lag = 1) computes.
logret <- diff(log(AdjClose_vec), lag = 1)

# Plot the returns (not the prices) to file.
jpeg(file = "C:/Users/Matheus/Desktop/PUC/IC/CiasAereas/Modelos/Lufthansa_LHA_DE/Dados_10a19/LogRet_Luft_10a19.jpeg", width = 658, height = 553)
plot(logret, main = "Log-return AdjClose Lufthansa 2010-2019", type = "l")
dev.off()

# Wrap the returns in a time-series object, then split: the first 2400
# observations for model fitting, the remainder for forecast comparison.
serie <- ts(logret)
serie_2fit <- serie[c(1:2400)]
serie_2for <- serie[c(2401:length(serie))]
serie_ts_2fit <- ts(serie_2fit)
serie_ts_2for <- ts(serie_2for)

# Plot the fitting segment to file.
jpeg(file = "C:/Users/Matheus/Desktop/PUC/IC/CiasAereas/Modelos/Lufthansa_LHA_DE/Dados_10a19/Serie_Luft_10a19.jpeg", width = 658, height = 553)
plot(serie_ts_2fit, main = "Serie Temporal Log-return AdjClose Lufthansa 2010-2019 - to fit", type = "l")
dev.off()

# Seasonality check (the original note says decompose recognizes nothing).
decomp_serie <- decompose(serie_ts_2fit)

# Dickey-Fuller test for stationarity: p-value = 0.01, so the series is
# stationary and d = 0 for the ARIMA orders below.
adf.test(serie_ts_2fit, alternative = "stationary")

# ACF and PACF to determine candidate orders (relevant lags: 4 and 11 for
# both MA(q) and AR(p), per the original notes).
jpeg(file = "C:/Users/Matheus/Desktop/PUC/IC/CiasAereas/Modelos/Lufthansa_LHA_DE/Dados_10a19/ACF_Luft_10a19.jpeg", width = 658, height = 553)
Acf(serie_ts_2fit, main = 'ACF Serie Temporal Log-return AdjClose Lufthansa 2010-2019 - to fit')
dev.off()
jpeg(file = "C:/Users/Matheus/Desktop/PUC/IC/CiasAereas/Modelos/Lufthansa_LHA_DE/Dados_10a19/PACF_Luft_10a19.jpeg", width = 658, height = 553)
Pacf(serie_ts_2fit, main = 'PACF Serie Temporal Log-return AdjClose Lufthansa 2010-2019 - to fit')
dev.off()

# Candidate ARMA fits suggested by the ACF/PACF (FitARMA class).
fit400 <- FitARMA(serie_ts_2fit, order = c(4, 0, 0), demean = TRUE, MeanMLEQ = FALSE, pApprox = 30, MaxLag = 30)
fit1100 <- FitARMA(serie_ts_2fit, order = c(11, 0, 0), demean = TRUE, MeanMLEQ = FALSE, pApprox = 30, MaxLag = 30)
fit004 <- FitARMA(serie_ts_2fit, order = c(0, 0, 4), demean = TRUE, MeanMLEQ = FALSE, pApprox = 30, MaxLag = 30)

# Coefficients and summaries of each candidate model.
coef(fit400)
print(fit400)
coef(fit1100)
print(fit1100)
coef(fit004)
print(fit004)

# Best models by AIC, BIC and log-likelihood (per the original notes):
# (4,0,0); (4,0,0); (11,0,0). Inspect their residuals, means and ACFs.
res400 <- residuals.FitARMA(fit400)
res1100 <- residuals.FitARMA(fit1100)
jpeg(file = "C:/Users/Matheus/Desktop/PUC/IC/CiasAereas/Modelos/Lufthansa_LHA_DE/Dados_10a19/Res(4,0,0)_Luft_10a19.jpeg", width = 658, height = 553)
plot(res400, main = "Residuals of ARIMA(4,0,0)", type = "l")
dev.off()
jpeg(file = "C:/Users/Matheus/Desktop/PUC/IC/CiasAereas/Modelos/Lufthansa_LHA_DE/Dados_10a19/Res(11,0,0)_Luft_10a19.jpeg", width = 658, height = 553)
plot(res1100, main = "Residuals of ARIMA(11,0,0)", type = "l")
dev.off()
mean(res400)
mean(res1100)
jpeg(file="C:/Users/Matheus/Desktop/PUC/IC/CiasAereas/Modelos/Lufthansa_LHA_DE/Dados_10a19/ACFRes(4,0,0)_Luft_10a19.jpeg",width = 658, height = 553) Acf(res400,main="ACF for ARIMA(4,0,0) residuals'") dev.off() jpeg(file="C:/Users/Matheus/Desktop/PUC/IC/CiasAereas/Modelos/Lufthansa_LHA_DE/Dados_10a19/ACFRes(11,0,0)_Luft_10a19.jpeg",width = 658, height = 553) Acf(res1100,main="ACF for ARIMA(11,0,0) residuals'") dev.off() #Média próxima de zero, resíduos não correlacionados -> parece