blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d53a5c550f35bab96353fc14840530cd982c24da | 1a5afb2f54a846956e4fd8718d3250ea72f9e621 | /run_analysis.R | e00816c6b7454e535086d8284553fff99d6c6da0 | [] | no_license | rkspivey/TidyDataProject | 6e40a8c6ed5df4fb53f76694003f4f7b27602e4c | 8ae0b13b2443da2bdf58e53dbdd9796fddba0604 | refs/heads/master | 2021-03-12T19:42:24.186748 | 2015-02-22T22:08:16 | 2015-02-22T22:08:16 | 31,102,892 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,286 | r | run_analysis.R | library(reshape2)
##
## runAnalysis
##
## This function will read data from a UCI HAR Dataset directory, merge
## the training and test datasets into one dataset, and extract the mean
## and stddev of each measurement. Function assumes that the 'UCI HAR Dataset'
## directory is in the getwd(). The tidy result (average of every kept
## measurement per subject and activity) is written to "uciHarTidyDataset.txt".
##
runAnalysis <- function() {
  rootDirectoryName <- "UCI HAR Dataset"
  ## step 1: read and merge the training and test sets
  mergedData <- runAnalysisStep1(rootDirectoryName)
  ## step 2: keep only the mean()/std() measurement columns
  extractedData <- runAnalysisStep2(mergedData)
  ## step 3: attach the descriptive activity label, then drop the numeric id
  extractedData$Activity.Label <- runAnalysisStep3(rootDirectoryName, extractedData)
  extractedData$Activity.ID <- NULL
  ## step 5: average every variable per subject and activity
  finalData <- runAnalysisStep5(extractedData)
  ## fix: spell out 'row.names' - the previous 'row.name=' only worked via
  ## R's partial argument matching
  write.table(finalData, file = "uciHarTidyDataset.txt", row.names = FALSE)
}
##
## runAnalysisStep1
##
## This function reads the training and test sets, and then
## merges the training and the test sets to create one data set.
## The return value is the merged data set (training rows followed by test
## rows), with the measurement columns renamed to the variable names in the
## features file and Activity.ID / Subject.ID attached to every row.
##
## rootDirectoryName: path to the 'UCI HAR Dataset' directory.
runAnalysisStep1 <- function(rootDirectoryName) {
  ## read the training data
  subjectTrainData <- read.table(file.path(rootDirectoryName, "train", "subject_train.txt"))
  xTrainData <- read.table(file.path(rootDirectoryName, "train", "X_train.txt"))
  ## fix: the raw dataset ships this file as "y_train.txt" (lower-case "y");
  ## the old "Y_train.txt" spelling only worked on case-insensitive filesystems
  yTrainData <- read.table(file.path(rootDirectoryName, "train", "y_train.txt"))
  ## read the test data
  subjectTestData <- read.table(file.path(rootDirectoryName, "test", "subject_test.txt"))
  xTestData <- read.table(file.path(rootDirectoryName, "test", "X_test.txt"))
  yTestData <- read.table(file.path(rootDirectoryName, "test", "y_test.txt"))
  ## read the features and rename columns in xTrainData and xTestData
  featuresData <- read.table(file.path(rootDirectoryName, "features.txt"))
  colnames(xTrainData) <- featuresData[, 2]
  colnames(xTestData) <- featuresData[, 2]
  ## merge the activity ids in the first column of the y data with x
  xTrainData$Activity.ID <- yTrainData[, 1]
  xTestData$Activity.ID <- yTestData[, 1]
  ## merge the subject ids in the first column of the subject data with x
  xTrainData$Subject.ID <- subjectTrainData[, 1]
  xTestData$Subject.ID <- subjectTestData[, 1]
  ## merge the x data into one dataset
  rbind(xTrainData, xTestData)
}
##
## runAnalysisStep2
##
## Keep only the mean() and std() measurement columns of the merged data,
## carrying the Activity.ID and Subject.ID identifier columns along unchanged.
##
runAnalysisStep2 <- function(mergedData) {
  measurementNames <- colnames(mergedData)
  ## indices of columns whose name contains "-mean()" or "-std()"
  ## (this deliberately excludes e.g. "-meanFreq()" columns)
  keep <- grep("\\-mean\\(\\)|\\-std\\(\\)", measurementNames)
  result <- mergedData[, keep]
  ## re-attach the identifier columns after the measurements
  result[["Activity.ID"]] <- mergedData[["Activity.ID"]]
  result[["Subject.ID"]] <- mergedData[["Subject.ID"]]
  result
}
## runAnalysisStep3
##
## Return vector of labeled activities for the mergedData activity ids.
##
## rootDirectoryName: path to the 'UCI HAR Dataset' directory (must contain
##                    activity_labels.txt holding id/label pairs).
## mergedData:        data frame with an Activity.ID column.
## Returns one descriptive label per row of mergedData.
runAnalysisStep3 <- function(rootDirectoryName, mergedData) {
  activityLabels <- read.table(paste(rootDirectoryName, "/activity_labels.txt", sep=""))
  ## look up each Activity.ID in column 1 of the labels table and return the
  ## matching descriptive name from column 2
  activityLabels[match(mergedData$Activity.ID, activityLabels[,1]),2]
}
## runAnalysisStep4
##
## This step was performed in step 1
##
## runAnalysisStep5
##
## Return data frame that has average of each variable for each activity and subject
##
## Uses reshape2: melt() reshapes to long form keyed on Subject.ID and
## Activity.Label, then dcast() pivots back to wide form, applying mean()
## to every measurement within each subject/activity cell.
runAnalysisStep5 <- function(mergedData) {
  meltedData <- melt(mergedData, c("Subject.ID", "Activity.Label"))
  dcast(meltedData, Subject.ID + Activity.Label ~ variable, mean)
} |
10ad53c2389104078729234c5b010e0e7d825e43 | 6afc0f2d60331b8ff37f331a0e07100c6b2173a6 | /visualize.R | 890ca33894714a61c1a239245199caa087f61c1a | [] | no_license | bkandel/KandelSparseRegressionIPMI | ca20f8bb084ba0516b8842f0a3148be85a217458 | defb74187303aee20b430b920cea7922363dbed1 | refs/heads/master | 2021-01-17T13:20:10.816467 | 2013-07-01T05:57:06 | 2013-07-01T05:57:06 | 8,888,803 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,943 | r | visualize.R | require(ANTsR)
# Load the template brain volumes used as rendering surfaces (3-D ANTsR images).
glass <- antsImageRead('data/template/glassbrain.nii.gz', 3)
wm <- antsImageRead('data/template/WM.nii.gz', 3)
leftright <- antsImageRead('data/template/leftright.nii.gz', 3)
# Camera orientation matrices for rgl, built from composed rotations.
# NOTE(review): lateralLeft equals sagittalRight and sagittalLeft equals
# lateralRight - confirm this mirrored naming is intentional.
lateralLeft <- rotationMatrix(pi/2, 0, -1, 0) %*% rotationMatrix(pi/2, -1, 0, 0)
sagittalLeft <- rotationMatrix(-pi/2, 0, -1, 0) %*% rotationMatrix(pi/2, -1, 0, 0)
lateralRight <- rotationMatrix(-pi/2, 0, -1, 0) %*% rotationMatrix(pi/2, -1, 0, 0)
sagittalRight <- rotationMatrix(pi/2, 0, -1, 0) %*% rotationMatrix(pi/2, -1, 0, 0)
# Split white matter and glass brain into hemispheres via the left/right
# label image (presumably label 1 = left, label 2 = right - matches the
# variable names below; confirm against the template).
wm.left <- maskImage(wm, leftright, 1)
wm.right <- maskImage(wm, leftright, 2)
glass.left <- maskImage(glass, leftright, 1)
glass.right <- maskImage(glass, leftright, 2)
# For every variable of interest (VOI) and sparsity level, load the six
# precomputed eigenvector images, drop all-zero ones, render them on the
# hemisphere surfaces and save lateral + sagittal rgl snapshots per side.
# NOTE(review): input file names zero-pad sparsity via sprintf('%.2i', ...)
# but the output png names use the unpadded number - confirm that is intended.
for (VOI in c("bnt", "wordlisttotal", "age")){
  for (sparsity in c(01, 03, 05, 07, 10)){
    eigenvectors.left <- list()
    eigenvectors.right <- list()
    # iterate DESCENDING so that deleting an all-zero entry (<- NULL) at a
    # higher index never shifts the positions still to be filled below it
    for (eigvec.number in 5:0) {
      i = eigvec.number + 1 # R lists start at 1, not 0
      eigvec <- antsImageRead(paste('data/precomputed/', VOI, 'Sparse',
        sprintf('%.2i', sparsity),
        'Cluster200TrainView1vec', sprintf('%.3i', eigvec.number), '.nii.gz', sep=''), 3)
      eigenvectors.left[[i]] <- maskImage(eigvec, leftright, 1)
      eigenvectors.right[[i]] <- maskImage(eigvec, leftright, 2)
      if(length(eigenvectors.left[[i]][eigenvectors.left[[i]] > 0]) == 0){
        eigenvectors.left[[i]] <- NULL # delete eigenvectors with only zeros
      }
      if(length(eigenvectors.right[[i]][eigenvectors.right[[i]] > 0]) == 0){
        eigenvectors.right[[i]] <- NULL
      }
    }
    # NOTE(review): the right hemisphere render below is guarded by a
    # length check but the left one is not - confirm the left list can
    # never end up empty.
    vis.left <- renderSurfaceFunction( list( wm.left, glass.left ),
      eigenvectors.left, surfval=0.5, alphasurf=c(1, 0.2),
      basefval = 1.5, alphafunc=1)
    par3d(userMatrix=lateralLeft, windowRect=c(25,25,325,325), zoom=0.8 )
    rgl.snapshot(paste('fig/precomputed/', VOI, 'Sparse', sparsity,
      'Cluster200', '_lateral_left.png', sep='') )
    par3d(userMatrix=sagittalLeft, windowRect=c(25,25,325,325), zoom=0.9)
    rgl.snapshot(paste('fig/precomputed/', VOI, 'Sparse', sparsity,
      'Cluster200', '_sagittal_left.png', sep='') )
    if(length(eigenvectors.right ) > 0 ) {
      vis.right <- renderSurfaceFunction(list(wm.right, glass.right),
        eigenvectors.right, surfval=0.5, alphasurf=c(1, 0.2),
        basefval=1.5, alphafunc=1)
      par3d(userMatrix=lateralRight, windowRect=c(25,25,325,325), zoom=0.8 )
      rgl.snapshot(paste('fig/precomputed/', VOI, 'Sparse', sparsity,
        'Cluster200', '_lateral_right.png', sep='') )
      par3d(userMatrix=sagittalRight, windowRect=c(25,25,325,325), zoom=0.9)
      rgl.snapshot(paste('fig/precomputed/', VOI, 'Sparse', sparsity,
        'Cluster200', '_sagittal_right.png', sep='') )
    }
  }
} |
51d3129ccba23d0d1836411de6813beb232bf955 | 4ccbb995c2336984e5538bd5481eb409ba919124 | /Lessons/Lesson2/Homework/ui.R | 216ae0bef7d6a78bc19be3ba293ad2a12a863a62 | [] | no_license | MatejBreja/GeneralInsurance_Class | 665aef47be6d481c895a12c4f39758fe63dca745 | baf22920ad9c968244b65ab5afcd0d063d603032 | refs/heads/master | 2021-10-15T08:32:43.986588 | 2019-02-05T14:44:19 | 2019-02-05T14:44:19 | 116,681,972 | 1 | 1 | null | 2019-02-05T14:44:32 | 2018-01-08T13:45:15 | HTML | UTF-8 | R | false | false | 222 | r | ui.R |
# Use a fluid Bootstrap layout
fluidPage(
# Give the page a title
titlePanel("Lesson 1 - Homework"),
sidebarLayout(
sidebarPanel(
),
mainPanel(
)
)
) |
a50a992e1144749a67c02565a53dbd4143b1505b | 18a140166805cb23863470428c612a1c8c860f21 | /man/nlf_2pl.Rd | 96fd8b27255458ebe3573a8ee0bdccb9a21eb574 | [] | no_license | nmolanog/bayesuirt | 632e8106ddddbb5c8a59204f5f72479bf6abe352 | ce3d93a38939459a1d92c459c1a09df4d4db7463 | refs/heads/master | 2020-03-23T20:16:13.357649 | 2018-11-28T21:13:05 | 2018-11-28T21:13:05 | 142,031,682 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,293 | rd | nlf_2pl.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bayesuirt.R
\name{nlf_2pl}
\alias{nlf_2pl}
\title{nlf_2pl Function}
\usage{
nlf_2pl(X, psi, z, theta, nitems)
}
\arguments{
\item{X}{design matrix indexing each observation to corresponding item}
\item{psi}{vector of item parameters}
\item{z}{design matrix indexing each observation to corresponding individual}
\item{theta}{vector of latent traits (i.e. random effects associated to individual level)}
\item{nitems}{integer specifying the number of items. It is used to separate parameters as psi[1:nitems] and psi[(nitems+1):(2*nitems)]}
}
\value{
a double vector
}
\description{
This function is the non-linear function associated with the 2PL UIRT model as stated in Molano's thesis.
}
\examples{
data("test_data")
temp<-uirt_DM(test_data,"id")
nitems<-ncol(temp$X)
nind<-ncol(temp$Z)
alpha<-runif(nitems,0.6,1.5) ##simulation of discrimination parameters
beta<-runif(nitems,-2.4,2.4) ##simulation of dificulty parameters
theta<-rnorm(nind) ##simulation of random effects
psi<-c(-alpha*beta,log(alpha)) ###vector of parameters based on alpha and beta
res<-nlf_2pl(temp$X,psi,temp$Z,theta,nitems) ###nonlinear function evaluated at given values
}
\keyword{a}
\keyword{reals}
\keyword{vector}
\keyword{with}
|
fb88e20e2197640dfcf0435e668ab9d461aa6a61 | bcd5f3fcf2ea9cf0ff8a1a0647350d4d7ba1f39e | /data_cleaning_and_analyzing.R | 663c2e75cb07b2c9ee658b32d96ce080b64b64ff | [] | no_license | davisj10/snowfall-prediction-nj | ba422ecddefe448c3606a19ef11fc1164c99a317 | 68054de2ba6a43b2fb46e2549ca71ec121fb54cc | refs/heads/master | 2022-07-06T12:47:26.839760 | 2020-05-14T18:04:17 | 2020-05-14T18:04:17 | 263,974,636 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 19,538 | r | data_cleaning_and_analyzing.R | # author: Justin Davis
# Data Mining I Project
# loading packages
library(plyr)
library(readr)
library(weathermetrics)
library(dplyr)
library(lubridate)
library(ggplot2)
library(reshape2)
library(ggmap)
library(mapproj)
library(devtools)
library(muRL)
install_github('arilamstein/choroplethrZip@v1.4.0')
library(choroplethrZip)
library(tibble)
library(tidyverse)
library(cluster)
library(factoextra)
library(FNN)
library(caret)
library(usedist)
# set seed here
set.seed(10)
### DEFINING FUNCTIONS HERE ###
# Read one forecast CSV and tag every row with the 5-digit zip code that is
# embedded in the file name.
read_csv_filename <- function(filename){
  # pull the first run of exactly five digits out of the path
  zip_code <- regmatches(filename, regexpr("[0-9][0-9][0-9][0-9][0-9]", filename))
  weather <- read.csv(filename, stringsAsFactors = FALSE)
  weather$Zip <- zip_code
  weather
}
# Get lower triangle of correlation matrix
get_lower_tri<-function(cormat){
  # mask the strictly-upper triangle with NA; diagonal and lower half survive
  replace(cormat, upper.tri(cormat), NA)
}
# Get upper triangle of the correlation matrix
get_upper_tri <- function(cormat){
  # mask the strictly-lower triangle with NA; diagonal and upper half survive
  replace(cormat, lower.tri(cormat), NA)
}
# Min-max normalize a numeric vector onto [0, 1].
#
# x     numeric vector to rescale
# na.rm drop NA values when computing the range (default FALSE, matching the
#       original behavior where any NA made the whole result NA)
#
# Note: if every value of x is identical the denominator is zero and the
# result is NaN, exactly as in the original implementation.
normalize <- function(x, na.rm = FALSE) {
  rng <- range(x, na.rm = na.rm)
  (x - rng[1]) / (rng[2] - rng[1])
}
### LOADING DATA AND PERFORMING CLEANING ###
# loading all data for all zip codes
# NOTE(review): absolute, machine-specific path - consider a relative path
# or an argument so the script runs on other machines.
mydir = "C:/Users/jadtr/Desktop/School/Spring 2020/Data Mining I/Project"
myfiles = list.files(path=mydir, pattern="edit.csv", full.names=TRUE)
#myfiles
# combining all zip codes into one data frame
# (plyr::ldply row-binds the per-zip CSVs produced by read_csv_filename)
# NOTE(review): the name 'data' shadows base R's utils::data() function.
data = ldply(myfiles, read_csv_filename)
str(data)
# any NA values -- returns false so we are good!
any(is.na(data))
# convert date_time column to character, so we can convert a date object later on
data$date_time <- as.character(data$date_time)
# preview the result
head(data)
# remove unnecessary columns
# some columns have repeat data and some should not have any noticable impact on predictions
data = subset(data, select = -c(visibility, uvIndex.1,
                                moon_illumination, moonrise, moonset, sunrise,
                                sunset, tempC, FeelsLikeC))
head(data)
# renaming date column
# (dplyr::rename masks plyr::rename here because dplyr is attached later)
data = rename(data, c("Date" = date_time))
head(data)
# converting celsius columns to fahrenheit
data = rename(data, c("MaxTempF" = maxtempC, "MinTempF" = mintempC, "DewPointF" = DewPointC,
                      "HeatIndexF" = HeatIndexC, "WindChillF" = WindChillC))
head(data)
data$MaxTempF <- celsius.to.fahrenheit(data$MaxTempF, round = 1)
data$MinTempF <- celsius.to.fahrenheit(data$MinTempF, round = 1)
data$DewPointF <- celsius.to.fahrenheit(data$DewPointF, round = 1)
data$HeatIndexF <- celsius.to.fahrenheit(data$HeatIndexF, round = 1)
data$WindChillF <- celsius.to.fahrenheit(data$WindChillF, round = 1)
# converting dates from characters to dates
data$Date <- as.Date(data$Date, format = "%Y-%m-%d")
# convert snow totals and precipitation to inches
data = rename(data, c("TotalSnow_IN" = totalSnow_cm, "PrecipIN" = precipMM))
# (the trailing 3 is presumably the rounding digits of metric_to_inches -
# confirm against the weathermetrics documentation)
data$TotalSnow_IN <- metric_to_inches(data$TotalSnow_IN, unit.from = "cm", 3)
data$PrecipIN <- metric_to_inches(data$PrecipIN, unit.from = "mm", 3)
head(data)
# add column for average temp
data$AvgTempF <- ((data$MaxTempF + data$MinTempF)/2)
head(data)
### AGGREGATION BEING DONE HERE ###
# aggregating the daily forecasts to get monthly and yearly data sets
snow_year_month_zip_date <- data %>%
  group_by(Zip, yearMonth = floor_date(Date, "month")) %>%
  summarize(snow=sum(TotalSnow_IN), prec=sum(PrecipIN), minTempF = min(MinTempF),
            maxTempF = max(MaxTempF), avgTempF = mean(AvgTempF), avgSunHours = mean(sunHour),
            avgUVIndex = mean(uvIndex), avgDewPointF = mean(DewPointF), avgHeatIndexF = mean(HeatIndexF),
            avgWindChillF = mean(WindChillF), avgWindGustKmph = mean(WindGustKmph), avgWindSpeedKmph = mean(windspeedKmph),
            avgWindDir = mean(winddirDegree), avgPressure = mean(pressure), avgHumidity = mean(humidity), avgCloudCover = mean(cloudcover))
# reformatting the date to get rid of the day
snow_year_month_zip <- snow_year_month_zip_date
snow_year_month_zip$yearMonth <- format(snow_year_month_zip$yearMonth, "%Y-%m")
snow_year_month_zip <- as.data.frame(snow_year_month_zip)
#snow_year_month_zip
# aggregating the daily forecasts to get yearly data sets
snow_year_zip <- data %>%
  group_by(Zip, year = floor_date(Date, "year")) %>%
  summarize(snow=sum(TotalSnow_IN), prec=sum(PrecipIN), minTempF = min(MinTempF),
            maxTempF = max(MaxTempF), avgTempF = mean(AvgTempF), avgSunHours = mean(sunHour),
            avgUVIndex = mean(uvIndex), avgDewPointF = mean(DewPointF), avgHeatIndexF = mean(HeatIndexF),
            avgWindChillF = mean(WindChillF), avgWindGustKmph = mean(WindGustKmph), avgWindSpeedKmph = mean(windspeedKmph),
            avgWindDir = mean(winddirDegree), avgPressure = mean(pressure), avgHumidity = mean(humidity), avgCloudCover = mean(cloudcover))
# reformatting the date to get rid of the month and day
snow_year_zip$year <- format(snow_year_zip$year, "%Y")
snow_year_zip <- as.data.frame(snow_year_zip)
#snow_year_zip
### THE DATA AGGREGATED SO FAR NOW NEEDS TO BE GROUPED INTO SEASONS
## for instance, snow season of 2009-2010 goes from Oct 2009 - Mar 2010
# the data for month and year grouping has been computed already
seasons_year_zip <- snow_year_month_zip_date
# don't have the rest of the data for the 2008-2009 season, so get rid of the 2009 portion
seasons_year_zip <- seasons_year_zip[!(seasons_year_zip$yearMonth == "2009-01-01") & !(seasons_year_zip$yearMonth == "2009-02-01") & !(seasons_year_zip$yearMonth == "2009-03-01"),]
# extract the month portion of the date
# NOTE(review): 't' shadows base::t() (matrix transpose); a more descriptive
# name would avoid surprises later in the session.
t <- format(seasons_year_zip$yearMonth, "%m")
# place it back into dataset
seasons_year_zip$yearMonth <- t
# set up a condition to only use months within the season
condition <- seasons_year_zip$yearMonth %in% c("01", "02", "03", "10", "11", "12")
# keep rows that are true
seasons_year_zip <- seasons_year_zip[condition,]
# now, aggregate all variables for every 6 rows - each 6 rows is a season for a zip code
# NOTE(review): this fixed 6-row blocking assumes every zip has all six
# season months present and rows are ordered by zip then month - a single
# missing month would shift every later season's grouping. Confirm the data
# always satisfies this.
grouped_seasons <- seasons_year_zip %>%
  group_by(Zip, season = as.integer(gl(n(), 6, n()))) %>%
  summarize(snow=sum(snow), prec=sum(prec), minTempF = min(minTempF),
            maxTempF = max(maxTempF), avgSunHours = mean(avgSunHours),
            avgUVIndex = mean(avgUVIndex), avgDewPointF = mean(avgDewPointF), avgHeatIndexF = mean(avgHeatIndexF),
            avgWindChillF = mean(avgWindChillF), avgWindGustKmph = mean(avgWindGustKmph), avgWindSpeedKmph = mean(avgWindSpeedKmph),
            avgWindDir = mean(avgWindDir), avgPressure = mean(avgPressure), avgHumidity = mean(avgHumidity), avgCloudCover = mean(avgCloudCover))
# checking it worked
head(grouped_seasons,12)
# convert season numbers to year ranges
seasons <- c("2009-2010", "2010-2011", "2011-2012", "2012-2013", "2013-2014", "2014-2015", "2015-2016", "2016-2017", "2017-2018", "2018-2019", "2019-2020")
s <- grouped_seasons$season
s <- seasons[s]
grouped_seasons$season <- s
# check it worked
head(grouped_seasons$season)
### PERFORMING BASIC STAT CALCULATIONS AND GRAPHS ###
# plotting the year versus snow amounts for all zip codes
ggplot(data = grouped_seasons, aes(x = season, y = snow, colour = Zip)) + geom_point() +
  theme(axis.text.x = element_text(angle = 60, hjust = 1)) +
  ggtitle("Snow Totals per Season and Zip Code")
# plotting the year versus average temp for all zip codes
# NOTE(review): geom_hline(yintercept = c()) below draws nothing - a
# reference value was probably meant there.
ggplot(data = grouped_seasons, aes(x = season, y = abs(maxTempF-minTempF)/2, colour = Zip)) + geom_point() +
  ylab("Avg Temp (F)") +
  geom_hline(yintercept = c(), color="blue") +
  ggtitle("Average Temp per Season and Zip") +
  theme(axis.text.x = element_text(angle = 60, hjust = 1))
## we can see from this that the temperatures never really changes much year to year, however,
## for 2020, the average temperature is much lower -- this is because there is only data for the
## first 3 months, which are some of the coldest
## plotting one more time with precipiation amounts
ggplot(data = grouped_seasons, aes(x = season, y = prec, colour = Zip)) + geom_point() +
  ggtitle("Precipitation per Season and Zip") +
  theme(axis.text.x = element_text(angle = 60, hjust = 1))
## let's plot using season date now
## let's examine the snow fall totals at each zip for each season
ggplot(data = grouped_seasons, aes(x = Zip, y = snow, colour = Zip)) + geom_point() + facet_wrap(~season) + theme(axis.text.x=element_blank()) +
  ggtitle("Snow Totals for Season over All Seasons")
### let's search for some specific features now ###
# 1 - the highest snow amount in a year and where it was (let's extract the whole row!)
# the highest amount should be in north jersey since they typically get more snow
max_snow <- grouped_seasons[which.max(grouped_seasons$snow),]
max_snow
# examining it on the map, this point is right near the northernmost point of NJ - this makes sense!
# 2 - now let's look at the lowest snowfall total
min_snow <- grouped_seasons[which.min(grouped_seasons$snow),]
min_snow
## again, this gives us a point closest to the bottom of NJ and near the shore
### Plotting of all zip codes we will be using here along with snow totals for each zip code
# plotting the zip codes on the map
uniqueZips <- distinct(data, Zip)
# rename Zip to zip so function can plot the zips
uniqueZips <- rename(uniqueZips, c("zip" = Zip))
#uniqueZips - list the zip codes
# plot zips on map of NJ
zip.plot(uniqueZips, map.type = "county", region = "new jersey", col = "red", pch = 20)
# plotting snow totals on map of nj based on zip codes
# get the average snow fall amount from all the seasons
# NOTE(review): mean(snow, 2) passes 2 as the 'trim' argument (trim values
# above 0.5 are treated as 0.5, i.e. the median) - round(mean(snow), 2) was
# likely intended.
zip_snow <- grouped_seasons %>%
  group_by(region = Zip) %>%
  summarize(value = mean(snow,2))
# create a condition to remove zip codes that cannot be mapped
condition2 <- zip_snow$region %in% c("08645", "08754", "08803", "08875", "08906")
# remove bad zip codes
zip_snow <- zip_snow[!condition2,]
# create a map of NJ to show the average snowfall total
# maps values to zip code sections - sections with no data will be grouped together
# this shows totals with boundaries
zip_choropleth(zip_snow,
               state_zoom = "new jersey",
               title = "2009-2020 Average Snow Total Per Zip",
               legend = "Snow (Inches)") + coord_map()
data("zip.regions")
# same plot as before, but gets rid of boundary lines
choro = choroplethrZip::ZipChoropleth$new(zip_snow)
choro$prepare_map()
choro$legend = "Snowfall (Inches)"
ec_zips = zip.regions[zip.regions$state.name %in% "new jersey", "region"]
ec_df = choro$choropleth.df[choro$choropleth.df$region %in% ec_zips, ]
ec_plot = choro$render_helper(ec_df, "", choro$theme_clean()) +
  ggtitle("2009-2020 Average Snow Total Per Zip")
ec_plot + coord_map()
### CLUSTERING DONE HERE
## first normalize all data rows - all are numeric, so it will make it easier!
# aggregate first to get average amounts for each zip code over all seasons
average_seasons <- grouped_seasons %>%
  group_by(Zip) %>%
  summarize(snow=mean(snow), prec=mean(prec), minTempF = min(minTempF),
            maxTempF = max(maxTempF), avgSunHours = mean(avgSunHours),
            avgUVIndex = mean(avgUVIndex), avgDewPointF = mean(avgDewPointF), avgHeatIndexF = mean(avgHeatIndexF),
            avgWindChillF = mean(avgWindChillF), avgWindGustKmph = mean(avgWindGustKmph), avgWindSpeedKmph = mean(avgWindSpeedKmph),
            avgWindDir = mean(avgWindDir), avgPressure = mean(avgPressure), avgHumidity = mean(avgHumidity), avgCloudCover = mean(avgCloudCover))
# standardize all columns
# NOTE(review): despite the surrounding comments, this applies scale()
# (z-scores), not the min-max normalize() helper defined above - which is
# never called anywhere in this script. Confirm which scaling was intended.
normalized <- data.frame(average_seasons$Zip, apply(average_seasons[,2:16], 2, scale))
normalized = rename(normalized, c("Zip" = 1))
# change row names to zip code for distance matrix calculations
normalized_rows <- normalized[,-1]
rownames(normalized_rows) <- normalized[,1]
# now let's get the distance matrix!
distance <- get_dist(normalized_rows)
# display the distance matrix
fviz_dist(distance, show_labels = TRUE, lab_size = 7)
# display part of the distance matrix to show on powerpoint
fviz_dist(dist_subset(distance, c(1:15,1:15)), show_labels = TRUE, lab_size = 9)
# examine the elbow graph to determine best k - between 2 or 4
fviz_nbclust(normalized_rows, kmeans, method = "wss")
# silhouette shows 2 as the best, with 4 & 10 as close seconds
fviz_nbclust(normalized_rows, kmeans, method = "silhouette")
# now let's compute the clustering - with a k value of 4
# (seed is re-set so the kmeans() random starts are reproducible here)
set.seed(10)
k4 <- kmeans(normalized_rows, centers = 4, nstart = 25)
# plot the clusters
s <- fviz_cluster(k4, data = normalized_rows)
# k4$cluster - shows the zips and cluster numbers
# add cluster number as a column and seperate rows based on cluster
clustered_data <- cbind(uniqueZips, clusterNum = k4$cluster)
cluster1 <- clustered_data[clustered_data$clusterNum == 1,]
cluster2 <- clustered_data[clustered_data$clusterNum == 2,]
cluster3 <- clustered_data[clustered_data$clusterNum == 3,]
cluster4 <- clustered_data[clustered_data$clusterNum == 4,]
# plot to see where they are side-by-side
par(mfrow=c(1,4))
clust_map_1 <- zip.plot(cluster1, map.type = "county", region = "new jersey", col = "red", pch = 20, cex = 2) + title("Cluster 1")
clust_map_2 <- zip.plot(cluster2, map.type = "county", region = "new jersey", col = "green", pch = 20, cex = 2) + title("Cluster 2")
clust_map_3 <- zip.plot(cluster3, map.type = "county", region = "new jersey", col = "blue", pch = 20, cex = 2) + title("Cluster 3")
clust_map_4 <- zip.plot(cluster4, map.type = "county", region = "new jersey", col = "purple", pch = 20, cex = 2) + title("Cluster 4")
### CORRELATION AND PREDICTION BELOW
### graph correlation between variables - make a heatmap
## code inspired from: http://www.sthda.com/english/wiki/ggplot2-quick-correlation-matrix-heatmap-r-software-and-data-visualization
# remove the zip and season columns
num_seasons <- grouped_seasons[,-c(1:2)]
# first normalize the variables (z-score each column)
num_seasons <- data.frame(apply(num_seasons, 2 , scale))
# find correlation values between all variables
cormat <- round(cor(num_seasons),2)
# find correlation values that affect snow totals
# NOTE(review): cormat_snow appears unused in the rest of this script -
# confirm whether it can be removed or was meant to drive the knn feature
# selection below.
cormat_snow <- round(cor(num_seasons, num_seasons$snow),2)
# get values in lower triangle and only display them
# (melt on the matrix yields Var1/Var2 row/column name pairs plus value)
lower_tri <- get_lower_tri(cormat)
heat_map <- melt(lower_tri, na.rm = TRUE)
# plot a heat map of the correlation values
ggplot(data = heat_map, aes(x=Var1, y=Var2, fill=value)) +
  geom_tile(color = "white") +
  scale_fill_gradient2(low = "blue", high = "red", mid = "white", midpoint = 0, limit = c(-1,1), space = "Lab", name="Pearson\nCorrelation") +
  theme_minimal() + theme(axis.text.x = element_text(angle = 60, hjust = 1)) +
  ggtitle("Correlation between Variables") +
  coord_fixed()
### k-nearest neighbor regression to predict snowfall amounts
# first sort rows by season
sorted_seasons <- grouped_seasons[order(grouped_seasons$season),]
# remove zip code and season column - not needed
knn_data <- sorted_seasons[,-c(1:2)]
# select the variables that have a positive pearson correlation
knn_data <- subset(knn_data, select = c("snow", "prec", "maxTempF", "avgWindDir", "avgPressure", "avgCloudCover"))
# scale the data down
# NOTE(review): scaling is applied to the FULL data before the train/test
# split, so the test season's values influence the training features
# (information leakage) - confirm this is acceptable for the analysis.
knn_data_scaled <- apply(knn_data, 2, scale)
# convert to data frame
knn_data <- data.frame(knn_data_scaled)
# split our data into testing and training data - ~90% training and ~10% testing
# this model trains on the first 10 seasons and predicts the last 1
# (the "1*69" below is just minus 69 rows, one season's worth of zips)
smp_ind <- floor(nrow(knn_data)-1*69)
# get testing and training sets
train_data <- subset(knn_data[1:smp_ind,], select = -c(snow))
test_data <- subset(knn_data[(smp_ind+1):nrow(knn_data),], select = -c(snow))
# getting the actual outcome values for both sets
snow_outcome <- knn_data %>% select(snow)
train_outcome <- as.data.frame(snow_outcome[1:smp_ind,])
train_outcome = rename(train_outcome, c("snow" = 1))
test_outcome <- as.data.frame(snow_outcome[(smp_ind+1):nrow(knn_data),])
test_outcome = rename(test_outcome, c("snow" = 1))
### Plot model accuracy vs different values of k
# create a data frame to store accuracy for each value of k
# trying k-values from 1-100 (excluding 2)
k <- as.data.frame(c(1,3:100))
# rename column
k = rename(k, "k" = c(1))
# add an accuracy column that is initially 0
k$accuracy <- 0
# NOTE(review): snow_scale_mean/snow_scale_sd inside the loop are
# loop-invariant and could be hoisted out for clarity and speed.
for (i in k$k) {
  # perform regression here
  results <- knn.reg(train_data, test_data, train_outcome, k = i)
  # get predicted values
  pred_values <- results[["pred"]]
  # add actual test values to data set
  examine <- test_outcome
  # rename the column
  examine = rename(examine, "actual" = c(1))
  ## need to unscale data now
  snow_scale_mean <- mean(sorted_seasons$snow)
  snow_scale_sd <- sd(sorted_seasons$snow)
  unscaled_pred <- pred_values * snow_scale_sd + snow_scale_mean
  unscaled_actual <- test_outcome * snow_scale_sd + snow_scale_mean
  # add unscaled values back
  examine$actual <- as.numeric(unlist(unscaled_actual))
  # add predicted values
  examine$pred <- unscaled_pred
  # add the differences as a column
  examine$diff <- abs(examine$actual-examine$pred)
  # create a column for error (percent error versus the actual total)
  examine$error <- (examine$diff/examine$actual)*100
  # NOTE(review): as.double() ignores its second argument - if rounding to
  # 2 decimals was intended, use round(examine$error, 2) instead.
  examine$error <- as.double(examine$error, 2)
  # get number of good guesses
  # NOTE(review): error <= 30 counts as "good" here, but the plot below
  # marks error >= 30 as "bad", so an error of exactly 30 lands in both.
  numGood <- length(which(examine$error <= 30))
  # get the percentage correct (accuracy)
  percGood <- (numGood/nrow(test_data))*100
  # k values are c(1, 3:100): value i lives in row 1 when i == 1 and in
  # row i - 1 otherwise (k = 2 was skipped).
  if(i != 1) {
    k$accuracy[i-1] <- percGood
  }
  else { # skipped k = 2
    k$accuracy[i] <- percGood
  }
}
# plot the accuracy versus k-value for all the k-values we tested
ggplot(data = k, aes(k, accuracy)) + geom_point() + geom_line(color = "red") + ggtitle("Accuracy of k-values") +
  theme(plot.title = element_text(hjust = 0.5))
# determine the best k from those tested
best_k <- k$k[which.max(k$accuracy)]
# get the accuracy
k$accuracy[best_k-1]
# get the zip codes for the test set
test_zips <- sorted_seasons[(smp_ind+1):nrow(knn_data),]$Zip
# add the zip codes to the examine data set
# (examine holds the results of the LAST loop iteration, i.e. k = 100, not
# necessarily best_k - confirm this is what should be plotted)
examine$zips <- test_zips
# add a group variable so can tell if it was a good prediction or not
examine$group <- ifelse(examine$error >= 30, 1, 0)
# plot the error for the 2019-2020 season
# NOTE(review): the title hard-codes "k = 52"; building it from best_k
# (e.g. paste0) would keep it in sync with the data.
ggplot(data = examine, aes(fill = factor(group), x = zips, y = error)) +
  geom_bar(stat="identity") +
  ggtitle("Prediction Error for Season 2019-2020 with k = 52") +
  xlab("Zip") + ylab("% error") +
  theme(axis.text.x = element_text(angle = 60, hjust = 1), axis.text=element_text(size=6)) +
  geom_hline(yintercept=30, linetype="dashed", color = "red", size = 1) +
  scale_fill_manual(name="Predictions",
                    labels=c("Good","Bad"),
                    values=c("royalblue4","red4"))
# plotting good and bad zips on the map
good_zips <- subset(data.frame(examine[examine$group == 0,]), select = c("zips"))
bad_zips <- subset(data.frame(examine[examine$group == 1,]), select = c("zips"))
par(mfrow=c(1,2))
gz_map <- zip.plot(good_zips, map.type = "county", region = "new jersey", col = "royalblue4", pch = 20, cex = 1.5) + title("Good Zips")
bz_map <- zip.plot(bad_zips, map.type = "county", region = "new jersey", col = "red4", pch = 20, cex = 1.5) + title("Bad Zips")
|
5a523aafccd3f51eb9df7efe893e69ebc224df32 | 0d3a95f2f32842a5bc7626c101d524f0144de8af | /R/animation.R | 401626be391f66a3fbd10907b0e1da2256a01315 | [] | no_license | Jun-Lizst/r3dmol | 85943c441d1d992fa02668709b0130480c926d64 | 57883d10b598eddc5a1d81609b2ce19d56160982 | refs/heads/master | 2023-08-04T12:34:11.203731 | 2021-09-16T15:45:27 | 2021-09-16T15:45:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,426 | r | animation.R | #' Rotate scene by angle degrees around axis
#'
#' @param id R3dmol \code{id} or a \code{r3dmol} object (the output from
#' \code{r3dmol()})
#' @param angle Angle, in degrees \code{numeric}, to rotate by.
#' @param axis Axis (\code{"x"}, \code{"y"}, \code{"z"}, \code{"vx"},
#' \code{"vy"}, \code{"vz"})
#' to rotate around. Default \code{"y"}. View relative (rather than model
#' relative) axes are prefixed with \code{"v"}. Axis can also be specified as a
#' vector.
#' @param animationDuration an optional parameter of milliseconds \code{numeric}
#' that denotes the duration of the rotation animation. Default \code{0} (no
#' animation)
#' @param fixedPath if \code{true} animation is constrained to
#' requested motion, overriding updates that happen during the animation
#'
#' @return R3dmol \code{id} or a \code{r3dmol} object (the output from
#' \code{r3dmol()})
#' @export
#'
#' @examples
#' library(r3dmol)
#' r3dmol() %>%
#' m_add_model(data = pdb_6zsl, format = "pdb") %>%
#' m_rotate(angle = 90, axis = "y", animationDuration = 1000)
m_rotate <- function(id, angle, axis = "v", animationDuration = 0, fixedPath) {
if (!axis %in% c("x", "y", "z", "vx", "vy", "vz") &&
class(axis) != "Vector3") {
stop("Unknow axis.")
}
method <- "rotate"
callJS()
}
#' Continuously rotate a scene around the specified axis
#'
#' @param id R3dmol \code{id} or a \code{r3dmol} object (the output from
#' \code{r3dmol()})
#' @param axis Axis (\code{"x"}, \code{"y"}, \code{"z"}, \code{"vx"},
#' \code{"vy"}, \code{"vz"})
#' to rotate around. Default \code{"y"}. View relative (rather than model
#' relative) axes are prefixed with \code{"v"}.
#' @param speed Speed multiplier for spin animation. Defaults to 1. Negative
#' value reverses the direction of spin.
#'
#' @return R3dmol id or a \code{r3dmol} object (the output from \code{r3dmol()})
#' @export
#'
#' @examples
#' library(r3dmol)
#' model <- r3dmol() %>%
#' m_add_model(data = pdb_6zsl, format = "pdb") %>%
#' m_set_style(style = m_style_cartoon(color = "spectrum")) %>%
#' m_zoom_to()
#'
#' # spin the model
#' model %>% m_spin()
#'
#' # reverses the direction of spin
#' model %>% m_spin(speed = -0.5)
m_spin <- function(id, axis = "y", speed = 1) {
  # Continuously spin the scene around `axis` at `speed` (negative reverses).
  #
  # The axis check is scalar-safe: length is verified before %in% so vector
  # input fails cleanly instead of feeding a multi-element logical to `if`.
  # The "Unknow axis." error-message typo is corrected.
  if (!(is.character(axis) && length(axis) == 1 &&
        axis %in% c("x", "y", "z", "vx", "vy", "vz"))) {
    stop("Unknown axis.")
  }
  if (!is.numeric(speed)) {
    stop("Speed multiplier must be numeric.")
  }
  # NOTE(review): callJS() appears to serialize this call frame, so no
  # helper locals are introduced before dispatch -- confirm against callJS().
  method <- "spin"
  callJS()
}
#' Stop animation of all models in viewer
#'
#' @param id R3dmol \code{id} or a \code{r3dmol} object (the output from
#' \code{r3dmol()})
#'
#' @export
#'
m_stop_animate <- function(id) {
  # Thin dispatcher: halts animation of all models in the viewer.
  # NOTE(review): callJS() appears to read this call frame (`id`, `method`)
  # to build the message sent to 3Dmol.js -- confirm against callJS()'s
  # definition before adding any locals here.
  method <- "stopAnimate"
  callJS()
}
#' @rdname m_translate
#' @export
m_translate <- function(id, x, y, animationDuration, fixedPath) {
  # Thin dispatcher for the 3Dmol.js "translate" method (model-relative).
  # NOTE(review): callJS() appears to serialize this call frame's variables
  # (id, x, y, animationDuration, fixedPath, method); missing optional
  # arguments are presumably dropped during serialization -- confirm.
  method <- "translate"
  callJS()
}
#' @rdname m_translate
#' @export
m_translate_scene <- function(id, x, y, animationDuration, fixedPath) {
  # View-relative counterpart of m_translate(); dispatches the 3Dmol.js
  # "translateScene" method.
  # NOTE(review): callJS() appears to read this call frame -- avoid adding
  # locals here; confirm against callJS()'s definition.
  method <- "translateScene"
  callJS()
}
#' Zoom current view by a constant factor
#'
#' @param id R3dmol \code{id} or a \code{r3dmol} object (the output from
#' \code{r3dmol()})
#' @param factor Magnification \code{numeric} factor. Values greater than
#' \code{1} will
#' zoom in, less than one will zoom out. Default \code{2}.
#' @param animationDuration an optional parameter of milliseconds \code{numeric}
#' that denotes the duration of a zoom animation
#' @param fixedPath if \code{true} animation is constrained to
#' requested motion, overriding updates that happen during the animation
#'
#' @return R3dmol \code{id} or a \code{r3dmol} object (the output from
#' \code{r3dmol()})
#' @export
#'
#' @examples
#' library(r3dmol)
#'
#' r3dmol() %>%
#' m_add_model(data = pdb_6zsl, format = "pdb") %>%
#' m_zoom_to() %>%
#' m_zoom(factor = 2, animationDuration = 1000)
m_zoom <- function(id, factor = 2, animationDuration, fixedPath) {
  # Thin dispatcher: zoom the current view by `factor` (>1 in, <1 out).
  # NOTE(review): callJS() appears to serialize this call frame's variables
  # to 3Dmol.js -- avoid adding locals; confirm against callJS().
  method <- "zoom"
  callJS()
}
#' Zoom to center of atom selection
#'
#' Zoom to center of atom selection. The slab will be set appropriately for
#' the selection, unless an empty selection is provided, in which case there
#' will be no slab.
#'
#' @param id R3dmol \code{id} or a \code{r3dmol} object (the output from
#' \code{r3dmol()})
#' @param sel Selection specification specifying model and atom
#' properties to select. Default: all atoms in viewer.
#' @param animationDuration an optional parameter of milliseconds \code{numeric}
#' that denotes the duration of a zoom animation
#' @param fixedPath if \code{true} animation is constrained to
#' requested motion, overriding updates that happen during the animation
#'
#' @return R3dmol \code{id} or a \code{r3dmol} object (the output from
#' \code{r3dmol()})
#' @export
#'
#' @examples
#' library(r3dmol)
#'
#' r3dmol() %>%
#' m_add_model(data = pdb_6zsl, format = "pdb") %>%
#' m_zoom_to()
m_zoom_to <- function(id, sel, animationDuration, fixedPath) {
  # Thin dispatcher: zoom/center on the atom selection `sel` (all atoms when
  # omitted, per the roxygen above).
  # NOTE(review): callJS() appears to read this call frame -- avoid adding
  # locals here; confirm against callJS()'s definition.
  method <- "zoomTo"
  callJS()
}
#' Add model's vibration
#'
#' If atoms have dx, dy, dz properties (in some xyz files),
#' vibrate populates each model's frame property based on parameters.
#' Models can then be animated.
#'
#' @param id R3dmol \code{id} or a \code{r3dmol} object (the output from
#' \code{r3dmol()})
#' @param numFrames Number of frames to be created, default to 10
#' @param amplitude Amplitude of distortion, default to 1 (full)
#' @param bothWays If true, extend both in positive and negative directions by
#' numFrames
#' @param arrowSpec Specification for drawing animated arrows. If color isn't
#' specified,
#' atom color (sphere, stick, line preference) is used.
#'
#' @return R3dmol \code{id} or a \code{r3dmol} object (the output from
#' \code{r3dmol()})
#' @export
#'
#' @examples
#' library(r3dmol)
#'
#' xyz <- "4
#' * (null), Energy -1000.0000000
#' N 0.000005 0.019779 -0.000003 -0.157114 0.000052 -0.012746
#' H 0.931955 -0.364989 0.000003 1.507100 -0.601158 -0.004108
#' H -0.465975 -0.364992 0.807088 0.283368 0.257996 -0.583024
#' H -0.465979 -0.364991 -0.807088 0.392764 0.342436 0.764260
#' "
#'
#' r3dmol() %>%
#' m_add_model(data = xyz, format = "xyz") %>%
#' m_set_style(style = m_style_stick()) %>%
#' m_vibrate(numFrames = 10, amplitude = 1) %>%
#' m_animate(options = list(loop = "backAndForth", reps = 0)) %>%
#' m_zoom_to()
m_vibrate <- function(id, numFrames, amplitude, bothWays, arrowSpec) {
  # Thin dispatcher: populate per-model animation frames from dx/dy/dz atom
  # properties (see roxygen above) via the 3Dmol.js "vibrate" method.
  # NOTE(review): callJS() appears to serialize this call frame's variables
  # -- avoid adding locals here; confirm against callJS().
  method <- "vibrate"
  callJS()
}
|
203206e10c081e5851ac7ae13f1c2802791ea072 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/tswge/examples/fig12.1b.Rd.R | 656bc9482b9712fd4a531e86a3dbfd58c1898789 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 282 | r | fig12.1b.Rd.R | library(tswge)
### Name: fig12.1b
### Title: Simulated data with two frequencies shown in Figure 12.1b in
### Applied Time Series Analysis with R, second edition by Woodward,
### Gray, and Elliott
### Aliases: fig12.1b
### Keywords: datasets
### ** Examples
# Attach the fig12.1b example dataset (simulated series with two
# frequencies, per the header above) from the tswge package
data(fig12.1b)
|
c652ddfab1a23eca93d6dbac1cde707b3403b80c | 55bfb6f0c613d1beb67b40aa99e531eb644d4351 | /man/get_genomic_sequence.Rd | 4eff1a98017fcbbbdcb45e2593d613cba93fdd02 | [] | no_license | EricBryantPhD/mutagenesis | 3bc391acb86b4796eff0c2ae826d6c65af507d6f | 0fe642a2addf0734f31df29aa3be1c069cf420d2 | refs/heads/master | 2020-12-09T11:03:54.657411 | 2018-01-27T23:10:32 | 2018-01-27T23:10:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,533 | rd | get_genomic_sequence.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get-sequence.R
\name{get_genomic_sequence}
\alias{get_genomic_sequence}
\alias{get_genomic_variant}
\alias{get_coding_sequence}
\alias{get_coding_variant}
\title{Get genomic sequences given ranges}
\usage{
get_genomic_sequence(chr, strand, start, end, genome)
get_genomic_variant(chr, strand, start, end, vcf, genome)
get_coding_sequence(chr, strand, start, end, cds, genome)
get_coding_variant(chr, strand, start, end, cds, vcf, genome)
}
\arguments{
\item{chr}{\code{[character]}
Chromosome names. Must match names returned by \code{names(genome)}.}
\item{strand}{\code{[character]}
Sequence strands (+|-).}
\item{start}{\code{[integer]}
Start coordinates of ranges.}
\item{end}{\code{[integer]}
End coordinates of ranges.}
\item{genome}{\code{[BSgenome|DNAStringSet]}
A reference genome. See Details.}
}
\description{
Get genomic sequences given ranges
}
\details{
The reference genome can be either a \code{BSgenome} object from a
BSgenome reference package (see \link[BSgenome:BSgenome]{BSgenome::BSgenome}), or a \code{DNAStringSet}
object (see \link[Biostrings:DNAStringSet]{Biostrings::DNAStringSet}). \code{BSgenome} objects offer faster
sequence aquisition, but are limited to existing BSgenome packages
(see \link[BSgenome:available.genomes]{BSgenome::available.genomes}) whereas \code{DNAStringSet} objects can
be easily created from any standard FASTA file using
\link[Biostrings:readDNAStringSet]{Biostrings::readDNAStringSet}.
}
|
7c0ab91b1aae039b0f85942bef253cc2fa7f050d | d7e4ec0af1285425481ee4c44d2658fdddaf04db | /List_to_Map_Geocoding.R | 6434df87d3fac8faed43a30dfe2aa43706dd4554 | [] | no_license | rubenmarcos/address_list_to_geocoding | b73129f7bf4fcb6b0825210d71efcef8add32538 | 20e15fab4076f4afea4899bb6d538a786a3c6aa2 | refs/heads/master | 2020-12-21T21:21:16.134073 | 2020-01-27T21:27:21 | 2020-01-27T21:27:21 | 236,565,819 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,381 | r | List_to_Map_Geocoding.R | library(ggmap)
# Load the list of addresses from CSV into a data frame (easiest way to work).
# The goal is a table with enough detail for the geocoder to recognise each
# address, while keeping the other columns for segmentation on the map.
# If other sources are required (DB or scraped web content), change this line
# for the code required for the connection.
schools_madrid <- read.csv("colegios_madrid.csv", sep = ",", encoding = "ISO", stringsAsFactors = FALSE, header = TRUE)
# Paste street and city together to give the geocoder a more detailed query
schools_madrid$ADDRESS <- paste(schools_madrid$DOMICILIO, schools_madrid$MUNICIPIO)
# Geocoding API key from Google Cloud Platform (paid, but some free credit is
# available at sign-up). How to get an API key:
# https://developers.google.com/maps/documentation/geocoding/get-api-key
register_google(key = "your_Geocoding_API_key_here")
# Query each address and record its longitude, latitude, and the formatted
# address returned by the geocoder.
# seq_len(nrow(...)) instead of 1:nrow(...): with an empty CSV the latter
# iterates over c(1, 0) and would index past the (empty) data frame.
for (i in seq_len(nrow(schools_madrid)))
{
  result <- geocode(schools_madrid$ADDRESS[i], output = "latlona", source = "google")
  schools_madrid$lon[i] <- as.numeric(result[1])
  schools_madrid$lat[i] <- as.numeric(result[2])
  schools_madrid$geoAddress[i] <- as.character(result[3])
}
write.csv(schools_madrid, file = "colegios_madrid_location.csv")
|
ffc373c782972e08a73ea75c4e918928165e7367 | 6e5d78bb8fe6d0026e110a6c29c60a012f16e1ff | /Pratical_ML_Coursera/3. model based prediction (QDA,LDA,Naive) with iris example.R | a36358d71718becb0fd8db9439c51801f4d94be7 | [] | no_license | richarddeng88/Advanced_Data_Mining | b2d2b91e9a6f100fc4db8be0c6a53140ee64e6fe | ef386c9fa5293ad0ea69b779b36251b15f8b59f0 | refs/heads/master | 2021-01-15T15:45:23.576894 | 2016-10-22T22:02:42 | 2016-10-22T22:02:42 | 47,933,660 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 561 | r | 3. model based prediction (QDA,LDA,Naive) with iris example.R | data(iris); library(ggplot2)
# Inspect the available iris columns
names(iris)
# Stratified 70/30 train/test split on Species
inTrain <- createDataPartition(y = iris$Species,
                               p = 0.7,
                               list = FALSE)
training <- iris[inTrain, ]
testing <- iris[-inTrain, ]
dim(training); dim(testing)
# Build predictions: fit an LDA model and a naive Bayes model on the
# same training split
modlda <- train(Species ~ ., data = training, method = "lda")
modnb <- train(Species ~ ., data = training, method = "nb")
plda <- predict(modlda, testing)
pnb <- predict(modnb, testing)
# Cross-tabulate the two classifiers' predictions against each other
table(plda, pnb)
# Comparison of results: highlight test points where the models agree
equalPredictions <- (plda == pnb)
qplot(Petal.Width, Sepal.Width, colour = equalPredictions, data = testing)
|
e5485229169c01bf5bf8e676b5cde4a6f892162f | 2e80ba6835ceb74c0645b6d9bd77a31973739af8 | /Task1/Get_tweets.R | 7980dc2d2e5ddeccad2e21f660d0bbb7119de893 | [] | no_license | JatuFaique/Task1 | 6d8832c7898d6e49dcfe4235e42c4c8d567c870e | 42d1db6039eef4b80e293e6510a0af127ad20365 | refs/heads/master | 2022-07-14T07:42:51.060987 | 2020-05-13T08:23:59 | 2020-05-13T08:23:59 | 261,422,544 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 718 | r | Get_tweets.R | library(twitteR)
# Twitter API credentials used to authenticate the twitteR session below.
# SECURITY(review): real-looking OAuth secrets are hard-coded and committed
# in source. They should be revoked and loaded from the environment instead,
# e.g. Sys.getenv("TWITTER_CONSUMER_KEY").
consumer_key <- "1CmDGAv9GKVK8jpq7RyN4MbNv"
consumer_key_secret <- "urqSMn7Q2Oquw0IirU21TIEYIOTvrR4Co3WwQoaubjc4ZzLd8M"
access_token <- "3224456528-pPGzt66TapKaPWcbRAoP6krK5vAg5wnRGNKQNsI"
access_token_secret <- "oH1ZU78KFXhWIOObZZgMz7whLPZPN5pECjJRqXhOrgU7u"
# Authenticate this R session against the Twitter API
setup_twitter_oauth(consumer_key, consumer_key_secret,
                    access_token,access_token_secret)
# Prompt interactively for a hashtag and return it prefixed with "#".
# Side effect: also assigns the prefixed tag to the GLOBAL variable `tag_1`
# (via <<-), which the searchTwitter() call below depends on.
read_hashtag_tweets <- function() {
  raw_tag <- readline(prompt = "Enter an hashtag: ")
  tag_1 <<- paste0("#", raw_tag)
  return(as.character(tag_1))
}
# Prompt for the hashtag; this also sets the global `tag_1` used below
read_hashtag_tweets()
# Pull up to 1000 English tweets matching the hashtag since 2020-01-01.
# `<<-` makes `tweets` a global as well.
tweets <<- twitteR::searchTwitter(tag_1,n =1000,lang ="en",since = '2020-01-01')
# Flatten the list of status objects into a data frame
df <- twListToDF(tweets)
|
cf804aaef98e4567b9c4a91a9ee0f801d8fd62b3 | 668ec1bdf97060e4eb6b4b7c9214896c07336181 | /script3.r | c4cf2d656d4a0c6ece964f60d04c0d7fc6e0f4af | [] | no_license | Leharis/SMPD-cw-3 | 421d12f40f438918d9c393c3af07b9b5c0bf5331 | ce61299093c854724039e90eee34190e1d21c743 | refs/heads/master | 2021-01-20T02:05:27.387262 | 2017-04-25T14:27:53 | 2017-04-25T14:27:53 | 89,370,061 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 155 | r | script3.r | library(ahp)
# Locate the AHP model file bundled with the ahp package
ahpFile <- system.file("extdata", "lodowki.ahp", package="ahp")
# Load the model, compute the AHP priorities, and render the results table
lodowkiAhp <- Load(ahpFile)
Calculate(lodowkiAhp)
AnalyzeTable(lodowkiAhp)
0197f44ce3445fed5f7cf086b0fe68c2bff0ca56 | 66dd0b831434c4ad85bf58e473278883aeadbb1b | /analysis/barcode_effect_model.R | bd04ab59d4b1c98cf7d79182ead54b0abcdfd4b0 | [] | no_license | andrewGhazi/bayesianMPRA | 6e394893c7cfb079ca55698339eaa77c568a6736 | 4b9823f5a05b431adac9caae83737e525ae5d93c | refs/heads/master | 2021-01-20T03:34:35.703409 | 2018-09-17T07:41:10 | 2018-09-17T07:41:10 | 83,831,741 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 30,697 | r | barcode_effect_model.R | # I owe you the barcode normalization
#
# It goes like this . Take n values. Take the geometric mean of these values .
# Take the ith value . Divide the ith value by the geometric mean .
# Reciprocate that .
#
# If you use the reciprocate value above as a normalizing scale factor you can
# remove the barcode effects .
#
# For the n values I have in mind the library normalized median values of each
# barcode in the collection
#
# So for a given barcode , 1) take the depth normalized values in each
# replicate . 2) take the median of these values 3) apply the geometric mean
# based procedure above 4). Take the scale factors for barcodes that result and
# stick them into your modeling scheme
#
#
# Please let me know if you have questions or what you think
#
#
# Chad
library(tidyverse)
library(magrittr)
library(rstan)
# load("/mnt/bigData2/andrew/analysis_data/testing_dat.RData")
#
# sample_depths = mpra_data %>%
# unnest %>%
# select(-snp_id, -allele) %>%
# gather(sample, count) %>%
# group_by(sample) %>%
# summarise(depth = sum(count))
#
# snp_dat = mpra_data$count_data[[1]] %>%
# mutate(bc_id = 1:n())
#
# # For one barcode take the depth normalized values in each replicate .
#
# dnv = snp_dat %>%
# gather(sample, count, -allele, -bc_id) %>%
# left_join(sample_depths, by = 'sample') %>%
# mutate(depth_norm_count = 1e6 * count / depth)
#
# well_represented = dnv %>%
# filter(grepl('DNA', sample)) %>%
# group_by(allele, bc_id) %>%
# summarise(mean_depth_norm = mean(depth_norm_count)) %>%
# ungroup %>%
# filter(mean_depth_norm > 10)
#
# dnv %<>% filter(bc_id %in% well_represented$bc_id)
#
# # 2) take the median of these values
# medians = dnv %>%
# mutate(samp_type = if_else(grepl('DNA', sample), 'DNA', 'RNA')) %>%
# group_by(bc_id, samp_type) %>%
# summarise(med_dnv = median(depth_norm_count)) %>%
# ungroup %>%
# left_join(unique(select(dnv, bc_id, allele)),
# by = 'bc_id')
#
# # 3) apply the geometric mean based procedure above
# #bc_medians =
#
# # https://stackoverflow.com/questions/2602583/geometric-mean-is-there-a-built-in
# gm_mean = function(x, na.rm=TRUE){
# exp(sum(log(x[x > 0]), na.rm = na.rm) / length(x))
# }
#
# geo_means = medians %>%
# group_by(allele, samp_type) %>%
# summarise(geo_mean = gm_mean(med_dnv))
#
# bc_norm_factors = medians %>%
# left_join(geo_means, by = c('allele', 'samp_type')) %>%
# mutate(bc_norm_factor = (med_dnv / geo_mean)^(-1)) %>%
# select(bc_id, samp_type, bc_norm_factor) %>%
# mutate(samp_type = paste0(samp_type, '_norm_factor')) %>%
# spread(samp_type, bc_norm_factor)
#
# inputs = snp_dat %>%
# left_join(bc_norm_factors,
# by = 'bc_id') %>%
# select(allele, bc_id, DNA_norm_factor, RNA_norm_factor, everything())
#
# ## prior estimation copied from neg_bin_regression.R ----
#
# load('/mnt/bigData2/andrew/analysis_data/testing_dat_full.RData')
# mpra_data = ulirschCounts %>%
# group_by(snp_id) %>%
# nest
# #
# depth_factors = ulirschCounts %>%
# select(matches('NA')) %>%
# summarise_all(.funs = funs(sum(.) / 1e6)) %>%
# gather(sample, depth_factor)
# allele = mpra_data$count_data[[1]] %>%
# pull(allele) %>%
# {. != 'ref'} %>%
# as.integer() %>%
# {. + 1}
#
# Maximum-likelihood negative binomial fit for one barcode set.
#
# counts: a data frame with a `count` column of non-negative integers.
# Returns the stats::nlminb() result; $par is c(mu, size) and
# $convergence is 0 on success.
fit_nb = function(counts){
  obs = counts$count
  
  # Negative log-likelihood under NB(mu = par[1], size = par[2])
  neg_loglik = function(par){
    -sum(dnbinom(obs, mu = par[1], size = par[2], log = TRUE))
  }
  
  # Start at mu = 100, size = 1; keep both parameters strictly positive
  stats::nlminb(start = c(100, 1),
                objective = neg_loglik,
                lower = rep(.Machine$double.xmin, 2))
}
# Maximum-likelihood gamma fit to a vector of positive parameter estimates.
#
# param_estimates: numeric vector of positive values.
# Returns the stats::nlminb() result; $par is c(shape, rate) and
# $convergence is 0 on success.
fit_gamma = function(param_estimates){
  # Negative log-likelihood under Gamma(shape = par[1], rate = par[2])
  neg_loglik = function(par){
    -sum(dgamma(param_estimates, shape = par[1], rate = par[2], log = TRUE))
  }
  
  # Start at shape = rate = 1; keep both parameters strictly positive
  stats::nlminb(start = c(1, 1),
                objective = neg_loglik,
                lower = rep(.Machine$double.xmin, 2))
}
#
library(parallel)
# nb_param_estimates = mpra_data %>%
# unnest %>%
# gather(sample, count, matches('NA')) %>%
# group_by(snp_id, allele, sample) %>%
# nest %>%
# mutate(count_mean = map_dbl(data, ~mean(.x$count)),
# nb_fit = mclapply(data, fit_nb, mc.cores = 5),
# converged = map_lgl(nb_fit, ~.x$convergence == 0)) %>%
# filter(converged) %>%
# left_join(sample_depths, by = 'sample') %>%
# mutate(depth_adj_mean = 1e6 * count_mean / depth,
# depth_adj_mu_est = map2_dbl(nb_fit, depth, ~1e6 * .x$par[1] / .y),
# phi_est = map_dbl(nb_fit, ~.x$par[2])) %>%
# filter(depth_adj_mean > 10)
#
# nb_param_estimates %>%
# ggplot(aes(depth_adj_mu_est)) +
# geom_density(aes(color = sample)) +
# scale_x_log10()
#
# library(stringr)
# marg_prior = nb_param_estimates %>%
# mutate(acid_type = factor(str_extract(sample, 'DNA|RNA'))) %>%
# group_by(allele, acid_type) %>%
# summarise(phi_gamma_prior = list(fit_gamma(phi_est)),
# mu_gamma_prior = list(fit_gamma(depth_adj_mu_est))) %>%
# ungroup %>%
# gather(prior_type, gamma_fit, matches('gamma')) %>%
# mutate(alpha_est = map_dbl(gamma_fit, ~.x$par[1]),
# beta_est = map_dbl(gamma_fit, ~.x$par[2])) %>%
# arrange(desc(allele)) # This puts reference alleles first. This is bad practice
#
#
# load("/mnt/bigData2/andrew/analysis_data/testing_dat.RData")
#
# sample_depths = mpra_data %>%
# unnest %>%
# select(-snp_id, -allele) %>%
# gather(sample, count) %>%
# group_by(sample) %>%
# summarise(depth = sum(count))
#
# snp_dat = mpra_data$count_data[[1]] %>%
# mutate(bc_id = 1:n())
#
# data_list = list(n_rna_samples = mpra_data$count_data[[1]] %>% select(matches('RNA')) %>% ncol,
# n_dna_samples = mpra_data$count_data[[1]] %>% select(matches('DNA')) %>% ncol,
# n_barcodes = mpra_data$count_data[[1]] %>% nrow,
# rna_counts = mpra_data$count_data[[1]] %>% select(matches('RNA')) %>% as.matrix,
# dna_counts = mpra_data$count_data[[1]] %>% select(matches('DNA')) %>% as.matrix,
# allele = allele,
# rna_depths = depth_factors %>% filter(grepl('RNA', sample)) %>% pull(depth_factor),
# dna_depths = depth_factors %>% filter(grepl('DNA', sample)) %>% pull(depth_factor),
# #dna_norm_factors = inputs$DNA_norm_factor,
# rna_norm_factors = inputs$DNA_norm_factor,
# rna_m_a = marg_prior %>% filter(acid_type == 'RNA', prior_type == 'mu_gamma_prior') %>% pull(alpha_est),
# rna_m_b = marg_prior %>% filter(acid_type == 'RNA', prior_type == 'mu_gamma_prior') %>% pull(beta_est),
# rna_p_a = marg_prior %>% filter(acid_type == 'RNA', prior_type == 'phi_gamma_prior') %>% pull(alpha_est), # horrible non-alignment :(
# rna_p_b = marg_prior %>% filter(acid_type == 'RNA', prior_type == 'phi_gamma_prior') %>% pull(beta_est),
# dna_m_a = marg_prior %>% filter(acid_type == 'DNA', prior_type == 'mu_gamma_prior') %>% pull(alpha_est),
# dna_m_b = marg_prior %>% filter(acid_type == 'DNA', prior_type == 'mu_gamma_prior') %>% pull(beta_est),
# dna_p_a = marg_prior %>% filter(acid_type == 'DNA', prior_type == 'phi_gamma_prior') %>% pull(alpha_est),
# dna_p_b = marg_prior %>% filter(acid_type == 'DNA', prior_type == 'phi_gamma_prior') %>% pull(beta_est))
#
#### model string ----
# lol this is wrong
# bc_effect_model = '
# data {
# int<lower=0> n_rna_samples;
# int<lower=0> n_barcodes; // number for this allele
# int<lower=0> rna_counts[n_barcodes, n_rna_samples];
# int<lower=1, upper = 2> allele[n_barcodes]; // allele indicator; 1 = ref, 2 = alt
# real<lower=0> rna_depths[n_rna_samples];
# real<lower=0> rna_norm_factors[n_barcodes];
# real<lower=0> rna_m_a[2];
# real<lower=0> rna_m_b[2];
# real<lower=0> rna_p_a[2];
# real<lower=0> rna_p_b[2];
# }
# parameters {
# vector<lower=0>[2] r_m_i;
# vector<lower=0>[2] r_p_i;
# }
# model {
#
# // with density estimation, alleles would have different priors
# r_m_i[allele] ~ gamma(rna_m_a[allele], rna_m_b[allele]); // priors on negative binomial parameters
# r_p_i[allele] ~ gamma(rna_p_a[allele], rna_p_b[allele]); // here, both alleles come from the same prior
#
# for (s in 1:n_rna_samples) {
# for (t in 1:n_barcodes) {
# rna_counts[allele, s][t] ~ neg_binomial_2(r_m_i[allele] * rna_depths[s] * rna_norm_factors[t], r_p_i[allele]);
# }
#
# }
#
# }
# generated quantities {
# real transcription_shift;
# transcription_shift = log(r_m_i[2]) - log(r_m_i[1]);
# }
# '
#
# # Divide out average DNA count in TS? TODO
# # Rename to abundance effect
#
# bc_object = stan_model(model_code = bc_effect_model)
#### test ----
# samp_test = sampling(bc_object,
# data = data_list,
# chains = 10,
# iter = 1300,
# warmup = 300,
# cores = 10) # ~ 62 seconds
library(coda)
# Highest posterior density interval for each column of a sample matrix,
# following coda::HPDinterval: among all windows covering `prob` of the
# sorted draws, pick the narrowest.
#
# obj  - matrix-like object of posterior draws (rows = iterations).
# prob - target probability mass of the interval (default 0.95).
# Returns an npar x 2 matrix with columns "lower" / "upper" and a
# "Probability" attribute giving the realized coverage.
my_HPD <- function(obj, prob = 0.95, ...) {
  dimnames(obj) = NULL # Stan outputs the iterations as only one dimname which makes as.matrix() fail
  samp_mat <- as.matrix(obj)
  sorted <- apply(samp_mat, 2, sort)
  if (!is.matrix(sorted)) stop("obj must have nsamp > 1")
  n_samp <- nrow(sorted)
  n_par <- ncol(sorted)
  # window width (in draws) that covers ~prob of the sample
  window <- max(1, min(n_samp - 1, round(n_samp * prob)))
  starts <- seq_len(n_samp - window)
  # per-column index of the narrowest window of that width
  widths <- sorted[starts + window, , drop = FALSE] - sorted[starts, , drop = FALSE]
  best <- apply(widths, 2, which.min)
  out <- cbind(sorted[cbind(best, seq_len(n_par))],
               sorted[cbind(best + window, seq_len(n_par))])
  dimnames(out) <- list(colnames(samp_mat), c("lower", "upper"))
  attr(out, "Probability") <- window / n_samp
  out
}
# Fit the barcode-normalized negative binomial Stan model for one variant
# and return the 95% highest-density interval of its transcription shift.
#
# count_data    - one variant's barcode counts: an `allele` column
#                 ('ref' / 'mut') plus one integer count column per
#                 sequencing sample (column names containing DNA / RNA).
# snp_id        - variant identifier; used to name the saved .RData file.
# save_dir      - directory where the stanfit and Stan data list are saved.
# depth_factors - data frame with columns `sample` and `depth_factor`
#                 (library size / 1e6 per sample).
# n_cores       - number of MCMC chains (one core each).
# tot_samp      - total post-warmup draws summed across chains.
#
# Returns the 1 x 2 "lower"/"upper" matrix from my_HPD(), or NA when either
# allele lacks at least 2 well-represented barcodes.
#
# Relies on objects defined elsewhere in this file: marg_prior (gamma
# hyper-prior estimates), bc_object (compiled Stan model), and my_HPD().
run_samp_test = function(count_data, snp_id, 
                         save_dir = '/mnt/labhome/andrew/bayesianMPRA/analysis_outputs/bc_effect_tests/',
                         depth_factors,
                         n_cores = 10,
                         tot_samp = 1e4){
  
  # Tag each barcode with a row id so normalization factors can be joined back
  snp_dat = count_data %>% 
    mutate(bc_id = 1:n())
  
  # 1) Depth-normalize every count: one row per barcode x sample
  dnv = snp_dat %>% 
    select(allele, bc_id, matches('NA')) %>% 
    gather(sample, count, -allele, -bc_id) %>% 
    left_join(depth_factors, by = 'sample') %>% 
    mutate(depth_norm_count = count / depth_factor)
  
  # Keep only barcodes whose mean depth-normalized DNA count clears 10
  well_represented = dnv %>% 
    filter(grepl('DNA', sample)) %>% 
    group_by(allele, bc_id) %>% 
    summarise(mean_depth_norm = mean(depth_norm_count)) %>% 
    ungroup %>% 
    filter(mean_depth_norm > 10)
  
  wr_counts = well_represented %>% 
    count(allele)
  
  # Require BOTH alleles to retain at least 2 well-represented barcodes.
  # nrow(wr_counts) < 2 catches an allele dropping out entirely: count()
  # emits no row for an absent group, so any(wr_counts$n < 2) alone would
  # let a one-allele dataset through to the model.
  if (nrow(wr_counts) < 2 || any(wr_counts$n < 2)) {
    return(NA)
  }
  
  dnv %<>% filter(bc_id %in% well_represented$bc_id)
  
  # 2) Median depth-normalized count per barcode, separately for DNA and RNA
  medians = dnv %>% 
    mutate(samp_type = if_else(grepl('DNA', sample), 'DNA', 'RNA')) %>% 
    group_by(bc_id, samp_type) %>% 
    summarise(med_dnv = median(depth_norm_count)) %>% 
    ungroup %>% 
    left_join(unique(select(dnv, bc_id, allele)),
              by = 'bc_id')
  
  # 3) Geometric-mean based scaling (see the note at the top of this file)
  # https://stackoverflow.com/questions/2602583/geometric-mean-is-there-a-built-in
  gm_mean = function(x, na.rm=TRUE){
    exp(sum(log(x[x > 0]), na.rm = na.rm) / length(x))
  }
  
  geo_means = dnv %>% 
    mutate(samp_type = if_else(grepl('DNA', sample), 'DNA', 'RNA')) %>% 
    group_by(samp_type, allele) %>% 
    summarise(geo_mean = gm_mean(depth_norm_count))
  
  # Barcode factor = barcode median / (sample type, allele) geometric mean
  bc_norm_factors = medians %>% 
    left_join(geo_means, by = c('samp_type', 'allele')) %>% 
    mutate(bc_norm_factor = (med_dnv / geo_mean)) %>% 
    select(bc_id, samp_type, bc_norm_factor) %>% 
    mutate(samp_type = paste0(samp_type, '_norm_factor')) %>% 
    spread(samp_type, bc_norm_factor)
  
  # 4) Stick the scale factors back onto the filtered count table
  inputs = snp_dat %>% 
    filter(bc_id %in% well_represented$bc_id) %>% 
    left_join(bc_norm_factors, 
              by = 'bc_id') %>% 
    select(allele, bc_id, DNA_norm_factor, RNA_norm_factor, everything())
  
  count_data = snp_dat %>% 
    filter(bc_id %in% well_represented$bc_id)
  
  # NOTE(review): RNA counts are scaled by the DNA-derived barcode factors
  # (RNA_norm_factor is computed but never used) -- confirm intended.
  # Also, these fields match the earlier single-matrix model (commented out
  # above); the bc_object compiled at the bottom of the file expects
  # n_ref / n_mut / ref_counts / mut_counts instead -- confirm which model
  # this function is meant to feed.
  data_list = list(n_rna_samples = count_data %>% select(matches('RNA')) %>% ncol,
                   n_barcodes = count_data %>% nrow,
                   rna_counts = count_data %>% select(matches('RNA')) %>% as.matrix,
                   allele = count_data %>% mutate(allele_ind = case_when(allele == 'ref' ~ 1, allele == 'mut' ~ 2)) %>% pull(allele_ind),
                   rna_depths = depth_factors %>% filter(grepl('RNA', sample)) %>% pull(depth_factor),
                   rna_norm_factors = inputs$DNA_norm_factor,
                   rna_m_a = marg_prior %>% filter(acid_type == 'RNA', prior_type == 'mu_gamma_prior') %>% pull(alpha_est),
                   rna_m_b = marg_prior %>% filter(acid_type == 'RNA', prior_type == 'mu_gamma_prior') %>% pull(beta_est),
                   rna_p_a = marg_prior %>% filter(acid_type == 'RNA', prior_type == 'phi_gamma_prior') %>% pull(alpha_est),
                   rna_p_b = marg_prior %>% filter(acid_type == 'RNA', prior_type == 'phi_gamma_prior') %>% pull(beta_est))
  
  # Split tot_samp across chains; every chain gets 300 warmup iterations
  n_samp_per_core = tot_samp / n_cores
  n_iter = n_samp_per_core + 300
  
  samp_test = sampling(bc_object, 
                       data = data_list,
                       chains = n_cores, 
                       iter = n_iter, 
                       warmup = 300,
                       cores = n_cores)
  
  # Save the fit; sanitize spaces and slashes out of the file name
  save(samp_test, data_list,
       file = paste0(save_dir, snp_id %>% gsub(' ', '_', .) %>% gsub('\\/', '-', .), '.RData'))
  
  # 95% HPD interval of the posterior transcription shift
  samp_test %>% 
    rstan::extract() %>% 
    .[['transcription_shift']] %>% 
    mcmc %>% 
    my_HPD
}
# Variant of run_samp_test() that builds the barcode normalization factors
# from arithmetic means rather than medians and geometric means: each
# barcode's factor is its mean depth-normalized count divided by the
# (sample type, allele) mean depth-normalized count.
#
# Arguments, return value, and side effects mirror run_samp_test(); fits
# are saved under save_dir (default: the CD36 mean_norm_factor directory).
# Relies on file-level objects: marg_prior, bc_object, and my_HPD().
mean_norm_factor_test = function(count_data, snp_id, 
                                 save_dir = '/mnt/labhome/andrew/bayesianMPRA/analysis_outputs/bc_effect_tests/cd36/mean_norm_factor/',
                                 depth_factors,
                                 n_cores = 10,
                                 tot_samp = 1e4){
  
  # Tag each barcode with a row id for later joins
  snp_dat = count_data %>% 
    mutate(bc_id = 1:n())
  
  # Depth-normalize every count: one row per barcode x sample
  dnv = snp_dat %>% 
    select(allele, bc_id, matches('NA')) %>% 
    gather(sample, count, -allele, -bc_id) %>% 
    left_join(depth_factors, by = 'sample') %>% 
    mutate(depth_norm_count = count / depth_factor)
  
  # Keep only barcodes whose mean depth-normalized DNA count clears 10
  well_represented = dnv %>% 
    filter(grepl('DNA', sample)) %>% 
    group_by(allele, bc_id) %>% 
    summarise(mean_depth_norm = mean(depth_norm_count)) %>% 
    ungroup %>% 
    filter(mean_depth_norm > 10)
  
  wr_counts = well_represented %>% 
    count(allele)
  
  # Require BOTH alleles to retain at least 2 well-represented barcodes.
  # nrow(wr_counts) < 2 catches an allele dropping out entirely: count()
  # emits no row for an absent group, so any(wr_counts$n < 2) alone would
  # let a one-allele dataset through to the model.
  if (nrow(wr_counts) < 2 || any(wr_counts$n < 2)) {
    return(NA)
  }
  
  dnv %<>% filter(bc_id %in% well_represented$bc_id)
  
  # Mean depth-normalized count for each barcode, by DNA / RNA
  bc_mean_factors = dnv %>% 
    mutate(samp_type = if_else(grepl('DNA', sample), 'DNA', 'RNA')) %>% 
    group_by(bc_id, samp_type) %>% 
    summarise(mean_dnv = mean(depth_norm_count)) %>%
    ungroup %>% 
    left_join(unique(select(dnv, bc_id, allele)),
              by = 'bc_id')
  
  # Mean depth-normalized count for each (sample type, allele) group
  samp_means = dnv %>% 
    mutate(samp_type = if_else(grepl('DNA', sample), 'DNA', 'RNA')) %>% 
    group_by(samp_type, allele) %>% 
    summarise(samp_mean = mean(depth_norm_count))
  
  # Barcode factor = barcode mean / group mean, one column per sample type
  bc_norm_factors = bc_mean_factors %>% 
    left_join(samp_means, by = c('samp_type', 'allele')) %>% 
    mutate(bc_norm_factor = (mean_dnv / samp_mean)) %>% 
    select(bc_id, samp_type, bc_norm_factor) %>% 
    mutate(samp_type = paste0(samp_type, '_norm_factor')) %>% 
    spread(samp_type, bc_norm_factor)
  
  # Attach the scale factors back onto the filtered count table
  inputs = snp_dat %>% 
    filter(bc_id %in% well_represented$bc_id) %>% 
    left_join(bc_norm_factors, 
              by = 'bc_id') %>% 
    select(allele, bc_id, DNA_norm_factor, RNA_norm_factor, everything())
  
  count_data = snp_dat %>% 
    filter(bc_id %in% well_represented$bc_id)
  
  # NOTE(review): as in run_samp_test(), RNA counts are scaled by the
  # DNA-derived factors (RNA_norm_factor is unused), and these fields match
  # the older single-matrix model rather than the bc_object compiled at the
  # bottom of the file -- confirm both points.
  data_list = list(n_rna_samples = count_data %>% select(matches('RNA')) %>% ncol,
                   n_barcodes = count_data %>% nrow,
                   rna_counts = count_data %>% select(matches('RNA')) %>% as.matrix,
                   allele = count_data %>% mutate(allele_ind = case_when(allele == 'ref' ~ 1, allele == 'mut' ~ 2)) %>% pull(allele_ind),
                   rna_depths = depth_factors %>% filter(grepl('RNA', sample)) %>% pull(depth_factor),
                   rna_norm_factors = inputs$DNA_norm_factor,
                   rna_m_a = marg_prior %>% filter(acid_type == 'RNA', prior_type == 'mu_gamma_prior') %>% pull(alpha_est),
                   rna_m_b = marg_prior %>% filter(acid_type == 'RNA', prior_type == 'mu_gamma_prior') %>% pull(beta_est),
                   rna_p_a = marg_prior %>% filter(acid_type == 'RNA', prior_type == 'phi_gamma_prior') %>% pull(alpha_est),
                   rna_p_b = marg_prior %>% filter(acid_type == 'RNA', prior_type == 'phi_gamma_prior') %>% pull(beta_est))
  
  # Split tot_samp across chains; every chain gets 300 warmup iterations
  n_samp_per_core = tot_samp / n_cores
  n_iter = n_samp_per_core + 300
  
  samp_test = sampling(bc_object, 
                       data = data_list,
                       chains = n_cores, 
                       iter = n_iter, 
                       warmup = 300,
                       cores = n_cores)
  
  # Save the fit; sanitize spaces and slashes out of the file name
  save(samp_test, data_list,
       file = paste0(save_dir, snp_id %>% gsub(' ', '_', .) %>% gsub('\\/', '-', .), '.RData'))
  
  # 95% HPD interval of the posterior transcription shift
  samp_test %>% 
    rstan::extract() %>% 
    .[['transcription_shift']] %>% 
    mcmc %>% 
    my_HPD
}
# bc_effect_tests = mpra_data %>%
# mutate(ts_HDI = map2(count_data, snp_id, run_samp_test, depth_factors = depth_factors))
#
# save(bc_effect_tests,
# file = '/mnt/labhome/andrew/bayesianMPRA/analysis_outputs/bc_effect_tests.RData')
#### Apply to CD36 results ----
load("~/plateletMPRA/outputs/pcr_validation_pilot/controls_with_counts.RData")
# Read the pilot MPRA count objects: data frames `controls` and `eqtls`
load("~/plateletMPRA/outputs/pcr_validation_pilot/controls_with_counts.RData")
load("~/plateletMPRA/outputs/pcr_validation_pilot/eqtls_with_counts.RData")
# Label the control constructs; each construct spans 80 barcode rows,
# in this fixed order
controls %<>% mutate(rs = rep(c(paste0('PKRRE', 1:5), paste0('ALAS2', 1:3), 'URUOS', 'HBG2'), each = 80))
eqtls %<>% mutate(rs = map2_chr(rs, mut, ~ifelse(.x == 'rs17154155' & .y == 'T', 'rs17154155_ALT', .x))) # this snp had two alternate alleles
# Stack eQTL and control constructs into one table
pltMPRA = rbind(eqtls, controls)
# Strip sequencing-run suffixes from sample names and relabel
# cDNA -> RNA, Plasmid -> DNA so downstream matches('NA') regexes work
pltMPRA %<>% set_colnames(gsub('_L001_R1_001|seqOnly_', 
                               '',
                               names(pltMPRA))) %>% 
  select_all(~gsub('cDNA', 'RNA', gsub('Plasmid', 'DNA', .)))
library(stringr)
# Drop the sequence column and standardize the allele / snp column names
pltMPRA %<>% 
  select(-seq, allele = type, snp = rs)
# Per-sample depth factors for CD36: total counts in millions
cd36MPRA_depth_factors = pltMPRA %>% 
  select(matches('NA')) %>% 
  summarise_all(.funs = funs(sum(.) / 1e6)) %>% 
  gather(sample, depth_factor)
## estimate priors----
library(parallel)
sample_depths = pltMPRA %>%
select(matches('NA')) %>%
gather(sample, count) %>%
group_by(sample) %>%
summarise(depth = sum(count))
nb_param_estimates = pltMPRA %>%
unnest %>%
gather(sample, count, matches('[DR]NA')) %>%
group_by(snp, allele, sample) %>%
nest %>%
mutate(count_mean = map_dbl(data, ~mean(.x$count)),
nb_fit = mclapply(data, fit_nb, mc.cores = 5),
converged = map_lgl(nb_fit, ~.x$convergence == 0)) %>%
filter(converged) %>%
left_join(sample_depths, by = 'sample') %>%
mutate(depth_adj_mean = 1e6 * count_mean / depth,
depth_adj_mu_est = map2_dbl(nb_fit, depth, ~1e6 * .x$par[1] / .y),
phi_est = map_dbl(nb_fit, ~.x$par[2])) %>%
filter(depth_adj_mean > 10)
nb_param_estimates %>%
ggplot(aes(depth_adj_mu_est)) +
geom_density(aes(color = sample)) +
scale_x_log10()
library(stringr)
marg_prior = nb_param_estimates %>%
mutate(acid_type = factor(str_extract(sample, 'DNA|RNA'))) %>%
group_by(allele, acid_type) %>%
summarise(phi_gamma_prior = list(fit_gamma(phi_est)),
mu_gamma_prior = list(fit_gamma(depth_adj_mu_est))) %>%
ungroup %>%
gather(prior_type, gamma_fit, matches('gamma')) %>%
mutate(alpha_est = map_dbl(gamma_fit, ~.x$par[1]),
beta_est = map_dbl(gamma_fit, ~.x$par[2])) %>%
arrange(desc(allele))
## run test ----
# cd36_bc_effect_test = pltMPRA %>%
# mutate(allele = tolower(allele)) %>%
# group_by(snp) %>%
# nest %>%
# .[c(88),] %>%
# mutate(ts_HDI = map2(data, snp,
# run_samp_test,
# save_dir = '/mnt/labhome/andrew/bayesianMPRA/analysis_outputs/bc_effect_tests/cd36/',
# depth_factors = cd36MPRA_depth_factors,
# n_cores = 10))
#
# cd36_bc_effect_test = pltMPRA %>%
# mutate(allele = tolower(allele)) %>%
# group_by(snp) %>%
# nest %>%
# .[c(82, 88, 90, 43, 8),] %>%
# mutate(ts_HDI = map2(data, snp,
# run_samp_test,
# save_dir = '/mnt/labhome/andrew/bayesianMPRA/analysis_outputs/bc_effect_tests/cd36/',
# depth_factors = cd36MPRA_depth_factors,
# n_cores = 10))
# Posterior mean transcription shift for one variant, read back from the
# stanfit that run_samp_test() / mean_norm_factor_test() saved to disk.
#
# snp_id - variant identifier; '<dir><sanitized snp_id>.RData' must exist
#          and contain an object named `samp_test` (a stanfit).
# dir    - directory holding the saved fits. A default is supplied (matching
#          make_ts_plot() below) because the existing call sites invoke this
#          as map_dbl(snp, get_post_mean), which errors when `dir` has no
#          default.
get_post_mean = function(snp_id, 
                         dir = '/mnt/labhome/andrew/bayesianMPRA/analysis_outputs/bc_effect_tests/cd36/'){
  # Apply the same space/slash sanitization the save step used, so ids like
  # 'ALAS2 1' resolve to the file that was actually written
  file_id = snp_id %>% gsub(' ', '_', .) %>% gsub('\\/', '-', .)
  
  # load() brings `samp_test` into this function's environment
  load(paste0(dir, file_id, '.RData'))
  
  samp_test %>% 
    rstan::extract() %>% 
    .[['transcription_shift']] %>% 
    mean
}
# cd36_bc_effect_test %<>%
# mutate(post_mean_ts = map_dbl(snp,
# get_post_mean),
# functional = map_lgl(ts_HDI,
# ~!between(0, .x[1], .x[2])))
# cd36_bc_effect_test = pltMPRA %>%
# mutate(allele = tolower(allele)) %>%
# group_by(snp) %>%
# nest %>%
# mutate(ts_HDI = map2(data, snp,
# run_samp_test,
# save_dir = '/mnt/labhome/andrew/bayesianMPRA/analysis_outputs/bc_effect_tests/cd36/',
# depth_factors = cd36MPRA_depth_factors,
# n_cores = 20)) %>%
# mutate(post_mean_ts = map_dbl(snp,
# get_post_mean),
# functional = map_lgl(ts_HDI,
# ~!between(0, .x[1], .x[2])))
#
# save(cd36_bc_effect_test,
# file = '~/bayesianMPRA/analysis_outputs/cd36_bc_effect_test.RData')
#
# cd36_mean_bc_effect_test = pltMPRA %>%
# mutate(allele = tolower(allele)) %>%
# group_by(snp) %>%
# nest %>%
# mutate(ts_HDI = map2(data, snp,
# mean_norm_factor_test,
# save_dir = '/mnt/labhome/andrew/bayesianMPRA/analysis_outputs/bc_effect_tests/cd36/mean_norm_factor/',
# depth_factors = cd36MPRA_depth_factors,
# n_cores = 18)) %>%
# mutate(post_mean_ts = map_dbl(snp,
# get_post_mean),
# functional = map_lgl(ts_HDI,
# ~!between(0, .x[1], .x[2])))
# Persist the summary table of mean-normalized barcode-effect fits.
# NOTE(review): cd36_mean_bc_effect_test is built in the commented-out
# pipeline above, so sourcing this file top-to-bottom fails here -- this
# script appears intended for interactive, section-by-section use; confirm.
save(cd36_mean_bc_effect_test,
     file = '~/bayesianMPRA/analysis_outputs/cd36_mean_bc_effect_test.RData')
# Histogram + density of the posterior transcription-shift draws for one
# variant, reloaded from the stanfit saved by the fitting functions above.
#
# snp_id - variant identifier; '<dir><snp_id>.RData' must exist and contain
#          an object named `samp_test` (a stanfit).
# dir    - directory holding the saved fits.
# Returns a ggplot object.
make_ts_plot = function(snp_id,
                        dir = '/mnt/labhome/andrew/bayesianMPRA/analysis_outputs/bc_effect_tests/cd36/') {
  # load() brings `samp_test` into this function's environment
  load(paste0(dir, snp_id, '.RData'))
  
  ts_draws = rstan::extract(samp_test)[['transcription_shift']]
  plot_dat = data_frame(transcription_shift = ts_draws)
  
  ggplot(plot_dat, aes(transcription_shift)) +
    geom_histogram(aes(y = ..density..),
                   bins = 40) +
    geom_density() +
    labs(title = paste0(snp_id, ' Barcode effect model TS'),
         subtitle = 'TS = log(alt RNA mean) - log(ref RNA mean) after accounting for depth and barcode')
}
#
# tmp = mclapply(cd36_mean_bc_effect_test$snp,
# make_ts_plot, mc.cores = 10)
#### Double normalization model ----
# "Double normalization" barcode-effect Stan model. RNA counts for each
# allele are negative binomial with a per-allele mean (r_m_i, scaled by the
# sample depth and the barcode's normalization factor) and dispersion
# (r_p_i); gamma hyper-priors rna_m_* / rna_p_* are indexed 1 = ref,
# 2 = mut. The generated quantity transcription_shift is
# log(mut RNA mean) - log(ref RNA mean).
# NOTE: the Stan program is a single string literal -- any change to it
# requires recompiling bc_object below.
bc_effect_model = '
data {
  int<lower=0> n_rna_samples;
  int<lower=1> n_ref;
  int<lower=1> n_mut;
  int<lower=0> ref_counts[n_ref, n_rna_samples];
  int<lower=0> mut_counts[n_mut, n_rna_samples];
  real<lower=0> rna_depths[n_rna_samples];
  real<lower=0> ref_rna_norm_factors[n_ref];
  real<lower=0> mut_rna_norm_factors[n_mut];
  real<lower=0> rna_m_a[2];
  real<lower=0> rna_m_b[2];
  real<lower=0> rna_p_a[2];
  real<lower=0> rna_p_b[2];
}
parameters {
  vector<lower=0>[2] r_m_i;
  vector<lower=0>[2] r_p_i;
}
model {
  // with density estimation, alleles would have different priors
  for (allele in 1:2) {
    r_m_i[allele] ~ gamma(rna_m_a[allele], rna_m_b[allele]); // priors on negative binomial parameters
    r_p_i[allele] ~ gamma(rna_p_a[allele], rna_p_b[allele]); // here, both alleles come from the same prior
  }
  for (s in 1:n_rna_samples) {
    for (t in 1:n_ref) {
      ref_counts[t, s] ~ neg_binomial_2(r_m_i[1] * rna_depths[s] * ref_rna_norm_factors[t], r_p_i[1]);
    }
    for (t in 1:n_mut) {
      mut_counts[t, s] ~ neg_binomial_2(r_m_i[2] * rna_depths[s] * mut_rna_norm_factors[t], r_p_i[2]);
    }
  }
}
generated quantities {
  real transcription_shift;
  transcription_shift = log(r_m_i[2]) - log(r_m_i[1]);
}
'
# Compile once at source time; the sampling() calls above reuse this object
bc_object = stan_model(model_code = bc_effect_model)
# Fit the barcode-effect Stan model (`bc_object`) for one SNP and return the
# HPD interval of the posterior transcription shift.
#
# count_data:    one SNP's barcode x sample count table (allele column plus
#                DNA/RNA count columns).
# snp_id:        SNP identifier; used to name the saved .RData file.
# save_dir:      where the per-SNP stanfit + data list are saved.
# depth_factors: data frame mapping `sample` -> `depth_factor` (sequencing depth).
# n_cores:       number of chains (one per core).
# tot_samp:      total post-warmup draws, split evenly across chains.
#
# Returns the HPD interval from my_HPD(), or NA when either allele has fewer
# than 2 well-represented barcodes.
bc_norm_factor_test = function(count_data, snp_id,
                               save_dir = '/mnt/labhome/andrew/bayesianMPRA/analysis_outputs/bc_effect_tests/cd36/mean_norm_factor/',
                               depth_factors,
                               n_cores = 10,
                               tot_samp = 1e4){
  snp_dat = count_data %>%
    mutate(bc_id = 1:n())
  # For one barcode take the depth normalized values in each replicate .
  dnv = snp_dat %>% # depth_normalized_values
    select(allele, bc_id, matches('[DR]NA')) %>%
    gather(sample, count, -allele, -bc_id) %>%
    left_join(depth_factors, by = 'sample') %>%
    mutate(depth_norm_count = count / depth_factor)
  # Keep only barcodes whose mean depth-normalized DNA count exceeds 10.
  well_represented = dnv %>%
    filter(grepl('DNA', sample)) %>%
    group_by(allele, bc_id) %>%
    summarise(mean_depth_norm = mean(depth_norm_count)) %>%
    ungroup %>%
    filter(mean_depth_norm > 10)
  wr_counts = well_represented %>%
    count(allele)
  # Bail out when either allele has < 2 usable barcodes (model not identifiable).
  if (any(wr_counts$n < 2) | nrow(well_represented) == 0) {
    return(NA)
  }
  dnv %<>% filter(bc_id %in% well_represented$bc_id)
  bc_mean_factors = dnv %>%
    mutate(samp_type = if_else(grepl('DNA', sample), 'DNA', 'RNA')) %>%
    group_by(bc_id, samp_type) %>%
    summarise(mean_dnv = mean(depth_norm_count)) %>% # mean depth normalized count by barcode
    ungroup %>%
    left_join(unique(select(dnv, bc_id, allele)), # attach on allele
              by = 'bc_id')
  samp_means = dnv %>%
    mutate(samp_type = if_else(grepl('DNA', sample), 'DNA', 'RNA')) %>%
    group_by(samp_type, allele) %>%
    summarise(samp_mean = mean(depth_norm_count)) %>% # mean depth normalized count by sample
    ungroup
  # Per-barcode normalization factor = barcode mean / (sample-type, allele) mean,
  # spread into DNA_norm_factor / RNA_norm_factor columns.
  bc_norm_factors = bc_mean_factors %>%
    left_join(samp_means, by = c('samp_type', 'allele')) %>%
    mutate(bc_norm_factor = (mean_dnv / samp_mean)) %>%
    select(bc_id, samp_type, bc_norm_factor) %>%
    mutate(samp_type = paste0(samp_type, '_norm_factor')) %>%
    spread(samp_type, bc_norm_factor)
  inputs = snp_dat %>%
    filter(bc_id %in% well_represented$bc_id) %>%
    left_join(bc_norm_factors,
              by = 'bc_id') %>%
    select(allele, bc_id, DNA_norm_factor, RNA_norm_factor, everything())
  count_data = snp_dat %>%
    filter(bc_id %in% well_represented$bc_id)
  # NOTE(review): `n_barcodes` is not declared in the Stan model's data block,
  # and `ref/mut_rna_norm_factors` are filled from DNA_norm_factor (not
  # RNA_norm_factor) -- confirm both are intentional.
  data_list = list(n_rna_samples = count_data %>% select(matches('RNA')) %>% ncol,
                   n_barcodes = inputs %>% nrow,
                   ref_counts = inputs %>% filter(allele == 'ref') %>% select(matches('RNA')) %>% select(-matches('norm')) %>% as.matrix,
                   mut_counts = inputs %>% filter(allele == 'mut') %>% select(matches('RNA')) %>% select(-matches('norm')) %>% as.matrix,
                   n_ref = inputs$allele %>% table() %>% .['ref'],
                   n_mut = inputs$allele %>% table() %>% .['mut'],
                   rna_depths = depth_factors %>% filter(grepl('RNA', sample)) %>% pull(depth_factor),
                   ref_rna_norm_factors = inputs %>% filter(allele == 'ref') %>% pull(DNA_norm_factor),
                   mut_rna_norm_factors = inputs %>% filter(allele == 'mut') %>% pull(DNA_norm_factor),
                   rna_m_a = marg_prior %>% filter(acid_type == 'RNA', prior_type == 'mu_gamma_prior') %>% pull(alpha_est),
                   rna_m_b = marg_prior %>% filter(acid_type == 'RNA', prior_type == 'mu_gamma_prior') %>% pull(beta_est),
                   rna_p_a = marg_prior %>% filter(acid_type == 'RNA', prior_type == 'phi_gamma_prior') %>% pull(alpha_est), # horrible non-alignment :(
                   rna_p_b = marg_prior %>% filter(acid_type == 'RNA', prior_type == 'phi_gamma_prior') %>% pull(beta_est))
  # Each chain contributes tot_samp / n_cores post-warmup draws (300 warmup).
  n_samp_per_core = tot_samp / n_cores
  n_iter = n_samp_per_core + 300
  samp_test = sampling(bc_object,
                       data = data_list,
                       chains = n_cores,
                       iter = n_iter,
                       warmup = 300,
                       cores = n_cores)
  # Persist the fit; spaces and '/' in snp_id are sanitized for the filename.
  save(samp_test, data_list,
       file = paste0(save_dir, snp_id %>% gsub(' ', '_', .) %>% gsub('\\/', '-', .), '.RData'))
  samp_test %>%
    rstan::extract() %>%
    .[['transcription_shift']] %>%
    mcmc %>%
    my_HPD
}
# Run the barcode-normalization Stan test on every SNP in the CD36 MPRA data
# (one nested count table per SNP), then flag a SNP as "functional" when the
# HPD interval of its transcription shift excludes zero.
cd36_bc_effect_test = pltMPRA %>%
  mutate(allele = tolower(allele)) %>%  # harmonize allele labels ('ref'/'mut')
  group_by(snp) %>%
  nest %>%
  mutate(ts_HDI = map2(data, snp,
                       bc_norm_factor_test,
                       save_dir = '/mnt/labhome/andrew/bayesianMPRA/analysis_outputs/bc_effect_tests/cd36/bc_norm_factor/',
                       depth_factors = cd36MPRA_depth_factors,
                       n_cores = 18)) %>%
  # post_mean_ts is re-read from the per-SNP .RData files written above.
  mutate(post_mean_ts = map_dbl(snp,
                                get_post_mean),
         functional = map_lgl(ts_HDI,
                              ~!between(0, .x[1], .x[2])))
# Re-load the saved stanfit (`samp_test`) for one SNP and summarise its
# posterior transcription-shift draws as a highest-posterior-density interval.
load_and_get_ts_hdi = function(snp_id){
  load(paste0('/mnt/labhome/andrew/bayesianMPRA/analysis_outputs/bc_effect_tests/cd36/bc_norm_factor/', snp_id, '.RData'))
  ts_draws <- rstan::extract(samp_test)[['transcription_shift']]
  my_HPD(mcmc(ts_draws))
}
# Refresh the per-SNP summaries from the saved stanfits: HPD interval,
# functional call (HDI excludes 0), and posterior-mean transcription shift.
cd36_bc_effect_test %<>%
  mutate(ts_HDI = map(snp,
                      load_and_get_ts_hdi),
         functional = map_lgl(ts_HDI,
                              ~!between(0, .x[1], .x[2])),
         post_mean_ts = map_dbl(snp,
                                get_post_mean,
                                dir = '/mnt/labhome/andrew/bayesianMPRA/analysis_outputs/bc_effect_tests/cd36/bc_norm_factor/'))
# BUG FIX: this save previously wrote `cd36_mean_bc_effect_test` -- an object
# only created in commented-out code above -- discarding the object that was
# just updated. Save the refreshed `cd36_bc_effect_test` instead.
save(cd36_bc_effect_test,
     file = '~/bayesianMPRA/analysis_outputs/cd36_bc_effect_test.RData')
|
61326f7910991a7c71f6af290ac4034812a1f68a | 183fd9e1be55fa642da502f75062041797412182 | /forDan/exampleOutput.R | 899cbcd2160d7ecbc96eccb12221808b5d7cd190 | [] | no_license | batharseneca/Textures-In-HD | bbfb029791913db5e9e41f9ebc34822dc2df3825 | 3d554029315148345a4d0fb7fe99354b2ed5ae59 | refs/heads/master | 2016-09-12T21:45:22.178983 | 2016-04-12T18:01:13 | 2016-04-12T18:01:13 | 58,826,026 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,194 | r | exampleOutput.R |
# Simulated example output: two Gaussian clusters of "texture features", one
# per diseased condition (HD = diseased, NO = healthy), rendered as a labelled
# scatter plot and written to sampleGraph.png.
library(ggplot2)

# NOTE: rnorm() is not seeded, so the figure differs between runs; add
# set.seed() here if a reproducible figure is needed.

# 300 bivariate-normal points per condition.
HD.df <- data.frame(x = rnorm(300, 3, 0.5),
                    y = rnorm(300, 3, 0.5),
                    label = rep("HD", 300))
NO.df <- data.frame(x = rnorm(300, 1, 0.5),
                    y = rnorm(300, 1, 0.5),
                    label = rep("NO", 300))
dframe <- rbind(HD.df, NO.df)

# Machine-specific output directory -- adjust before running elsewhere.
setwd("C:/Users/Nishanth/Documents/ImageAnalysis-IGP/GUI_Random/")

png("sampleGraph.png", width = 500, height = 500, units = "px")
g <- ggplot(data = dframe, aes(x = x, y = y)) +
  # Large translucent colour-coded points with a small solid centre.
  geom_point(aes(color = label), size = 5, alpha = 0.3) +
  geom_point(color = "black", size = 2, alpha = 1) +
  theme_bw() +
  xlab("Texture Feature 1") +
  ylab("Texture Feature 2") +
  scale_colour_discrete(name = "Diseased Condition",
                        breaks = c("HD", "NO"),
                        labels = c("Diseased", "Healthy")) +
  theme(axis.text = element_text(size = 12),
        axis.title = element_text(size = 14, face = "bold"),
        axis.line = element_line(colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.background = element_blank())
# Explicit print() so the plot is rendered to the png device even when the
# script is source()d (bare `g` only auto-prints interactively / via Rscript).
print(g)
dev.off()
|
5514aa15ac9309a33947f18830083e0cf1713d25 | 565f6a4e33ea63a9596cc105ce7046174c75bab8 | /R/pipeTopGO.R | b63be024562a921037e63192c5f02f81e2924267 | [] | no_license | jtlovell/RNAseqDE | d684af6cd07d9f1288123a1d57c556ca8a03cc8c | 906d88f7b69d9be364ae0cff2f13be0606509a3c | refs/heads/master | 2020-03-16T02:51:29.214965 | 2018-05-07T15:33:12 | 2018-05-07T15:33:12 | 132,474,442 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,797 | r | pipeTopGO.R | #' @title Run gene ontology enrichment analyses
#'
#' @description
#' \code{pipeTopGO} Methods to simplify running limma::topGO from a table-like
#' GO annotation database.
#'
#' @param genes.of.interest A character vector representing the genes that are
#' to be tested.
#' @param GO.db The GO database in tabular format. One column must contain the
#' unique gene identifier. Gene IDs must not be replicated. Multiple GO terms
#' must be separated by comma (or similar) in a single dataframe column.
#' @param GO.db.colname The name of the column that contains the GO terms
#' @param GO.db.geneIDs The name of the GO.db column that contains the unique
#' gene identifier
#' @param GO.db.sep The character that separates GO terms.
#' @param cull2genes Specify if the background to test should be a gene set
#' other than the entire GO database
#' @param output Should the output be culled so that GO terms with P
#' values equal to 1 are not returned.
#' @details More here soon.
#' @return A tabular presentation of GO terms and the resulting statistics
#' @export
pipeTopGO<-function(genes.of.interest,
GO.db,
GO.db.colname = "GO",
GO.db.geneIDs = "geneID",
GO.db.sep = ",",
min.n.annot = 0,
cull2genes = NULL,
output = "culled"){
if(!GO.db.colname %in% colnames(GO.db))
stop("GO.db.colname must be a column name in GO.db\n")
if(!requireNamespace("topGO", quietly = TRUE)){
stop("install the topGO package before running\n")
}else{
require("topGO", quietly = TRUE)
}
ids<-GO.db[,GO.db.geneIDs]
GO.db<-lapply(1:nrow(GO.db), function(x) strsplit(GO.db[,GO.db.colname][x],GO.db.sep)[[1]])
names(GO.db)<-ids
nas<-sapply(GO.db, function(x) is.na(x[1]))
GO.db<-GO.db[!nas]
if(min.n.annot>0){
tab<-table(unlist(GO.db))
go2drop<-names(tab)[tab<min.n.annot]
GO.db<-lapply(GO.db, function(x) x[!x %in% go2drop])
}
geneID2GO = GO.db
if(!is.null(cull2genes)){
geneID2GO<-geneID2GO[cull2genes]
}
geneNames <- names(geneID2GO)
geneList <- factor(as.integer(geneNames %in% genes.of.interest))
names(geneList) <- geneNames
GOdata <- new("topGOdata",
ontology = "BP",
allGenes = geneList,
annotationFun = annFUN.gene2GO,
gene2GO = geneID2GO)
resultFis <- runTest(GOdata, algorithm = "classic", statistic = "fisher")
if(output == "culled"){
n.non0<-length(score(resultFis)[score(resultFis)!=1])
}else{
n.non0<-length(score(resultFis))
}
allRes <- data.frame(GenTable(GOdata, resultFis, topNodes = n.non0))
colnames(allRes)[6]<-"Pvalue"
allRes$fdr.Pvalue<-p.adjust(allRes$Pvalue, method = "fdr")
return(allRes)
}
|
6d801c5318322b0237cdef7c9c4e1390a7b3ce41 | a71f5727b67ecd4b9a3dd506749dd39c47264904 | /R语言统计分析与应用/《R语言统计分析与应用》配套程序/第八章/example8_8.R | 27650421262093af45b5538f7deb748860e6f1f8 | [] | no_license | wwjvictor/test-R | 4085408ae64c48cda7dd34128e4dac3e1dd1a7a3 | be292db7c0288b02d9ce0a749b3af64043f99634 | refs/heads/master | 2020-12-14T00:01:31.140699 | 2020-01-17T15:08:09 | 2020-01-17T15:08:09 | 234,570,309 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 596 | r | example8_8.R | > Example8_8 <- read.table ("example8_8.csv", header=TRUE, sep=",")
> attach(Example8_8)
> site <-factor(c, order=FALSE)
> rabbnum <-factor(r, order=FALSE)
> table(site, rabbnum, z)
> aggregate(x, by=list(site), FUN=mean)
> aggregate(x, by=list(site), FUN=sd)
> aggregate(x, by=list(rabbnum), FUN=mean)
> aggregate(x, by=list(rabbnum), FUN=sd)
> aggregate(x, by=list(z), FUN=mean)
> aggregate(x, by=list(z), FUN=sd)
> fit <- aov(x ~ site + rabbnum + z)
> summary(fit)
> TukeyHSD(fit, "site")
> TukeyHSD(fit, "rabbnum")
> TukeyHSD(fit, "z")
> detach(Example8_8)
|
7ab789e42502eb5b556ac014011cb87b5fdc71f6 | 862b25a1ff1b6c5550cae3bdc95dbe31c0cd0ee7 | /man/powerpoint_theme.Rd | f80357ba5865994209dc7a3ac753252039c3c100 | [
"MIT"
] | permissive | joelnitta/jntools | f2e664008e5e5241354e2747ee629aecae78517c | 9d64a36799e4f5adcb15431f678385a5e1753fd6 | refs/heads/master | 2022-01-27T20:00:15.218790 | 2022-01-13T04:41:20 | 2022-01-13T04:41:20 | 136,439,973 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 492 | rd | powerpoint_theme.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{powerpoint_theme}
\alias{powerpoint_theme}
\title{powerpoint_theme}
\usage{
powerpoint_theme()
}
\value{
ggplot object with larger font sizes
}
\description{
Increase the font size of plot titles so figures are legible in
PowerPoint presentations.
}
\examples{
library(ggplot2)
p1 <- ggplot(iris, aes(Sepal.Length, Petal.Length)) +
geom_point(aes(color = Species))
p1 + powerpoint_theme()
}
\author{
Joel H Nitta, \email{joelnitta@gmail.com}
}
|
2f12122c6b863e0edbca6e24dbbfb30c05670b3a | cd82731e5755625d0f65151430b47d8d86737530 | /man/TSS.Rd | 423c2ec9152366054731a3f6d501a194aff87031 | [
"MIT"
] | permissive | ArefinMizan/jeksterslabRlinreg | 31a2d8f9201bf084b385a52e8788b7d7a5225307 | 21b2ed9dcae3b6c275b573b4a71438558c35d08d | refs/heads/master | 2023-03-19T10:08:30.303897 | 2020-12-30T22:31:36 | 2020-12-30T22:31:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,648 | rd | TSS.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SS.R
\name{TSS}
\alias{TSS}
\title{Total Sum of Squares.}
\usage{
TSS(y)
}
\arguments{
\item{y}{Numeric vector of length \code{n} or \code{n} by \code{1} matrix.
The vector \eqn{\mathbf{y}} is an \eqn{n \times 1} vector of observations
on the regressand variable.}
}
\value{
Returns the total sum of squares \eqn{\left( \mathrm{TSS} \right)}.
}
\description{
Calculates the total sum of squares \eqn{\left( \mathrm{TSS} \right)} using
\deqn{
\mathrm{TSS} = \sum_{i = 1}^{n} \left( Y_i - \bar{Y} \right)^2 \\
= \sum_{i = 1}^{n} Y_{i}^{2} - n \bar{Y}^2
}
In matrix form
\deqn{
\mathrm{TSS} = \sum_{i = 1}^{n} \left( \mathbf{y} - \mathbf{\bar{y}} \right)^2
}
Equivalent computational matrix formula
\deqn{
\mathrm{TSS} = \mathbf{y}^{\prime} \mathbf{y} - n \mathbf{\bar{Y}}^{2}.
}
Note that
\deqn{
\mathrm{TSS} = \mathrm{ESS} + \mathrm{RSS} .
}
}
\examples{
y <- jeksterslabRdatarepo::wages.matrix[["y"]]
TSS(y = y)
}
\references{
\href{https://en.wikipedia.org/wiki/Residual_sum_of_squares}{Wikipedia: Residual Sum of Squares}
\href{https://en.wikipedia.org/wiki/Explained_sum_of_squares}{Wikipedia: Explained Sum of Squares}
\href{https://en.wikipedia.org/wiki/Total_sum_of_squares}{Wikipedia: Total Sum of Squares}
\href{https://en.wikipedia.org/wiki/Coefficient_of_determination}{Wikipedia: Coefficient of Determination}
}
\seealso{
Other sum of squares functions:
\code{\link{.ESS}()},
\code{\link{.RSS}()},
\code{\link{ESS}()},
\code{\link{RSS}()}
}
\author{
Ivan Jacob Agaloos Pesigan
}
\concept{sum of squares functions}
\keyword{SS}
|
bbc673e204dac753aed71675278220af2ae860e9 | 129a996a3dce9fe55a1bb2324c11665db2618c97 | /man/Rtran.Rd | eb76c04bafa8b4da392aa18aa5125d156af9a607 | [
"MIT"
] | permissive | Liripo/Ryoudao | 5d23c547f69539428e74c54534dc0644f4df5eb4 | 9c39ae44a2e15d2bda735774964b5659176d299c | refs/heads/master | 2022-11-26T18:11:57.880947 | 2020-07-27T04:10:20 | 2020-07-27T04:10:20 | 270,169,560 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 684 | rd | Rtran.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Rtran.R
\name{Rtran}
\alias{Rtran}
\title{Rtran}
\usage{
Rtran(
q = q,
from = "zh-CHS",
to = "en",
app_key = NULL,
app_secret = NULL,
...
)
}
\arguments{
\item{q}{search word}
\item{from}{search word Language}
\item{to}{target Language}
\item{app_key}{Youdao application key}
\item{app_secret}{Youdao application secret}
\item{...}{code_digest function param}
}
\value{
translation character
}
\description{
Rtran
}
\examples{
Rtran(q = "爱因斯坦", system = "windows")
data <- c("math","english","chinese")
tran <- sapply(data,Rtran,from = "en",to = "zh-CHS",system = "WINDOWS")
}
\author{
Liripo
}
|
f6720d121ff8db375f972a0982c59cb6f799ea58 | cf33f3793250f2839d8455f85f578b906de4d6b1 | /src/Scripts/match-vs-cachelines.r | 4c3d7cd49c8e1e7c5550b3a3cbbdcb8ad00eeca0 | [
"MIT"
] | permissive | BitFunnel/BitFunnel | 693570ebc3a06a4c406b7f0c56d50500e0eda738 | b8ec70eeb3aa2f6aef6166feb6780fac3acf981b | refs/heads/master | 2023-07-04T15:33:37.248624 | 2022-01-04T20:27:35 | 2022-01-04T20:27:35 | 55,266,587 | 402 | 47 | MIT | 2021-10-03T07:56:48 | 2016-04-01T22:44:40 | C++ | UTF-8 | R | false | false | 3,784 | r | match-vs-cachelines.r | library("broom")
library("ggplot2")
library("reshape2")
setwd("~/dev/BitFunnel/src/Scripts")
args = commandArgs(trailingOnly=TRUE)
if (length(args) != 4) {
stop("Required args: [interpreter QueryPipelineStats filename], [compiler QPS filename], [cachelines vs time filename], [matches vs. time filename]", call.=FALSE)
}
int_name = args[1]
comp_name = args[2]
out_name1 = args[3]
out_name2 = args[4]
print("Reading input.")
interpreter <- read.csv(header=TRUE, file=int_name)
compiler <- read.csv(header=TRUE, file=comp_name)
df <- data.frame(interpreter$cachelines, compiler$match)
names(df)[names(df) == 'interpreter.cachelines'] <- 'Cachelines'
names(df)[names(df) == 'compiler.match'] <- 'MatchTime'
# print("Plotting cachelines vs. time.")
# png(filename=out_name1,width=1600,height=1200)
# ggplot(df, aes(x=Cachelines,y=MatchTime)) +
# theme_minimal() +
# geom_point(alpha=1/100) +
# theme(axis.text = element_text(size=40),
# axis.title = element_text(size=40)) +
# ylim(0, 0.001)
# dev.off()
# print("Plotting matches vs. time.")
# png(filename=out_name2,width=1600,height=1200)
# ggplot(compiler, aes(x=matches,y=match)) +
# theme_minimal() +
# geom_point(alpha=1/20) +
# theme(axis.text = element_text(size=40),
# axis.title = element_text(size=40))
# dev.off()
# print("Computing cacheline regression.")
# df <- data.frame(interpreter$cachelines, compiler$matches, compiler$match)
# names(df)[names(df) == 'interpreter.cachelines'] <- 'Cachelines'
# names(df)[names(df) == 'compiler.matches'] <- 'Matches'
# names(df)[names(df) == 'compiler.match'] <- 'Time'
# fit <- lm(Time ~ Matches, data=df)
# print(summary(fit))
# fit <- lm(Time ~ Cachelines, data=df)
# print(summary(fit))
# fit <- lm(Time ~ ., data=df)
# print(summary(fit))
# print("Residual plot.")
# df <- augment(fit)
# # TODO: don't hardcode filename.
# png(filename="time-residual.png",width=1600,height=1200)
# ggplot(df, aes(x = .fitted, y = .resid)) +
# theme_minimal() +
# geom_point(alpha=1/10) +
# theme(axis.text = element_text(size=40),
# axis.title = element_text(size=40))
# dev.off()
print("Computing quadword regression.")

# Regress match time on matches, on quadwords, and on both together, printing
# each model summary for comparison.
reg_df <- data.frame(Quadwords = interpreter$quadwords,
                     Matches = compiler$matches,
                     Time = compiler$match)
print(summary(lm(Time ~ Matches, data = reg_df)))
print(summary(lm(Time ~ Quadwords, data = reg_df)))
print(summary(lm(Time ~ ., data = reg_df)))

plot_df <- data.frame(Quadwords = interpreter$quadwords,
                      MatchTime = compiler$match)

print("Plotting quadwords vs. time.")
# BUG FIX: the original chain ended with a dangling "+" immediately before
# ggsave(), so the parser spliced ggsave() into the ggplot expression
# (erroring / saving the wrong plot). Build the plot object first, then save
# it explicitly via ggsave(plot = ...).
p <- ggplot(plot_df, aes(x = Quadwords, y = MatchTime)) +
  theme_minimal() +
  geom_smooth(method = "lm", se = FALSE) +
  theme(aspect.ratio = 1 / 2) +
  geom_point(alpha = 1 / 10) +
  theme(axis.text = element_text(size = 20),
        axis.title = element_text(size = 20)) +
  # Match times are recorded in seconds; relabel the axis in microseconds.
  scale_y_continuous(name = "Match Time (us)",
                     labels = c("0", "100", "200", "300", "400", "500"),
                     breaks = c(0, 0.0001, 0.0002, 0.0003, 0.0004, 0.0005),
                     limits = c(0, 0.0005))
ggsave(out_name1, plot = p, width = 10, height = 5)
# df <- data.frame(interpreter$matches, compiler$match)
# names(df)[names(df) == 'interpreter.matches'] <- 'Matches'
# names(df)[names(df) == 'compiler.match'] <- 'MatchTime'
# print("Plotting quadwords vs. time.")
# png(filename=out_name1,width=1600,height=1200)
# ggplot(df, aes(x=Matches,y=MatchTime)) +
# theme_minimal() +
# geom_point(alpha=1/10) +
# theme(axis.text = element_text(size=40),
# axis.title = element_text(size=40)) +
# ylim(0, 0.002)
# dev.off()
|
a91178fd4fae7f091a2f87c9978f3b1d36e6bbbc | 67c77f4ab034a77c14ae34ed2882504993e283ae | /Expr.R | fd77c74c3298576f885700be883fbc401dcd281e | [] | no_license | maxim-h/ERBB | d2eb89c7d806a800f13dec6092ff1b7455f47528 | 2b9402a4b3f41e25b26427f66d6ceab9c99f72c4 | refs/heads/master | 2020-03-13T08:39:35.326693 | 2018-04-29T09:06:05 | 2018-04-29T09:06:05 | 131,047,925 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,095 | r | Expr.R | library(DESeq2)
library(data.table)
library(dplyr)
library(KEGGprofile)
library(biomaRt)
library(plyr)
library(ggplot2)
# Build an HTSeq-count sample sheet for one condition directory: every file
# whose name contains "counts" becomes a sample row, and the directory name
# itself is used as the condition label. Returns a character matrix with
# columns sampleName, sampleFiles, sampleCondition.
sample_table <- function(directory){
  count_files <- grep("counts", list.files(directory), value = TRUE)
  conditions <- rep(directory, times = length(count_files))
  full_paths <- paste0(directory, "/", count_files)
  cbind(sampleName = count_files,
        sampleFiles = full_paths,
        sampleCondition = conditions)
}
# Build the sample sheet from every *.tsv in the working directory (assumes
# each "<name>.tsv" has a matching "<name>/" directory of count files --
# TODO confirm), then drop duplicated sample names.
sampleTable <- sub(".tsv", "", list.files(".", pattern = ".tsv")) %>% lapply(., sample_table) %>% do.call(rbind, .) %>% data.frame
sampleTable <- sampleTable[!duplicated(sampleTable$sampleName),]
# Differential expression with condition as the only design factor.
ddsHTSeq <- DESeqDataSetFromHTSeqCount(sampleTable = sampleTable,
                                       design= ~ sampleCondition)
dds <- DESeq(ddsHTSeq)
# Variance-stabilizing transform for PCA / visualisation.
vst_dds <- vst(dds)
#counts.norm <- assay(vst_dds)
plotPCA(vst_dds, intgroup=c("sampleCondition")) + theme_bw()
# Normalized counts; note `c` shadows base::c for the rest of the script.
c <- counts(dds, normalized=T)
# Strip Ensembl version suffixes (ENSG...XX.N -> ENSG...XX).
rownames(c) <- gsub("\\..*$", "", rownames(c))
# NOTE(review): `conv` is never used afterwards (biomaRt's `bm` is used
# instead) -- candidate for removal.
conv <- convertId(c, filters="entrezgene")
ens <- gsub("\\..*$", "", rownames(c))
length(unique(ens)) == length(ens)
# Map Ensembl gene IDs to Entrez IDs via biomaRt.
mart<- useDataset("hsapiens_gene_ensembl", useMart("ensembl"))
ensembl_genes<- rownames(c)
bm <- getBM(
  filters= "ensembl_gene_id",
  attributes= c("ensembl_gene_id", "entrezgene", "description"),
  values= ensembl_genes,
  mart= mart)
bm$description <- NULL
c <- data.frame(c)
c$ensgene <- rownames(c)
m <- merge(c, bm, by.x = "ensgene", by.y = "ensembl_gene_id")
m <- data.table(m)
m$ensgene <- NULL
# Collapse multiple Ensembl genes mapping to one Entrez ID by median count.
a <- aggregate(. ~ entrezgene, data=m, median)
rownames(a) <- a$entrezgene
a$entrezgene <- NULL
# KEGG ErbB signaling pathway (hsa04012) overlay, excluding "13" samples.
plot_pathway(a[,!grepl("13", sampleTable$sampleCondition)], pathway_id = "04012",species='hsa', type="lines", result_name="12.png")
# Entrez IDs for EGFR / ERBB2 / ERBB3 / ERBB4.
erbb <- c("1956", "2064", "2065", "2066")
erbb_expr <- a[rownames(a) %in% erbb,]
pl <- data.frame(list(erbb=colMeans(as.matrix(erbb_expr)), codon=sampleTable$sampleCondition, file=sampleTable$sampleName))
ggplot(data=pl, aes(x=codon,y=erbb, fill=codon))+
  geom_violin()
## Pre-Filtering. Remove rows with few counts
# NOTE(review): this filter runs AFTER DESeq() and all analyses above, so it
# has no effect on any result in this script; move it before DESeq(ddsHTSeq)
# for it to matter.
keep <- rowSums(counts(dds)) >= 10
dds <- dds[keep,]
|
4ec68c39a92e7213b55a5e60cb25187f11197083 | 7917fc0a7108a994bf39359385fb5728d189c182 | /cran/paws.analytics/man/glue_get_partition_indexes.Rd | e7a7d9384194f1399579394484fd3b7251ff7650 | [
"Apache-2.0"
] | permissive | TWarczak/paws | b59300a5c41e374542a80aba223f84e1e2538bec | e70532e3e245286452e97e3286b5decce5c4eb90 | refs/heads/main | 2023-07-06T21:51:31.572720 | 2021-08-06T02:08:53 | 2021-08-06T02:08:53 | 396,131,582 | 1 | 0 | NOASSERTION | 2021-08-14T21:11:04 | 2021-08-14T21:11:04 | null | UTF-8 | R | false | true | 1,683 | rd | glue_get_partition_indexes.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glue_operations.R
\name{glue_get_partition_indexes}
\alias{glue_get_partition_indexes}
\title{Retrieves the partition indexes associated with a table}
\usage{
glue_get_partition_indexes(CatalogId, DatabaseName, TableName,
NextToken)
}
\arguments{
\item{CatalogId}{The catalog ID where the table resides.}
\item{DatabaseName}{[required] Specifies the name of a database from which you want to retrieve
partition indexes.}
\item{TableName}{[required] Specifies the name of a table for which you want to retrieve the
partition indexes.}
\item{NextToken}{A continuation token, included if this is a continuation call.}
}
\value{
A list with the following syntax:\preformatted{list(
PartitionIndexDescriptorList = list(
list(
IndexName = "string",
Keys = list(
list(
Name = "string",
Type = "string"
)
),
IndexStatus = "CREATING"|"ACTIVE"|"DELETING"|"FAILED",
BackfillErrors = list(
list(
Code = "ENCRYPTED_PARTITION_ERROR"|"INTERNAL_ERROR"|"INVALID_PARTITION_TYPE_DATA_ERROR"|"MISSING_PARTITION_VALUE_ERROR"|"UNSUPPORTED_PARTITION_CHARACTER_ERROR",
Partitions = list(
list(
Values = list(
"string"
)
)
)
)
)
)
),
NextToken = "string"
)
}
}
\description{
Retrieves the partition indexes associated with a table.
}
\section{Request syntax}{
\preformatted{svc$get_partition_indexes(
CatalogId = "string",
DatabaseName = "string",
TableName = "string",
NextToken = "string"
)
}
}
\keyword{internal}
|
a35d845b3852a4c0ef78f853892114442e44989b | 8fabf5687421859c4f68b3063cb52d5acfb941af | /ml.species.delim/ml.species.delimitation.R | e3a002b5518a5cb4272e722feb97822417479ac4 | [] | no_license | DevonDeRaad/aph.rad | 01aa3def14387d84cc24aae3824586141884429e | 7de8067fee4d3bdc0fce65e55f0e17b8c53f078d | refs/heads/master | 2023-05-25T06:45:01.277996 | 2022-06-16T19:54:17 | 2022-06-16T19:54:17 | 296,434,325 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,860 | r | ml.species.delimitation.R | ###############################################
###############################################
## script adapted from Derkarabetian S., Castillo S., Peter K.K., Ovchinnikov S., Hedin M. "An Empirical Demonstration of Unsupervised Machine Learning in Species Delimitation"
###############################################
###############################################
#required packages
library("adegenet")
library("randomForest")
library("PCDimension")
library("mclust")
library("cluster")
library("MASS")
library("factoextra")
library("tsne")
# import str file. Adjust input file name, n.ind, and n.loc for specific file/dataset.
# example dataset used in this study
# data <- read.structure("Metano_UCE_SNPs_70percent_random_structure-adegenet.str", n.ind=30, n.loc=316, onerowperind=FALSE, col.lab=1, col.pop=3, col.others=NULL, row.marknames=NULL, NA.char="-9", pop=NULL, ask=FALSE, quiet=FALSE)
# data <- read.structure("input.str", n.ind=XX, n.loc=XX, onerowperind=FALSE, col.lab=1, col.pop=0, col.others=NULL, row.marknames=NULL, NA.char="-9", pop=NULL, ask=FALSE, quiet=FALSE)
#read in vcf as vcfR
# Read the filtered VCF (alternate unlinked-SNP input kept for reference).
#vcfR <- read.vcfR("~/Desktop/aph.data/unlinked.filtered.recode.vcf")
vcfR <- read.vcfR("~/Desktop/aph.data/unzipped.filtered.vcf")
dim(vcfR@gt) #1779 out of 16307 SNPs contain no missing data

# Convert genotypes to a sample x SNP matrix and keep only Z-chromosome SNPs.
e <- vcfR2genlight(vcfR)
e <- as.matrix(e)
dim(e)
e <- e[, vcfR@fix[, 1] == "PseudochrZ"]

# Z-chromosome heterozygosity per sample: count heterozygous genotypes
# (coded 1) in each row. Vectorized rowSums() replaces the index loop.
f <- rowSums(e == 1, na.rm = TRUE)
hist(f, nclass = 20)
table(f)

#vcfR@fix<-vcfR@fix[rowSums(is.na(vcfR@gt)) == 0,]
#vcfR@gt<-vcfR@gt[rowSums(is.na(vcfR@gt)) == 0,]

# Convert to a genind object for the adegenet analyses below.
data <- vcfR2genind(vcfR)
# BUG FIX: the original first scaleGen() call passed an undefined variable
# `nf` as a positional argument and would error; only the working call is
# kept. NOTE(review): that broken call also requested NA.method = "mean" --
# confirm whether mean-imputation of missing genotypes was intended before
# the PCA.
data_scaled <- scaleGen(data, center = FALSE, scale = FALSE)
###############################################
###############################################
# PCA and DAPC
###############################################
###############################################
# DAPC (interactive, requires input)
#clusters <- find.clusters(data, max.n.clust=10, n.iter=1e6, n.start=10)
#with the appropriate settings
clusters <- find.clusters(data, max.n.clust=10, n.iter=1e6, n.start=10, n.pca = 50, n.clust = 6)
#results <- dapc(data, clusters$grp, perc.pca = NULL)
#with appropriate settings
results <- dapc(data, clusters$grp, perc.pca = NULL, n.pca = 6, n.da = 4)
compoplot(results)
scatter.dapc(results, xax = 1, yax=2)
scatter.dapc(results, xax = 3, yax=4)
dap<-results$tab
dap$clusters<-clusters$grp
ggplot(data=dap, aes(x=`PCA-pc.3`, y=`PCA-pc.4`, col=clusters))+
geom_point(cex=3)+
theme_classic()
#prefers 6 groups with texas as a unique cluster
grp_k <- nlevels(clusters$grp)
# PCA, can adjust nf to include more components
pca1 <- dudi.pca(data_scaled, center=TRUE, scale=TRUE, scannf=FALSE, nf=5)
# PCA with DAPC groups
pc<-pca1$li
ggplot(data=pc, aes(x=Axis3, y=Axis4, col=dap$clusters))+
geom_point(cex=3)+
theme_classic()
# pam clustering on pca output
for (i in 2:10){
print(paste(i, mean(silhouette(pam(pc, i))[, "sil_width"])))
}
#prefers 6 groups with non-perfect split between US and Mexico
pam(pc, 6)
# determine optimal k of PCA via hierarchical clustering with BIC
# adjust G option to reasonable potential cluster values, e.g. for up to 12 clusters, G=1:12
pca_clust <- Mclust(pc, G=1:10)
pca_clust$classification
#prefers 6 groups, with non-perfect split between US and Mexico
###############################################
###############################################
# into the Random Forest, unsupervised
###############################################
###############################################
# convert genind scaled data to factors for randomForest
data_conv <- as.data.frame(data_scaled)
data_conv[is.na(data_conv)] <- ""
data_conv[sapply(data_conv, is.integer)] <- lapply(data_conv[sapply(data_conv, is.integer)], as.factor)
data_conv[sapply(data_conv, is.character)] <- lapply(data_conv[sapply(data_conv, is.character)], as.factor)
nsamp <- nrow(data_conv)
# unsupervised random forest
rftest <- randomForest(data_conv, ntree=5000)
#rftest <- randomForest(pca1$tab, ntree=500)
#rftest <- randomForest(data_scaled, ntree=500)
###############
# classic MDS
###############
# cMDS with optimal number of components to retain using broken-stick
cmdsplot1 <- MDSplot(rf=rftest, fac=results$grp, k=10) # may need to adjust number of dimensions if given error
cmdsplot_bstick <- PCDimension::bsDimension(cmdsplot1$eig)
cmdsplot2 <- MDSplot(rftest, results$grp, cmdsplot_bstick)
#cMDS plot with dapc groups
cmds<-as.data.frame(cmdsplot2$points)
ggplot(data=cmds, aes(x=`Dim 1`, y=`Dim 2`, col=dap$clusters))+
geom_point(cex=3)+
theme_classic()
# pam clustering on cMDS output
for (i in 2:10){
print(paste(i, mean(silhouette(pam(cmdsplot1$points, i))[, "sil_width"])))
}
#prefers 6 groups, matching dapc
DAPC_pam_clust_prox <- pam(cmdsplot1$points, 6)
DAPC_pam_clust_prox$clustering
# cMDS with optimal k and clusters via PAM
cmds$clusters<-as.factor(DAPC_pam_clust_prox$clustering)
ggplot(data=cmds, aes(x=`Dim 1`, y=`Dim 2`, col=clusters))+
geom_point(cex=3)+
theme_classic()
# determine optimal k from cMDS via hierarchical clustering with BIC
# adjust G option to reasonable potential cluster values, e.g. for up to 12 clusters, G=1:12
cmdsplot_clust <- Mclust(cmdsplot2$points)
cmdsplot_clust$classification
#hierarchical clustering of random forest identifies 7 groups
# cMDS with optimal k and clusters of RF via hierarchical clustering
cmds$clusters<-as.factor(cmdsplot_clust$classification)
ggplot(data=cmds, aes(x=`Dim 1`, y=`Dim 2`, col=clusters))+
geom_point(cex=3)+
theme_classic()
###############
# isotonic MDS
###############
# isoMDS
isomdsplot <- isoMDS(1-rftest$proximity)
# "The output of cmdscale on 1 - rf$proximity is returned invisibly" (MDSplot documentation)
#plot isomds with dapc groups
df<-as.data.frame(isomdsplot$points)
ggplot(data=df, aes(x=V1, y=V2, col=results$grp))+
geom_point(cex=3)+
theme_classic()
# pam clustering on isomds with optimal k from DAPC
for (i in 2:10){
print(paste(i, mean(silhouette(pam(isomdsplot$points, i))[, "sil_width"])))
}
#pam prefers only 2 groups, Florida and everything else
# determine optimal k of RF via hierarchical clustering with BIC
# adjust G option to reasonable potential cluster values, e.g. for up to 12 clusters, G=1:12
isomdsplot_clust <- Mclust(isomdsplot$points, G =1:10)
isomdsplot_clust$classification
#prefers only 2 groups
# isoMDS with optimal k and clusters of RF via hierarchical clustering
ggplot(data=df, aes(x=V1, y=V2, col=as.factor(isomdsplot_clust$classification)))+
geom_point(cex=3)+
theme_classic()
###############################################
###############################################
# t-SNE
###############################################
###############################################
# prepare plot labels and such
# this makes it so it is grouped by DAPC clusters
colors = rainbow(length(unique(results$grp)))
names(colors) = unique(results$grp)
ecb = function(x,y){plot(x,t='n'); text(x, labels=results$grp, col=colors[results$grp])}
# t-SNE on principal components of scaled data
# adjust perplexity, initial_dims
# can do k=3 for 3D plot
# should do only <50 variables
# can do it on pca$li (if you reduce the number of components), or on cmdsplot2$points
tsne_p5 = tsne(pca1$tab, epoch_callback=ecb, max_iter=5000, perplexity=5, initial_dims=5)
# tSNE plot with DAPC groups
plot(tsne_p5, main="t-SNE perplexity=5 with DAPC optimal k and clusters", col=results$grp, pch=16)
# pam clustering with optimal k from DAPC
for (i in 2:10){
print(paste(i, mean(silhouette(pam(tsne_p5, i))[, "sil_width"])))
}
#pam prefers same 6 groups as DAPC
pam(tsne_p5, 6)
# determine optimal k of tSNE via hierarchical clustering with BIC
# adjust G option to reasonable potential cluster values, e.g. for up to 12 clusters, G=1:12
tsne_p5_clust <- Mclust(tsne_p5)
mclust_grps_tsne_p5 <- as.numeric(tsne_p5_clust$classification)
max(mclust_grps_tsne_p5)
# t-SNE p5 with optimal k and clusters of RF via hierarchical clustering
plot(tsne_p5, xlab="Scaling Dimension 1", ylab="Scaling Dimension 2", main="t-SNE p5 RF optimal K and clusters (hierarchical clustering)", col=mclust_grps_tsne_p5, pch=16)
mclust_grps_tsne_p5
f<-as.data.frame(tsne_p5)
# tSNE with optimal k and clusters via hierarchical clustering
ggplot(data=f, aes(x=V1, y=V2, col=as.factor(mclust_grps_tsne_p5)))+
geom_point(cex=3)+
theme_classic()
cbind(rownames(pca1$tab), mclust_grps_tsne_p5)
#prefers 6 groups where woodhouseii is split into US and Mexico groups, with Texas lumped with the US
#bring in results from VAE and visualize here:
|
dbd8212718030b69090f9e4a6cb03be44f234ae4 | cc423d827ec05e57886562e7efdac4561abc51c5 | /ShinyAppV2/rowApp_Tab/nav_TechnicalGuidance/tab_TechnicalGuidance_Overview.R | bc0b4d4a15185cadc9a01319443442feb9a73b9c | [] | no_license | andej016/rowApp | 3cc8065e0407314dc75f6178a102cfd9a4443f5e | f5dc5ea60c7c03b43da79e882b97207fe0cea793 | refs/heads/master | 2020-03-07T05:43:25.829537 | 2018-05-10T14:45:02 | 2018-05-10T14:45:02 | 127,303,905 | 0 | 0 | null | 2018-04-09T13:45:57 | 2018-03-29T14:32:46 | R | UTF-8 | R | false | false | 1,459 | r | tab_TechnicalGuidance_Overview.R | ## Technical Guidance- Overview
## Technical Guidance - Overview tab UI.
## Builds the static shinydashboard tabItem for the "overview" tab: a sidebar
## with the page header plus a contact line, and a main panel of introductory
## HTML text. Depends on `emailAddress` being defined elsewhere in the app.
## FIX: corrected typos in the user-facing text ("Techinique" -> "Technique",
## "athelete" -> "athlete", "potentionally" -> "potentially").
rowApp_Tab_TechnicalGuidance_Overview <-
  tabItem(tabName = "overview",
          sidebarPanel(
            id="sidebar",
            div(
              class = "view-split-nav view-split-block",
              div(
                class = "page-header",
                h1("Technical Guidance Overview")
              ),
              br(),
              br(),
              paste("For more information or assistance email",
                    emailAddress,
                    sep = " ")
            )
          ),
          mainPanel(
            HTML(
              "Rowing Technique takes many forms. In this app we have attempted to
              subset the rowing stroke to provide technical insight
              on each element of the stroke.<br>
              There are many articles available in the menu expressing opinions about
              these different elements.<br>
              It should be noted that every coach and athlete has a different opinion
              about what the correct or most effective technique is. This site is intended
              to aid, advise and potentially confuse athletes and coaches. Please read over
              the material and utilise the information as you see fit.<br>
              <br>
              Finally, if you have anything to add or further technical guidance to give please
              email this to us."
            )
          )
  )
a94c3e6e011afefec46d2242dd452f7ffda9c591 | c9e3109e190b9ae3d59459176b047655284e0b3b | /fish-analysis.R | f433235d63a1e744ac55e6aa9f01bea5d16de5d7 | [] | no_license | fish497-2018/StevenRoberts | 8818d58e895f803f6f9fee961df9ed7381cb8fea | 75099285eb5a17dd885b004d432126234d3f7e3f | refs/heads/master | 2020-03-15T22:17:31.331966 | 2018-05-07T21:18:46 | 2018-05-07T21:18:46 | 132,370,345 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 252 | r | fish-analysis.R |
## Read the Gaeta et al. CLC fish dataset and add a size category column.
fish_data = read.csv("Gaeta_etal_CLC_data.csv")
library(dplyr)
## Categorise fish as "big"/"small" using a 200-unit length cut-off.
fish_data_cat = fish_data %>%
  mutate(length_cat = ifelse(length > 200, "big", "small"))
## NOTE(review): this second assignment overwrites the 200-unit version above
## with a 300-unit cut-off -- confirm the first categorisation is intentional
## scratch work and not needed downstream.
fish_data_cat = fish_data %>%
  mutate(length_cat = ifelse(length > 300, "big", "small"))
|
1421b90ccdbdf3632864de706e15275e080e878d | 5e853d060ad8b6baf64a55d9c423c1720420678b | /R/transcripts.R | ed559ba50bb5509393a57eb732fce3517f90359b | [] | no_license | zwdzwd/sesameData | 173a78fd51b4ba10d545f4f1979b9a8059f9b1a5 | 2928d9f6733dec7be243c8c029ae1987e63bc07f | refs/heads/master | 2023-05-13T17:58:00.796169 | 2023-04-05T20:48:26 | 2023-04-05T20:48:26 | 128,075,677 | 0 | 8 | null | 2021-05-03T14:12:20 | 2018-04-04T14:38:55 | R | UTF-8 | R | false | false | 6,847 | r | transcripts.R |
## Download a GENCODE GTF from `x` (a URL) and parse it into a tibble.
## Columns follow the 9-column GTF layout; the free-form attribute string is
## kept verbatim in `additional` for the read_GENCODE_gtf_* helpers to unpack.
## NOTE(review): the download target is a fixed name under tempdir(), so a
## second call in the same session silently overwrites the first download.
read_GENCODE_gtf <- function(x) {
    ## https://www.gencodegenes.org/pages/data_format.html
    download.file(x, sprintf("%s/gtf.gz", tempdir()), mode="wb")
    gtf <- read_tsv(sprintf("%s/gtf.gz", tempdir()), comment="#",
        col_names = c("chrm", "source", "feature_type", "start", "end",
            "score_not_used", "strand", "cds_phase", "additional"),
        col_types=cols(
            start=col_integer(),
            end=col_integer(),
            .default=col_character()))
    gtf
}
## Keep only "transcript" rows of a raw GENCODE gtf tibble and unpack the
## key-value attributes packed into `additional` into proper columns.
read_GENCODE_gtf_transcript <- function(gtf) {
    tx <- gtf[gtf$feature_type == "transcript", ]
    ## all quoted attributes share the syntax:  key "value"
    quoted_keys <- c("transcript_id", "transcript_name", "transcript_type",
        "gene_id", "gene_name", "gene_type")
    for (key in quoted_keys) {
        tx[[key]] <- str_match(
            tx$additional, paste0(key, ' "([^"]*)"'))[, 2]
    }
    ## "level" is stored unquoted, so it needs its own pattern
    tx$level <- str_match(tx$additional, 'level ([^;]*)')[, 2]
    ## gene_status and transcript_status are obsolete after 25 and M11
    tx
}
## Keep only "exon" rows of a raw GENCODE gtf tibble and pull the exon-level
## attributes out of the packed `additional` column.
read_GENCODE_gtf_exon <- function(gtf) {
    exons <- gtf[gtf$feature_type == "exon", ]
    ## small extractor over the (unchanged) attribute string
    grab <- function(pattern) str_match(exons$additional, pattern)[, 2]
    exons$transcript_id <- grab('transcript_id "([^"]*)"')
    exons$exon_id <- grab('exon_id "([^"]*)"')
    exons$exon_number <- grab('exon_number ([^;]*)')
    exons
}
## Derive a conventional chromosome ordering ("chr1"..."chrN", then X, Y, M)
## from the chromosome names actually observed in the data.
guess_chrmorder <- function(chrms) {
    ## autosomes only: drop the sex/mito chromosomes before parsing numbers
    autosomes <- chrms[!(chrms %in% c("chrX", "chrY", "chrM"))]
    nums <- as.integer(str_replace(sort(unique(autosomes)), "chr", ""))
    top <- max(nums, na.rm = TRUE)
    paste0("chr", c(seq_len(top), "X", "Y", "M"))
}
#' build GENCODE gtf
#'
#' Downloads and parses a GENCODE GTF, then assembles a GRangesList of exons
#' (one list element per transcript) with transcript-level metadata attached
#' as mcols, including the CDS span (cdsStart/cdsEnd) where present.
#'
#' @param x GENCODE ftp url
#' @return GRangesList
#' @importFrom readr read_tsv
#' @importFrom readr cols
#' @importFrom readr col_integer
#' @importFrom readr col_character
#' @importFrom GenomeInfoDb Seqinfo
#' @importFrom IRanges IRanges
#' @import stringr
#' @import GenomicRanges
build_GENCODE_gtf <- function(x) {
    gtf <- read_GENCODE_gtf(x)
    ## transcript
    g1 <- read_GENCODE_gtf_transcript(gtf)
    ## transcript IDs must be unique: they become the list names below
    stopifnot(length(g1$transcript_id) == length(unique(g1$transcript_id)))
    ## exon
    g2 <- read_GENCODE_gtf_exon(gtf)
    g2chrms <- guess_chrmorder(g2$chrm)
    gr <- GRanges(seqnames = g2$chrm, ranges=IRanges(g2$start, g2$end),
        strand = g2$strand, seqinfo=Seqinfo(chrms))
    mcols(gr)$exon_number <- as.integer(g2$exon_number)
    names(gr) <- g2$exon_id
    ## group exons by transcript; one GRanges per transcript
    grl <- GRangesList(split(gr, g2$transcript_id)) # slow
    stopifnot(length(grl) == length(g1$transcript_id))
    stopifnot(all(sort(names(grl)) == sort(g1$transcript_id)))
    ## CDS
    g3 <- gtf[gtf$feature_type == "CDS", ]
    g3$transcript_id <- str_match(g3$additional, 'transcript_id "([^"]*)"')[,2]
    ## CDS span per transcript = min start / max end over its CDS records;
    ## transcripts without a CDS get NA via the name lookup below
    tid2start <- vapply(split(g3$start, g3$transcript_id), min, numeric(1))
    tid2end <- vapply(split(g3$end, g3$transcript_id), max, numeric(1))
    g1$cdsStart <- tid2start[g1$transcript_id]
    g1$cdsEnd <- tid2end[g1$transcript_id]
    ## put together: order transcripts genomically, then align grl to them
    g1 <- g1[order(factor(g1$chrm, levels=chrms), g1$start),]
    grl <- grl[g1$transcript_id]
    mcl <- g1[match(names(grl), g1$transcript_id), c(
        "chrm", "start", "end", "strand",
        "transcript_id", "transcript_type", "transcript_name",
        "gene_name", "gene_id", "gene_type", "source", "level",
        "cdsStart", "cdsEnd")]
    ## rename to avoid clashing with the exon-level start/end inside grl
    colnames(mcl)[2] <- "transcript_start"
    colnames(mcl)[3] <- "transcript_end"
    colnames(mcl)[4] <- "transcript_strand"
    mcols(grl) <- mcl
    grl
}
#' convert GRangesList to transcript GRanges
#'
#' @param genome hg38, mm10, ...
#' @param grl GRangesList object
#' @return a GRanges object
#' @examples
#' txns <- sesameData_getTxnGRanges("mm10")
#' ## get verified protein-coding
#' txns <- txns[(txns$transcript_type == "protein_coding" & txns$level <= 2)]
#'
#' @export
sesameData_getTxnGRanges <- function(genome = NULL, grl = NULL) {
    ## default: fetch the cached exon-level GRangesList for the genome
    if (is.null(grl)) {
        genome <- sesameData_check_genome(genome, NULL)
        grl <- sesameDataGet(sprintf("genomeInfo.%s", genome))$txns
    }
    ## build one range per transcript from the transcript-level metadata
    ## columns (transcript_start/end/strand), not by reducing exon ranges
    mcl <- mcols(grl)
    gr <- GRanges(
        seqnames = mcl$chrm, ranges = IRanges(
            mcl$transcript_start, mcl$transcript_end),
        strand = mcl$transcript_strand, seqinfo = seqinfo(grl))
    names(gr) <- mcl$transcript_id
    ## the columns that became the range itself are dropped from mcols
    mcols(gr) <- mcl[,colnames(mcl)[!(colnames(mcl) %in% c(
        "chrm","transcript_id", "transcript_start",
        "transcript_end","transcript_strand"))]]
    gr <- sort(gr, ignore.strand = TRUE)
    gr
}
#' convert transcript GRanges to gene GRanges
#'
#' Collapses transcripts to one range per gene: the gene span is the minimum
#' start / maximum end over all of its transcripts; seqname, strand and the
#' gene_name/gene_type annotation come from the gene's first transcript.
#'
#' @param txns GRanges object
#' @return a GRanges object
#' @examples
#' txns <- sesameData_getTxnGRanges("mm10")
#' genes <- sesameData_txnToGeneGRanges(txns)
#'
#' @export
sesameData_txnToGeneGRanges <- function(txns) {
    ids <- unique(txns$gene_id)
    ## index of each gene's first transcript; reused for seqname, strand
    ## and the per-gene annotation columns
    first_hit <- match(ids, txns$gene_id)
    starts <- vapply(split(start(txns), txns$gene_id)[ids], min, integer(1))
    ends <- vapply(split(end(txns), txns$gene_id)[ids], max, integer(1))
    genes <- GRanges(
        seqnames = seqnames(txns)[first_hit],
        IRanges(starts, ends),
        strand = strand(txns)[first_hit])
    names(genes) <- ids
    mcols(genes)$gene_name <- txns$gene_name[first_hit]
    mcols(genes)$gene_type <- txns$gene_type[first_hit]
    sort(genes, ignore.strand = TRUE)
}
#' get genes next to certain probes
#'
#' @param Probe_IDs probe IDs
#' @param platform EPIC, HM450, ... will infer if not given
#' @param genome hg38, mm10, ... will infer if not given.
#' For additional mapping, download the GRanges object from
#' http://zwdzwd.github.io/InfiniumAnnotation
#' and provide the following argument
#' ..., genome = sesameAnno_buildManifestGRanges("downloaded_file"),...
#' to this function.
#' @param max_distance maximum distance to gene (default: 10000)
#' @return a GRanges object for overlapping genes
#' @importMethodsFrom IRanges subsetByOverlaps
#' @examples
#' sesameData_getGenesByProbes(c("cg14620903","cg22464003"))
#' @export
sesameData_getGenesByProbes <- function(
    Probe_IDs, platform = NULL, genome = NULL, max_distance = 10000) {
    if (is.null(platform)) {
        platform <- inferPlatformFromProbeIDs(Probe_IDs) }
    ## gene models for the (checked/inferred) genome of this platform
    genes <- sesameData_txnToGeneGRanges(
        sesameData_getTxnGRanges(
            sesameData_check_genome(genome, platform)))
    probes <- sesameData_getManifestGRanges(platform, genome=genome)
    ## not every probes are mappable
    probes <- probes[names(probes) %in% Probe_IDs]
    ## `probes + max_distance` widens each probe range on both sides, so
    ## genes within max_distance of any requested probe are returned
    subsetByOverlaps(genes, probes + max_distance)
}
|
65b88759b3d8a501f0421426f22f3dedc82c1b60 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/PCovR/examples/ErrorRatio.Rd.R | 4af9fe1d18841398e8db8e179db71416ae18ca39 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 213 | r | ErrorRatio.Rd.R | library(PCovR)
### Name: ErrorRatio
### Title: Error variance ratio
### Aliases: ErrorRatio
### Keywords: regression

### ** Examples

## Auto-generated example from the PCovR Rd file: estimate the error
## variance ratio for the bundled psychiatrists example data.
data(psychiatrists)
ratio <- ErrorRatio(psychiatrists$X,psychiatrists$Y)
|
180090f86c9fb58cb879cbf8e22eaafb7cf8239e | c85a7198653461c25d031f7e93d78368b1eb6833 | /man/fill_na.Rd | 26172ef3c9445fdbfdf0dbe33fbc6662fdaeb80e | [] | no_license | rBatt/timeScales | 70a6159fde3c5062d6abe37e58811d251dcf384e | c25bb8bbc486147a5529bed75ea6b150575ed18e | refs/heads/master | 2021-08-30T16:57:41.955274 | 2021-08-10T23:35:16 | 2021-08-10T23:35:16 | 100,967,320 | 0 | 4 | null | null | null | null | UTF-8 | R | false | true | 561 | rd | fill_na.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fill_na.R
\name{fill_na}
\alias{fill_na}
\title{Fill-in NA's}
\usage{
fill_na(x)
}
\arguments{
\item{x}{vector with NA's that is to be interpolated}
}
\value{
a numeric vector or a \code{ts} object
}
\description{
Fill NA values with linearly interpolated values
}
\details{
If starting or ending values are NA, the nearest non-NA value is repeated. If \code{x} is a time series (object of class \code{ts}), the output is also a \code{ts}.
}
\seealso{
\code{\link{approx}} \code{\link{ts}}
}
|
5aa9b331620fd56a0a4af0ba1da280602a14b888 | 92cc6f096d238bc24c3f4d9c1cb747276497886b | /data_cleaning.R | 3151fbca1eb908ff84622b0534b3bee6c416a741 | [] | no_license | agisga/SISBID2016BigDataWranglingR | 4a6cf48032d99d10db37495a550e0cff911c2468 | ed045d1231f92a1baef7742da517e8f90126c6ed | refs/heads/master | 2021-01-09T20:14:14.362248 | 2016-07-13T19:01:50 | 2016-07-13T19:01:50 | 63,177,240 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,328 | r | data_cleaning.R | #--- String trimming
## Demo script: string cleaning with base R vs stringr/dplyr on the
## Baltimore City employee salaries dataset.
name = c("Andrew", " Andrew", "Andrew ", "Andrew\t")
library(stringr)
table(name)
## str_trim removes leading/trailing whitespace (including the tab)
name <- str_trim(name)
table(name)
#--- String splitting
# base R:
x <- c("I really", "like writing", "R code programs")
y <- strsplit(x, split = " ") # returns a list
# stringr:
y2 <- str_split(x, " ") # returns a list
substr(name, 2, 4)
str_split("asdf asdf asdf qwer asdf", " ")
str_split("asdf.asdf.asdf.asdf", ".") # `.` treated as regular expression
str_split("asdf.asdf.asdf.asdf", fixed(".")) # `.` treated as character
#--- Extract
library(dplyr)
y
sapply(y, dplyr::first) # on the fly
sapply(y, nth, 2) # on the fly
sapply(y, last) # on the fly
## NOTE(review): `Sal` is first read in under "#--- Grep" below, so running
## this script top-to-bottom fails here with "object 'Sal' not found" --
## the sections appear to be out of order; confirm intended run order.
ss = str_extract(Sal$Name, "Rawling")
head(ss)
ss[ !is.na(ss)]
# with regular expressions:
head(Sal$AgencyID)
head(str_extract(Sal$AgencyID, "\\d"))
head(str_extract_all(Sal$AgencyID, "\\d"))
#--- Grep
Sal = read.csv("Baltimore_City_Employee_Salaries_FY2014.csv",
               as.is = TRUE)
head(Sal)
any(is.na(Sal$Name))
all(complete.cases(Sal)) #returns TRUE if EVERY value of a row is NOT NA
# base R:
grep("Rawlings",Sal$Name)
grep("Rawlings",Sal$Name,value=TRUE)
head(grepl("Rawlings",Sal$Name))
which(grepl("Rawlings", Sal$Name))
Sal[grep("Rawlings",Sal$Name),]
# stringr and dplyr:
head(str_detect(Sal$Name, "Rawlings"))
which(str_detect(Sal$Name, "Rawlings"))
str_subset(Sal$Name, "Rawlings")
Sal %>% filter(str_detect(Name, "Rawlings"))
# with regular expressions in base R:
head(grep("^Payne.*", x = Sal$Name, value = TRUE))
head(grep("Leonard.?S", x = Sal$Name, value = TRUE))
head(grep("Spence.*C.*", x = Sal$Name, value = TRUE))
# with regular expressions in stringr:
head(str_subset(Sal$Name, "^Payne.*"))
head(str_subset(Sal$Name, "Leonard.?S"))
head(str_subset(Sal$Name, "Spence.*C.*"))
#--- Replacing and subbing
# in base R:
## strip the literal "$" from salaries so they can be numeric, then sort
Sal$AnnualSalary <- as.numeric(gsub(pattern = "$", replacement="",
                                    Sal$AnnualSalary, fixed=TRUE))
Sal <- Sal[order(Sal$AnnualSalary, decreasing=TRUE), ]
Sal[1:5, c("Name", "AnnualSalary", "JobTitle")]
# in stringr and dplyr:
dplyr_sal = Sal
dplyr_sal = dplyr_sal %>% mutate(
  AnnualSalary = AnnualSalary %>%
    str_replace(
      fixed("$"),
      "") %>%
    as.numeric) %>%
  arrange(desc(AnnualSalary))
check_Sal = Sal
## row names differ after ordering, so reset them before comparing
rownames(check_Sal) = NULL
all.equal(check_Sal, dplyr_sal)
|
c86821b44bbb4e622cf3db4806b4c4635ac589b9 | ca8dd4c043368c43cc42aafcba8bf8f3a6ac77b5 | /R/simple.R | 4af7eb2fe0655ab49d65ab92221c5833b1e12280 | [] | no_license | gudaleon/semeco | 47a01d82df04b88a740e4d4c9195e5aea7632292 | 1b2491ffe83c18303d8df91b1628ffecf0479d7d | refs/heads/master | 2020-03-20T05:34:54.292407 | 2016-09-23T11:09:49 | 2016-09-23T11:09:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 49 | r | simple.R | #' Toy data
#'
#' A minimal example dataset bundled with the package.
#' NOTE(review): no @format block -- consider documenting the structure of
#' `np` once it is confirmed.
#'
#' @docType data
#' @name np
NULL
|
2d9f0bf5f339a72b3242551fa7d8fee7b2e2f274 | 011ee506f52512d7245cf87382ded4e42d51bbd9 | /R/calc_summ_stats.R | ac11bb0d9dfa0ab6e3d9ab0204a22be38151bbcf | [
"MIT"
] | permissive | emilelatour/lamisc | ff5e4e2cc76968787e96746735dbadf1dd864238 | e120074f8be401dc7c5e7bb53d2f2cc9a06dd34a | refs/heads/master | 2023-08-28T02:15:00.312168 | 2023-07-27T23:39:58 | 2023-07-27T23:39:58 | 123,007,972 | 7 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,009 | r | calc_summ_stats.R |
#' Calculate summary / descriptive statistics
#'
#' @description
#' This function provides some brief overview statistics for selected
#' variables of a tbl_df. Number of observations (n), complete observations
#' (complete), missing observations (missing); mean, standard deviation (sd),
#' minimum value (p0), maximum value (p100), median (p50), interquartile
#' range (p25, p75).
#'
#' @importFrom dplyr across
#' @importFrom dplyr group_vars
#' @importFrom dplyr group_by
#' @importFrom dplyr mutate
#' @importFrom dplyr one_of
#' @importFrom dplyr summarise
#' @importFrom tidyr pivot_longer
#' @importFrom rlang .data
#'
#' @param .data A tbl
#' @param ... Variables to summarise
#'
#' @return A tbl
#'
#' @rdname calc_summ_stats
#' @export
#'
#' @examples
#' library(dplyr)
#' library(ggplot2) # to get the starwars data set
#'
#' # descriptive stats for height and mass
#' starwars %>%
#'   calc_summ_stats(
#'     height, mass
#'   )
#'
#' # Grouped by gender
#' starwars %>%
#'   group_by(gender) %>%
#'   calc_summ_stats(
#'     height, mass
#'   )
#'
#' # Derive variables within function then summarise
#' starwars %>%
#'   calc_summ_stats_t(
#'     heightm = height / 100,
#'     bmi = mass / heightm^2
#'   )
#'
#' # Grouped by gender
#' starwars %>%
#'   group_by(gender) %>%
#'   calc_summ_stats_t(
#'     heightm = height / 100,
#'     bmi = mass / heightm^2
#'   )
#'
#' # Doesn't work with factors/characters as of 2018-01-19
#' # starwars %>%
#' #   calc_summ_stats(
#' #     height, mass, gender
#' #   )
#'
calc_summ_stats <- function(.data, ...) {

  .data %>%
    tidyr::pivot_longer(data = .,
                        cols = c(...,
                                 -dplyr::one_of(dplyr::group_vars(.))),
                        names_to = "variable",
                        values_to = "value",
                        ## FIX: names_transform must be keyed by the new
                        ## column's name ("variable", from `names_to`). The
                        ## old key "key" was silently ignored, so variables
                        ## were not kept in order of appearance.
                        names_transform = list(variable = forcats::fct_inorder)) %>%
    dplyr::group_by(.data$variable,
                    .add = TRUE) %>%
    ## one row per variable (per group), one column per summary function
    dplyr::summarise(dplyr::across(.cols = c(.data$value),
                                   .fns = summary_functions,
                                   .names = "{.fn}"),
                     .groups = "drop") %>%
    ## derived stats: overall spread and coefficient of variation (%)
    dplyr::mutate(range = .data$p100 - .data$p0,
                  CV = 100 * .data$sd / .data$mean) %>%
    ## drop the factor class now that the row order is fixed
    dplyr::mutate(variable = as.character(.data$variable))
}
#' @rdname calc_summ_stats
#' @export
calc_summ_stats_t <- function(.data, ...) {
  ## transmute first: `...` here are mutate-style expressions that derive
  ## the variables to summarise (grouping columns are retained by transmute)
  .data %>%
    dplyr::transmute(...) %>%
    tidyr::pivot_longer(data = .,
                        cols = c(dplyr::everything(),
                                 -dplyr::one_of(dplyr::group_vars(.))),
                        names_to = "variable",
                        values_to = "value",
                        ## FIX: keyed by "variable" (the `names_to` column);
                        ## the old key "key" was silently ignored, so
                        ## variables were not kept in order of appearance.
                        names_transform = list(variable = forcats::fct_inorder)) %>%
    dplyr::group_by(.data$variable,
                    .add = TRUE) %>%
    dplyr::summarise(dplyr::across(.cols = c(.data$value),
                                   .fns = summary_functions,
                                   .names = "{.fn}"),
                     .groups = "drop") %>%
    ## derived stats: overall spread and coefficient of variation (%)
    dplyr::mutate(range = .data$p100 - .data$p0,
                  CV = 100 * .data$sd / .data$mean) %>%
    dplyr::mutate(variable = as.character(.data$variable))
}
#### Function to calc summary stats --------------------------------
## Named list of lambda-style summary functions applied to each variable by
## `across()` in calc_summ_stats()/calc_summ_stats_t(); the list names become
## the output column names (via .names = "{.fn}").
summary_functions <- list(
  n = ~ length(.),                                    # total observations
  complete = ~ sum(!is.na(.)),                        # non-missing count
  missing = ~ sum(is.na(.)),                          # missing count
  mean = ~ mean(., na.rm = TRUE),
  sd = ~ sd(., na.rm = TRUE),
  p0 = ~ min(., na.rm = TRUE),                        # minimum
  p25 = ~ quantile(., probs = 0.25, na.rm = TRUE),    # lower quartile
  p50 = ~ quantile(., probs = 0.50, na.rm = TRUE),    # median
  p75 = ~ quantile(., probs = 0.75, na.rm = TRUE),    # upper quartile
  p100 = ~ max(., na.rm = TRUE)                       # maximum
)
#### Old version with _at verbs --------------------------------
#' calc_summ_stats <- function(.data, ...) {
#'
#' .data %>%
#' # dplyr::transmute(...) %>%
#' tidyr::gather(key = "variable",
#' value = "value",
#' ...,
#' -dplyr::one_of(dplyr::group_vars(.)),
#' factor_key = TRUE) %>%
#' group_by(.data$variable, .add = TRUE) %>%
#' summarise_at(vars(.data$value),
#' summary_functions) %>%
#' mutate(range = .data$p100 - .data$p0,
#' CV = 100 * .data$sd / .data$mean) %>%
#' dplyr::mutate(variable = as.character(.data$variable))
#' }
#'
#' calc_summ_stats_t <- function(.data, ...) {
#' .data %>%
#' dplyr::transmute(...) %>%
#' tidyr::gather(key = "variable",
#' value = "value",
#' -dplyr::one_of(dplyr::group_vars(.)),
#' factor_key = TRUE) %>%
#' group_by(.data$variable, .add = TRUE) %>%
#' summarise_at(vars(.data$value),
#' summary_functions) %>%
#' mutate(range = .data$p100 - .data$p0,
#' CV = 100 * .data$sd / .data$mean) %>%
#' dplyr::mutate(variable = as.character(.data$variable))
#' }
|
26c63d0c2c94e2a36065f59ce352cf0ae2d07fb6 | b38e141f5ae6bd780375d019e31d94279e287249 | /Code/SimulateCulturalEvolution_ModifyPhylogeny.R | 28a8ad048c60e476dfc18de061a5bd9bbf0ba040 | [] | no_license | dieterlukas/CulturalMacroevolution_Simulation | 146591ccc487e5dfe6b7240e4e57f739986b6bce | 44f47ed95b50a62a6e8aeae5e9751998efc0d6ad | refs/heads/master | 2023-05-01T16:47:05.396583 | 2021-05-05T10:22:50 | 2021-05-05T10:22:50 | 280,188,326 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,457 | r | SimulateCulturalEvolution_ModifyPhylogeny.R | # One part of the simulation is to assess whether the shape of the tree, and in particular the branch lenghts have an influence on the inferences
# For this, we built four additional variants of each phylogenetic tree:
# Grafentree: a tree with branch lengths based on Grafen's method (all tips equidistant from root, branch length depends on number of nodes between root and tip)
# Onetree: a tree with all branch lengths set to have the same length of one
# Earlytree: a tree with early diversification and long branches leading to the tips
# Latetree: a tree with recent diversification and long branches between clades
## NOTE(review): this chunk expects `Option`, `Americantree` /
## `PamaNyungantree`, and the tree-manipulation functions (compute.brlen,
## root, multi2di, force.ultrametric -- presumably ape/phytools) to be in
## scope from earlier in the script; confirm against the caller. The node
## numbers 173 and 307 appear to be the root nodes of the respective trees.

if(Option=="WNAI") {

#Modify the tree of the WNAI societies for the analyses

#Add branch lengths to the tree based on Grafen's method (all tips equidistant from root, branch length depends onnumber of nodes between root and tip)
Grafentree<-compute.brlen(Americantree,method="Grafen")

#Add branch lengths to the tree assuming that all branches have the same length of one
Onetree<-compute.brlen(Americantree,1)

#Add branch lengths to the tree with early diversification and long branches to the tips
Earlytree<-compute.brlen(Americantree,method="Grafen",power=0.25)

#Add branch lengths to the tree with lots of recent diversification and long branches between clades
Latetree<-compute.brlen(Americantree,method="Grafen",power=1.5)

#Some analyses need a rooted, fully bifurcating tree
## after resolving polytomies (multi2di), branch lengths are recomputed so
## the new zero-length edges get proper lengths again
Grafentree<-root(Grafentree,node=173)
Grafentree<-multi2di(Grafentree)
Grafentree<-compute.brlen(Grafentree,method="Grafen")

Onetree<-root(Onetree,node=173)
Onetree<-multi2di(Onetree)
Onetree<-compute.brlen(Onetree,1)

Latetree<-root(Latetree,node=173)
Latetree<-multi2di(Latetree)
Latetree<-compute.brlen(Latetree,method="Grafen",power=1.5)

Earlytree<-root(Earlytree,node=173)
Earlytree<-multi2di(Earlytree)
Earlytree<-compute.brlen(Earlytree,method="Grafen",power=0.25)

}

#------------------------------------------------------------------------------------------

if(Option=="PamaNyungan") {

#Modify the tree of the PamaNyungan societies for the analyses
PamaNyungantree<-force.ultrametric(PamaNyungantree)

#Add branch lengths to the tree based on Grafen's method (all tips equidistant from root, branch length depends onnumber of nodes between root and tip)
Grafentree<-compute.brlen(PamaNyungantree,method="Grafen")

#Add branch lengths to the tree assuming that all branches have the same length of one
Onetree<-compute.brlen(PamaNyungantree,1)

#Add branch lengths to the tree with early diversification and long branches to the tips
Earlytree<-compute.brlen(PamaNyungantree,method="Grafen",power=0.25)

#Add branch lengths to the tree with lots of recent diversification and long branches between clades
Latetree<-compute.brlen(PamaNyungantree,method="Grafen",power=1.5)

#Some analyses need a rooted, fully bifurcating tree
Grafentree<-root(Grafentree,node=307)
Grafentree<-multi2di(Grafentree)
Grafentree<-compute.brlen(Grafentree,method="Grafen")

Onetree<-root(Onetree,node=307)
Onetree<-multi2di(Onetree)
Onetree<-compute.brlen(Onetree,1)

Latetree<-root(Latetree,node=307)
Latetree<-multi2di(Latetree)
Latetree<-compute.brlen(Latetree,method="Grafen",power=1.5)

Earlytree<-root(Earlytree,node=307)
Earlytree<-multi2di(Earlytree)
Earlytree<-compute.brlen(Earlytree,method="Grafen",power=0.25)

}
|
d9feee02c3aaf511cba4aaa95fcd4cb9bfaa7252 | 9acb2fb21cdf2d24ebefdb9da896cd2399ec2df3 | /Assignment01.R | 8957cd905e8a729d13954640d41b4e42644b746d | [] | no_license | aParticularCode/ComputingForAnalytics | 190496d38d28afaa70c1b614057b05425f391cec | 2de5bf075ec5369236c9043bc7c81951d090d5f0 | refs/heads/master | 2020-04-07T02:14:05.343710 | 2018-11-17T11:35:44 | 2018-11-17T11:35:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,939 | r | Assignment01.R | setwd("~/Dropbox/R Programming Course Materials/Week 1/datasets")
## Week 1 assignment: subsetting and summary queries on several datasets.
## WHO dataset
WHO <- read.csv("WHO.csv")
# Country with the lowest literacy
WHO$Country[which.min(WHO$LiteracyRate)]
# Richest country in Europe based on GNI
WHO.Europe <- subset(WHO, Region == "Europe")
WHO.Europe$Country[which.max(WHO.Europe$GNI)]
# Mean Life expectancy of countries in Africa
WHO.Africa <- subset(WHO, Region == "Africa")
mean(WHO.Africa$LifeExpectancy)
# Number of countries with population greater than 10,000
sum(WHO$Population > 10000)
# Top 5 countries in the Americas with the highest child mortality
# FIX: WHO.Americas was used below without ever being created, which made
# the script fail with "object 'WHO.Americas' not found".
WHO.Americas <- subset(WHO, Region == "Americas")
top5 <- order(WHO.Americas$ChildMortality, decreasing = TRUE)[1:5]
WHO.Americas$Country[top5]

## NBA dataset (Historical NBA Performance.xlsx)
# The year Bulls has the highest winning percentage
library(readxl)
NBA = read_excel("Historical NBA Performance.xlsx")
NBA.Bulls = subset(NBA, Team == "Bulls")
NBA.Bulls$Year[which.max(NBA.Bulls$`Winning Percentage`)]
# Teams with an even win-loss record in a year
NBA.EvenWinLoss = subset(NBA, NBA$`Winning Percentage`==0.5)
NBA.EvenWinLoss

## Seasons_Stats.csv
# Player with the highest 3-pt attempt rate in a season.
# Player with the highest free throw rate in a season.
# (the two questions above have no code yet)
# What year/season does Lebron James scored the highest?
Seasons_Stats = read.csv("Seasons_Stats.csv")
Lebron = subset(Seasons_Stats, Player=="LeBron James")
Lebron$Year[which.max(Lebron$PTS)]
# What year/season does Michael Jordan scored the highest?
Jordan = subset(Seasons_Stats, Player =="Michael Jordan*")
Jordan$Year[which.max(Jordan$PTS)]
# Player efficiency rating of Kobe Bryant in the year where his MP is the lowest?
Kobe = subset(Seasons_Stats, Player == "Kobe Bryant")
Kobe$PER[which.min(Kobe$MP)]

## National Universities Rankings.csv
univ = read.csv("National Universities Rankings.csv")
# University with the most number of undergrads
# Average Tuition in the Top 10 University
|
a73ce175a6e9393c0f4a076da12a29bfd987f5a2 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/preprocomb/examples/testpreprocessors.Rd.R | 6ee24d8d840d29d32d3701554285d7cfbddc1a8f | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 176 | r | testpreprocessors.Rd.R | library(preprocomb)
### Name: testpreprocessors
### Title: test preprocessing techniques against data
### Aliases: testpreprocessors

### ** Examples

## Auto-generated example from the preprocomb Rd file: run the default
## preprocessor test suite.
testpreprocessors()
|
f484540139bd7a9225c7668e084e69e85cb30c39 | e1f1b00d4fbd43b8cdde939fdfe4a40527391e01 | /2013/Scripts e Dados/gera_matriz_MT.R | 2e5cec25c201075d5856130f921c4be7e88a9b2e | [] | no_license | ghnunes/TRI | 32e6fb0d0c61db61e235841e8f3d73e218cf4fa0 | 5ab35c684dab2c6b42748b2627642229dfb9fb62 | refs/heads/master | 2020-04-08T16:38:56.643164 | 2018-11-28T17:44:57 | 2018-11-28T17:44:57 | 159,528,229 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 22,228 | r | gera_matriz_MT.R | rm(list=ls())
library(ff)
library(compiler)
## JIT-compile everything for the large scoring loop that follows
enableJIT(3)
library(stringr)
library(microbenchmark)
## ENEM 2013 sample (50k candidates): score the 45 mathematics (MT) items.
dados <-read.csv("amostra_enem2013_50k.csv")
## drop attendance flags and the export index -- not needed for scoring
dados$IN_PRESENCA_CN<-NULL
dados$IN_PRESENCA_CH<-NULL
dados$IN_PRESENCA_LC<-NULL
dados$IN_PRESENCA_MT<-NULL
dados$X<-NULL
contadorAux <-0
## 0/1 score matrix: one row per candidate, one column per MT item
respostas_MT <- matrix(0, nrow=50000, ncol = 45)
## scratch vector; its initial contents are overwritten character-by-character
## with each candidate's answer string inside the loop below
vet_respostas <- c("C","B","A","D","A","A","D","A","D","A","A","B","C","A","D","A","C","D","E","A","A","B","C","A","D","A","C","D","E","A","A","B","C","A","D","A","C","D","E","A","A","C","D","E","A")
## answer key for the 45 MT items -- presumably in the yellow-booklet
## (code 179) item order; verify against the official ENEM 2013 key
gabarito_MT <- c("E","D","B","C","A","A","E","C","A","A","B","B","B","D","C","E","C","D","D","A","E","B","A","C","E","B","E","C","B","D","A","D","E","D","C","D","C","B","C","B","A","D","C","D","E")
for(i in 1:nrow(dados)){
rm(resposta)
contadorAux <- contadorAux + 1
print(contadorAux)
resposta<-dados[i,8]
for(k in 1:45){
vet_respostas[k]<-str_sub(resposta,k,k)
}
if(dados[i,4]==179){ #prova AMARELA
if(gabarito_MT[1]==vet_respostas[1]){
respostas_MT[contadorAux,1]<-1
}
if(gabarito_MT[2]==vet_respostas[2]){
respostas_MT[contadorAux,2]<-1
}
if(gabarito_MT[3]==vet_respostas[3]){
respostas_MT[contadorAux,3]<-1
}
if(gabarito_MT[4]==vet_respostas[4]){
respostas_MT[contadorAux,4]<-1
}
if(gabarito_MT[5]==vet_respostas[5]){
respostas_MT[contadorAux,5]<-1
}
if(gabarito_MT[6]==vet_respostas[6]){
respostas_MT[contadorAux,6]<-1
}
if(gabarito_MT[7]==vet_respostas[7]){
respostas_MT[contadorAux,7]<-1
}
if(gabarito_MT[8]==vet_respostas[8]){
respostas_MT[contadorAux,8]<-1
}
if(gabarito_MT[9]==vet_respostas[9]){
respostas_MT[contadorAux,9]<-1
}
if(gabarito_MT[10]==vet_respostas[10]){
respostas_MT[contadorAux,10]<-1
}
if(gabarito_MT[11]==vet_respostas[11]){
respostas_MT[contadorAux,11]<-1
}
if(gabarito_MT[12]==vet_respostas[12]){
respostas_MT[contadorAux,12]<-1
}
if(gabarito_MT[13]==vet_respostas[13]){
respostas_MT[contadorAux,13]<-1
}
if(gabarito_MT[14]==vet_respostas[14]){
respostas_MT[contadorAux,14]<-1
}
if(gabarito_MT[15]==vet_respostas[15]){
respostas_MT[contadorAux,15]<-1
}
if(gabarito_MT[16]==vet_respostas[16]){
respostas_MT[contadorAux,16]<-1
}
if(gabarito_MT[17]==vet_respostas[17]){
respostas_MT[contadorAux,17]<-1
}
if(gabarito_MT[18]==vet_respostas[18]){
respostas_MT[contadorAux,18]<-1
}
if(gabarito_MT[19]==vet_respostas[19]){
respostas_MT[contadorAux,19]<-1
}
if(gabarito_MT[20]==vet_respostas[20]){
respostas_MT[contadorAux,20]<-1
}
if(gabarito_MT[21]==vet_respostas[21]){
respostas_MT[contadorAux,21]<-1
}
if(gabarito_MT[22]==vet_respostas[22]){
respostas_MT[contadorAux,22]<-1
}
if(gabarito_MT[23]==vet_respostas[23]){
respostas_MT[contadorAux,23]<-1
}
if(gabarito_MT[24]==vet_respostas[24]){
respostas_MT[contadorAux,24]<-1
}
if(gabarito_MT[25]==vet_respostas[25]){
respostas_MT[contadorAux,25]<-1
}
if(gabarito_MT[26]==vet_respostas[26]){
respostas_MT[contadorAux,26]<-1
}
if(gabarito_MT[27]==vet_respostas[27]){
respostas_MT[contadorAux,27]<-1
}
if(gabarito_MT[28]==vet_respostas[28]){
respostas_MT[contadorAux,28]<-1
}
if(gabarito_MT[29]==vet_respostas[29]){
respostas_MT[contadorAux,29]<-1
}
if(gabarito_MT[30]==vet_respostas[30]){
respostas_MT[contadorAux,30]<-1
}
if(gabarito_MT[31]==vet_respostas[31]){
respostas_MT[contadorAux,31]<-1
}
if(gabarito_MT[32]==vet_respostas[32]){
respostas_MT[contadorAux,32]<-1
}
if(gabarito_MT[33]==vet_respostas[33]){
respostas_MT[contadorAux,33]<-1
}
if(gabarito_MT[34]==vet_respostas[34]){
respostas_MT[contadorAux,34]<-1
}
if(gabarito_MT[35]==vet_respostas[35]){
respostas_MT[contadorAux,35]<-1
}
if(gabarito_MT[36]==vet_respostas[36]){
respostas_MT[contadorAux,36]<-1
}
if(gabarito_MT[37]==vet_respostas[37]){
respostas_MT[contadorAux,37]<-1
}
if(gabarito_MT[38]==vet_respostas[38]){
respostas_MT[contadorAux,38]<-1
}
if(gabarito_MT[39]==vet_respostas[39]){
respostas_MT[contadorAux,39]<-1
}
if(gabarito_MT[40]==vet_respostas[40]){
respostas_MT[contadorAux,40]<-1
}
if(gabarito_MT[41]==vet_respostas[41]){
respostas_MT[contadorAux,41]<-1
}
if(gabarito_MT[42]==vet_respostas[42]){
respostas_MT[contadorAux,42]<-1
}
if(gabarito_MT[43]==vet_respostas[43]){
respostas_MT[contadorAux,43]<-1
}
if(gabarito_MT[44]==vet_respostas[44]){
respostas_MT[contadorAux,44]<-1
}
if(gabarito_MT[45]==vet_respostas[45]){
respostas_MT[contadorAux,45]<-1
}
}
if(dados[i,4]==182){ #prova amarela
if(gabarito_MT[1]==vet_respostas[158-135]){
respostas_MT[contadorAux,1]<-1
}
if(gabarito_MT[2]==vet_respostas[159-135]){
respostas_MT[contadorAux,2]<-1
}
if(gabarito_MT[3]==vet_respostas[160-135]){
respostas_MT[contadorAux,3]<-1
}
if(gabarito_MT[4]==vet_respostas[146-135]){
respostas_MT[contadorAux,4]<-1
}
if(gabarito_MT[5]==vet_respostas[147-135]){
respostas_MT[contadorAux,5]<-1
}
if(gabarito_MT[6]==vet_respostas[148-135]){
respostas_MT[contadorAux,6]<-1
}
if(gabarito_MT[7]==vet_respostas[157-135]){
respostas_MT[contadorAux,7]<-1
}
if(gabarito_MT[8]==vet_respostas[165-135]){
respostas_MT[contadorAux,8]<-1
}
if(gabarito_MT[9]==vet_respostas[166-135]){
respostas_MT[contadorAux,9]<-1
}
if(gabarito_MT[10]==vet_respostas[167-135]){
respostas_MT[contadorAux,10]<-1
}
if(gabarito_MT[11]==vet_respostas[168-135]){
respostas_MT[contadorAux,11]<-1
}
if(gabarito_MT[12]==vet_respostas[169-135]){
respostas_MT[contadorAux,12]<-1
}
if(gabarito_MT[13]==vet_respostas[149-135]){
respostas_MT[contadorAux,13]<-1
}
if(gabarito_MT[14]==vet_respostas[150-135]){
respostas_MT[contadorAux,14]<-1
}
if(gabarito_MT[15]==vet_respostas[151-135]){
respostas_MT[contadorAux,15]<-1
}
if(gabarito_MT[16]==vet_respostas[152-135]){
respostas_MT[contadorAux,16]<-1
}
if(gabarito_MT[17]==vet_respostas[136-135]){
respostas_MT[contadorAux,17]<-1
}
if(gabarito_MT[18]==vet_respostas[137-135]){
respostas_MT[contadorAux,18]<-1
}
if(gabarito_MT[19]==vet_respostas[138-135]){
respostas_MT[contadorAux,19]<-1
}
if(gabarito_MT[20]==vet_respostas[139-135]){
respostas_MT[contadorAux,20]<-1
}
if(gabarito_MT[21]==vet_respostas[161-135]){
respostas_MT[contadorAux,21]<-1
}
if(gabarito_MT[22]==vet_respostas[162-135]){
respostas_MT[contadorAux,22]<-1
}
if(gabarito_MT[23]==vet_respostas[163-135]){
respostas_MT[contadorAux,23]<-1
}
if(gabarito_MT[24]==vet_respostas[164-135]){
respostas_MT[contadorAux,24]<-1
}
if(gabarito_MT[25]==vet_respostas[170-135]){
respostas_MT[contadorAux,25]<-1
}
if(gabarito_MT[26]==vet_respostas[171-135]){
respostas_MT[contadorAux,26]<-1
}
if(gabarito_MT[27]==vet_respostas[172-135]){
respostas_MT[contadorAux,27]<-1
}
if(gabarito_MT[28]==vet_respostas[176-135]){
respostas_MT[contadorAux,28]<-1
}
if(gabarito_MT[29]==vet_respostas[177-135]){
respostas_MT[contadorAux,29]<-1
}
if(gabarito_MT[30]==vet_respostas[178-135]){
respostas_MT[contadorAux,30]<-1
}
if(gabarito_MT[31]==vet_respostas[179-135]){
respostas_MT[contadorAux,31]<-1
}
if(gabarito_MT[32]==vet_respostas[180-135]){
respostas_MT[contadorAux,32]<-1
}
if(gabarito_MT[33]==vet_respostas[143-135]){
respostas_MT[contadorAux,33]<-1
}
if(gabarito_MT[34]==vet_respostas[144-135]){
respostas_MT[contadorAux,34]<-1
}
if(gabarito_MT[35]==vet_respostas[145-135]){
respostas_MT[contadorAux,35]<-1
}
if(gabarito_MT[36]==vet_respostas[140-135]){
respostas_MT[contadorAux,36]<-1
}
if(gabarito_MT[37]==vet_respostas[141-135]){
respostas_MT[contadorAux,37]<-1
}
if(gabarito_MT[38]==vet_respostas[142-135]){
respostas_MT[contadorAux,38]<-1
}
if(gabarito_MT[39]==vet_respostas[173-135]){
respostas_MT[contadorAux,39]<-1
}
if(gabarito_MT[40]==vet_respostas[174-135]){
respostas_MT[contadorAux,40]<-1
}
if(gabarito_MT[41]==vet_respostas[175-135]){
respostas_MT[contadorAux,41]<-1
}
if(gabarito_MT[42]==vet_respostas[153-135]){
respostas_MT[contadorAux,42]<-1
}
if(gabarito_MT[43]==vet_respostas[154-135]){
respostas_MT[contadorAux,43]<-1
}
if(gabarito_MT[44]==vet_respostas[155-135]){
respostas_MT[contadorAux,44]<-1
}
if(gabarito_MT[45]==vet_respostas[156-135]){
respostas_MT[contadorAux,45]<-1
}
}
if(dados[i,4]==181){ #prova azul
if(gabarito_MT[1]==vet_respostas[152-135]){
respostas_MT[contadorAux,1]<-1
}
if(gabarito_MT[2]==vet_respostas[153-135]){
respostas_MT[contadorAux,2]<-1
}
if(gabarito_MT[3]==vet_respostas[154-135]){
respostas_MT[contadorAux,3]<-1
}
if(gabarito_MT[4]==vet_respostas[144-135]){
respostas_MT[contadorAux,4]<-1
}
if(gabarito_MT[5]==vet_respostas[145-135]){
respostas_MT[contadorAux,5]<-1
}
if(gabarito_MT[6]==vet_respostas[146-135]){
respostas_MT[contadorAux,6]<-1
}
if(gabarito_MT[7]==vet_respostas[151-135]){
respostas_MT[contadorAux,7]<-1
}
if(gabarito_MT[8]==vet_respostas[155-135]){
respostas_MT[contadorAux,8]<-1
}
if(gabarito_MT[9]==vet_respostas[156-135]){
respostas_MT[contadorAux,9]<-1
}
if(gabarito_MT[10]==vet_respostas[157-135]){
respostas_MT[contadorAux,10]<-1
}
if(gabarito_MT[11]==vet_respostas[158-135]){
respostas_MT[contadorAux,11]<-1
}
if(gabarito_MT[12]==vet_respostas[159-135]){
respostas_MT[contadorAux,12]<-1
}
if(gabarito_MT[13]==vet_respostas[160-135]){
respostas_MT[contadorAux,13]<-1
}
if(gabarito_MT[14]==vet_respostas[161-135]){
respostas_MT[contadorAux,14]<-1
}
if(gabarito_MT[15]==vet_respostas[162-135]){
respostas_MT[contadorAux,15]<-1
}
if(gabarito_MT[16]==vet_respostas[163-135]){
respostas_MT[contadorAux,16]<-1
}
if(gabarito_MT[17]==vet_respostas[147-135]){
respostas_MT[contadorAux,17]<-1
}
if(gabarito_MT[18]==vet_respostas[148-135]){
respostas_MT[contadorAux,18]<-1
}
if(gabarito_MT[19]==vet_respostas[149-135]){
respostas_MT[contadorAux,19]<-1
}
if(gabarito_MT[20]==vet_respostas[150-135]){
respostas_MT[contadorAux,20]<-1
}
if(gabarito_MT[21]==vet_respostas[136-135]){
respostas_MT[contadorAux,21]<-1
}
if(gabarito_MT[22]==vet_respostas[137-135]){
respostas_MT[contadorAux,22]<-1
}
if(gabarito_MT[23]==vet_respostas[138-135]){
respostas_MT[contadorAux,23]<-1
}
if(gabarito_MT[24]==vet_respostas[139-135]){
respostas_MT[contadorAux,24]<-1
}
if(gabarito_MT[25]==vet_respostas[164-135]){
respostas_MT[contadorAux,25]<-1
}
if(gabarito_MT[26]==vet_respostas[165-135]){
respostas_MT[contadorAux,26]<-1
}
if(gabarito_MT[27]==vet_respostas[166-135]){
respostas_MT[contadorAux,27]<-1
}
if(gabarito_MT[28]==vet_respostas[170-135]){
respostas_MT[contadorAux,28]<-1
}
if(gabarito_MT[29]==vet_respostas[171-135]){
respostas_MT[contadorAux,29]<-1
}
if(gabarito_MT[30]==vet_respostas[172-135]){
respostas_MT[contadorAux,30]<-1
}
if(gabarito_MT[31]==vet_respostas[173-135]){
respostas_MT[contadorAux,31]<-1
}
if(gabarito_MT[32]==vet_respostas[174-135]){
respostas_MT[contadorAux,32]<-1
}
if(gabarito_MT[33]==vet_respostas[175-135]){
respostas_MT[contadorAux,33]<-1
}
if(gabarito_MT[34]==vet_respostas[176-135]){
respostas_MT[contadorAux,34]<-1
}
if(gabarito_MT[35]==vet_respostas[177-135]){
respostas_MT[contadorAux,35]<-1
}
if(gabarito_MT[36]==vet_respostas[178-135]){
respostas_MT[contadorAux,36]<-1
}
if(gabarito_MT[37]==vet_respostas[179-135]){
respostas_MT[contadorAux,37]<-1
}
if(gabarito_MT[38]==vet_respostas[180-135]){
respostas_MT[contadorAux,38]<-1
}
if(gabarito_MT[39]==vet_respostas[167-135]){
respostas_MT[contadorAux,39]<-1
}
if(gabarito_MT[40]==vet_respostas[168-135]){
respostas_MT[contadorAux,40]<-1
}
if(gabarito_MT[41]==vet_respostas[169-135]){
respostas_MT[contadorAux,41]<-1
}
if(gabarito_MT[42]==vet_respostas[140-135]){
respostas_MT[contadorAux,42]<-1
}
if(gabarito_MT[43]==vet_respostas[141-135]){
respostas_MT[contadorAux,43]<-1
}
if(gabarito_MT[44]==vet_respostas[142-135]){
respostas_MT[contadorAux,44]<-1
}
if(gabarito_MT[45]==vet_respostas[143-135]){
respostas_MT[contadorAux,45]<-1
}
}
if(dados[i,4]==180){ #prova cinza
if(gabarito_MT[1]==vet_respostas[148-135]){
respostas_MT[contadorAux,1]<-1
}
if(gabarito_MT[2]==vet_respostas[149-135]){
respostas_MT[contadorAux,2]<-1
}
if(gabarito_MT[3]==vet_respostas[150-135]){
respostas_MT[contadorAux,3]<-1
}
if(gabarito_MT[4]==vet_respostas[136-135]){
respostas_MT[contadorAux,4]<-1
}
if(gabarito_MT[5]==vet_respostas[137-135]){
respostas_MT[contadorAux,5]<-1
}
if(gabarito_MT[6]==vet_respostas[138-135]){
respostas_MT[contadorAux,6]<-1
}
if(gabarito_MT[7]==vet_respostas[143-135]){
respostas_MT[contadorAux,7]<-1
}
if(gabarito_MT[8]==vet_respostas[151-135]){
respostas_MT[contadorAux,8]<-1
}
if(gabarito_MT[9]==vet_respostas[152-135]){
respostas_MT[contadorAux,9]<-1
}
if(gabarito_MT[10]==vet_respostas[153-135]){
respostas_MT[contadorAux,10]<-1
}
if(gabarito_MT[11]==vet_respostas[154-135]){
respostas_MT[contadorAux,11]<-1
}
if(gabarito_MT[12]==vet_respostas[155-135]){
respostas_MT[contadorAux,12]<-1
}
if(gabarito_MT[13]==vet_respostas[162-135]){
respostas_MT[contadorAux,13]<-1
}
if(gabarito_MT[14]==vet_respostas[163-135]){
respostas_MT[contadorAux,14]<-1
}
if(gabarito_MT[15]==vet_respostas[164-135]){
respostas_MT[contadorAux,15]<-1
}
if(gabarito_MT[16]==vet_respostas[165-135]){
respostas_MT[contadorAux,16]<-1
}
if(gabarito_MT[17]==vet_respostas[139-135]){
respostas_MT[contadorAux,17]<-1
}
if(gabarito_MT[18]==vet_respostas[140-135]){
respostas_MT[contadorAux,18]<-1
}
if(gabarito_MT[19]==vet_respostas[141-135]){
respostas_MT[contadorAux,19]<-1
}
if(gabarito_MT[20]==vet_respostas[142-135]){
respostas_MT[contadorAux,20]<-1
}
if(gabarito_MT[21]==vet_respostas[144-135]){
respostas_MT[contadorAux,21]<-1
}
if(gabarito_MT[22]==vet_respostas[145-135]){
respostas_MT[contadorAux,22]<-1
}
if(gabarito_MT[23]==vet_respostas[146-135]){
respostas_MT[contadorAux,23]<-1
}
if(gabarito_MT[24]==vet_respostas[147-135]){
respostas_MT[contadorAux,24]<-1
}
if(gabarito_MT[25]==vet_respostas[156-135]){
respostas_MT[contadorAux,25]<-1
}
if(gabarito_MT[26]==vet_respostas[157-135]){
respostas_MT[contadorAux,26]<-1
}
if(gabarito_MT[27]==vet_respostas[158-135]){
respostas_MT[contadorAux,27]<-1
}
if(gabarito_MT[28]==vet_respostas[166-135]){
respostas_MT[contadorAux,28]<-1
}
if(gabarito_MT[29]==vet_respostas[167-135]){
respostas_MT[contadorAux,29]<-1
}
if(gabarito_MT[30]==vet_respostas[168-135]){
respostas_MT[contadorAux,30]<-1
}
if(gabarito_MT[31]==vet_respostas[169-135]){
respostas_MT[contadorAux,31]<-1
}
if(gabarito_MT[32]==vet_respostas[170-135]){
respostas_MT[contadorAux,32]<-1
}
if(gabarito_MT[33]==vet_respostas[178-135]){
respostas_MT[contadorAux,33]<-1
}
if(gabarito_MT[34]==vet_respostas[179-135]){
respostas_MT[contadorAux,34]<-1
}
if(gabarito_MT[35]==vet_respostas[180-135]){
respostas_MT[contadorAux,35]<-1
}
if(gabarito_MT[36]==vet_respostas[159-135]){
respostas_MT[contadorAux,36]<-1
}
if(gabarito_MT[37]==vet_respostas[160-135]){
respostas_MT[contadorAux,37]<-1
}
if(gabarito_MT[38]==vet_respostas[161-135]){
respostas_MT[contadorAux,38]<-1
}
if(gabarito_MT[39]==vet_respostas[171-135]){
respostas_MT[contadorAux,39]<-1
}
if(gabarito_MT[40]==vet_respostas[172-135]){
respostas_MT[contadorAux,40]<-1
}
if(gabarito_MT[41]==vet_respostas[173-135]){
respostas_MT[contadorAux,41]<-1
}
if(gabarito_MT[42]==vet_respostas[174-135]){
respostas_MT[contadorAux,42]<-1
}
if(gabarito_MT[43]==vet_respostas[175-135]){
respostas_MT[contadorAux,43]<-1
}
if(gabarito_MT[44]==vet_respostas[176-135]){
respostas_MT[contadorAux,44]<-1
}
if(gabarito_MT[45]==vet_respostas[177-135]){
respostas_MT[contadorAux,45]<-1
}
}
resposta <- NULL
}
write.csv(respostas_MT, file = "respostas_zeros_e_uns50k_MT.csv")
|
824ddbc566db7a4038dbb930ee54471d8ac46e39 | 8eb8cb8be6244905bf8cf4a7e60c924d961baf5e | /Big Data assignments/Lab 1 - Introduction to R/S20-BDA-Lab1.R | 6d20f23bf0e4535fc56c23fe8f41e77e5dfd1aa1 | [] | no_license | YahiaAbusaif/DataAnalysis | b8bf4c5f10dc04b0a71b6f126811c31a04fc2e4d | 8830fe5960eb5ec20066b3ea8aaf29d7a5721f91 | refs/heads/main | 2023-08-30T16:47:36.768168 | 2021-10-20T17:57:29 | 2021-10-20T17:57:29 | 374,000,811 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,131 | r | S20-BDA-Lab1.R | #Lab 1 Introduction to R Language
#--------------------------------------------------------------------------------#
# Workspace setup for the lab session.
# NOTE(review): rm(list=ls()) and setwd() are acceptable in an interactive
# tutorial, but are discouraged in production scripts (they wipe the user's
# session and hard-code a machine-specific path).
#clean environment
rm(list=ls())
#display names of the objects currently in the global environment
ls()
#get working directory
getwd()
#set working directory (Windows-specific path; adjust before running elsewhere)
setwd("D:\\University\\TA\\2017-2018\\Spring 2018\\Big Data Analytics\\BDA-LAB1")
#--------------------------------------------------------------------------------#
#Getting Started#
# NOTE: this assignment shadows the utils::str() function name with a character
# vector; later calls such as str(v1) still work because R looks up a function
# when a name appears in call position.
str <- "Hello World"
print(str)
#--------------------------------------------------------------------------------#
#Comments#
#This is a single line comment in R.
#R does not support multi-line comments but you can perform a trick which is something as follows
# (the string literal below is parsed but discarded, because the if(FALSE)
# branch never executes)
if(FALSE) {
  "This is a demo for multi-line comments and it should be put inside either a
  single OR double quote"
}
#--------------------------------------------------------------------------------#
#In contrast to other programming languages like C and java in R, the variables are not
#declared as some data type. The variables are assigned with R-Objects and the data type
#of the R-object becomes the data type of the variable.
#There are many types of R-objects. The frequently used ones are:
#Vectors, Lists, Matrices, Arrays, Factors, Data Frames.
#vectors in R
#There are six data types of these atomic vectors:
v1 <- c(1,2,3,4.5,2,3) #numeric
v2 <- c("tree","street","car") #character
v3 <- c(TRUE , FALSE ,TRUE) #logical
v4 <- c(23L, 192L, 0L) #integer
v5 <- c(3+2i, 4-5i, -2i) #complex
v6 <- charToRaw("Hello") #raw
#Question:
v7 <- c("data", 22.5, TRUE) #What will be the type of v7?
# (answer: character - c() coerces every element to the most flexible type)
#Get the length of a vector
length(v3)
#Extracting elements
#(1)
#Positive integers return elements at the specified positions (even if duplicate):
#=================================================================================
v1 <- c(10, 20, 30, 40, 50, 60)
v1[2]
v1[c(2,4)]
v1[c(4,4)]
v1[2:5]
v1[5:2]
v1[c(2.2,3.6)] #Real numbers are silently truncated to integers.
#(2)
#Negative integers omit elements at the specified positions:
#============================================================
v1 <- v1[-3]
v1[-c(2,1)]
#(3)
#Logical Vectors
#=================
v1 <- c(1,2,3,4.5,3,2)
v1>2
v1==2
v1!=2
v1[v1>2]
2%in%v1
9%in%v1
#If the logical vector is shorter than the vector being subsetted,
#it will be recycled to be the same length.
# (v3 has length 3, so it is recycled over the 6 elements of v1/v2)
v1[v3]
v2[v3]
#(4) Sorting vectors and displaying information
#=============================================
#Sort elements.
sort(v1)
sort(v1, decreasing = TRUE) #Seek help for sort.int for example
#Display vectors' information
str(v1)
#Display summary of vectors (mean, median, min, max, 1st and 3rd quartiles)
summary(v1)
#(5)Assignment and vector manipulation
v4 <- v1[c(2,4)]
v1[3] -> v5
# NOTE: "->" (right assignment) and "=" both work at top level, but "<-" is the
# conventional assignment operator in R style guides.
v6 = v4 + 2
v7<- v1+v4 #broadcasting
# (R calls this "recycling": the shorter vector v4 is repeated to match v1)
#--------------------------------------------------------------------------------#
#(6) Factors (note: the original numbering duplicated "(5)")
#============================================
f <- factor(v1)
f
v8 <- c(v2, "car", "plane")
factor(v8)
#(6)lists
#============================================
list1 <- list(2,'car',TRUE)
list1
list1[[1]]
#Notice the difference
# (c() on mixed types coerces everything to character; list() keeps each type)
list2 <- c(2,'car',TRUE)
list2
l <- list(v1,v2)
l
summary(l)
str(l)
# "[" returns a sub-list; "[[" extracts the element itself
l[1]
l[2]
l[[1]]
l[[1]][4]
#Structured Data Types
#============================================
#(7) Matrix
#============
cells <- seq(10,90,by=10)
rnames <- c("R1", "R2","R3")
cnames <- c("C1", "C2","C3")
mymatrix <- matrix(cells, nrow = 3, ncol = 3, byrow =TRUE, dimnames = list(rnames, cnames))
mymatrix
#second column
mymatrix[,2]
#or equivalently
mymatrix[,"C2"]
#second and third column
mymatrix[,c("C2","C3")]
#first row
mymatrix[1,]
#all matrix except second row
mymatrix[-2,]
mymatrix[1,-3]
#--------------------------------------------------------------------------------#
#(8) IMPORTANT: Data Frames
#============================
d <- c(1,2,3,4,4,4)
e <- c("red", "white", "red", NA,"red","red")
# NOTE: this reuses the name f, overwriting the factor created above.
f <- c(TRUE,TRUE,TRUE,FALSE,FALSE,NA)
mydata <- data.frame(d,e,f)
colnames(mydata) <- c("ID","Color","Passed") # variable names
mydata
# identify elements in data frames
mydata[1,] #extract first row of the data frame
mydata[2] #extract the second column
mydata[2:3] # columns 2,3 of data frame
mydata[c("ID","Color")] # columns ID and color from data frame
mydata$ID # variable ID in the data frame
mydata$Passed # variable Passed in the data frame
#Subsetting the dataframe based on one or more conditions.
subdfm<- subset(mydata, ID <= 3, select=c(ID,Color))
subdfm
# Multiple conditions are combined with the elementwise "&" operator.
subdfm<- subset(mydata, ID <= 3 & Color == 'red', select=c(ID,Color))
subdfm
#Can we write it in another way?
mydata[mydata$ID <= 3, c('ID', 'Color')]
#with(mydata, mydata[ID<=3, c('ID','Color')])
#(9) IMPORTANT: Tables
#=======================
#Create contingency table
# NOTE: the name t masks the base transpose function t(); fine in a demo,
# avoid in real code.
t<- table(mydata$ID)
t
table(mydata$Color, mydata$Passed)
#(10)
#===============================================================
#Testing arguments whether they belong to a certain class or not
is.matrix(mymatrix)
is.list(mymatrix)
is.matrix(list1)
is.list(list1)
#Attempting to turn arguments into certain classes
vectorizedMatrix <- as.vector(mymatrix)
vectorizedMatrix
#(11)
#Importing data from csv files and reading data into a data frame
#================================================================
# NOTE: these reads are relative to the working directory set at the top of
# the script; the files must exist there.
dfm <- read.csv("forestfires.csv")
dfm$X
#get dimensions of data frame
dim(dfm)
nrow(dfm)
ncol(dfm)
#visualize some of the data
head(dfm)
tail(dfm)
summary(dfm)
table(dfm$month)
table(dfm$month, dfm$day)
#--------------------------------------------------------------------------------#
#examples of importing files
#text files
dftxt <- read.table("testfile.txt",header = FALSE)
dftxt
#from csv file
dfcsv <- read.csv("csvone.csv",header = TRUE)
dfcsv
#--------------------------------------------------------------------------------#
#(12)Graphical and Statistical Functions
#=======================================
#Graphical functions
v1 <- c(1,2,3,3.6,4.5,2,3) #numeric
plot(v1, type="b") #Check the type of plot
#There are many plots that can be drawn: pie chart, bar plot, box and whisker plot.
#Check them.
hist(v1)
#Statistical functions
mean(v1)
median(v1)
sd(v1)
var(v1)
#--------------------------------------------------------------------------------#
#(13)Functions
#=============
#Function to calculate the Euclidean distance between two 2D points.
# Euclidean distance between two 2D points supplied as coordinate pairs:
# x holds the two x-coordinates and y the two y-coordinates
# (presumably; the formula is consistent with that reading - verify with author).
euclideanDistance <- function(x, y) {
  dx <- x[1] - x[2]
  dy <- y[1] - y[2]
  sqrt(dx^2 + dy^2)
}
euclideanDistance(c(2,3), c(4,5))
#--------------------------------------------------------------------------------#
#(14) Flow control statements
#=============================
# NOTE: the name names masks the base accessor names(); avoid outside demos.
names <- c('Ali', 'Hussein', 'Ahmed')
if ('Ali' %in% names) {
  print('Ali exists')
} else if ('Hussein' %in% names) { #Note that you should write the closing braces together with else if keyword on the same line
  print('Hussein exists')
} else {
  print('Neither Ali nor Hussein exists')
}
# A single-statement for body may omit braces, though braces are safer style.
for (name in names)
  print(name)
#--------------------------------------------------------------------------------#
#(15)String Manipulation
#=======================
paste(names[1], names[2], names[3])
paste(names[1], names[2], names[3], sep= "+")
toupper(names[1])
tolower(names[2])
|
e6fe5f80917c86024d722ad8093513f9e254f539 | c5de5d072f5099e7f13b94bf2c81975582788459 | /R Extension/RMG/Energy/Trading/PortfolioAnalysis/overnightPortfolioReport.r | a61e7a9dc3cbf34d2d386886616692bbe5706ea1 | [] | no_license | uhasan1/QLExtension-backup | e125ad6e3f20451dfa593284507c493a6fd66bb8 | 2bea9262841b07c2fb3c3495395e66e66a092035 | refs/heads/master | 2020-05-31T06:08:40.523979 | 2015-03-16T03:09:28 | 2015-03-16T03:09:28 | 190,136,053 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 629 | r | overnightPortfolioReport.r |
rm(list=ls())
options <- NULL
options$nFieldLimit <- 800
options$listFields <- c("VAR", "DELTA")
#options$listFields <- c("DELTA")
require(reshape)
require(RODBC)
source("H:/user/R/RMG/TimeFunctions/add.date.R")
today <- as.Date(format(Sys.time(), "%Y-%m-%d"))
#today <- as.Date("2007-08-01")
options$dates <- add.date(today, "-1 b")
#options$dates <- as.Date("2007-08-31")
options$portfolio <- "Mark Orman Netted Portfolio"
source("H:/user/R/RMG/Energy/Trading/PortfolioAnalysis/mainPortfolioReportE.r")
runtime <- as.numeric(format(Sys.time(), "%H%M"))
if (runtime <= 1900) {
mainPortfolioReportE(options)
} |
0e30ea34ff9b416b804c4a09568e52b89483ea3e | c2787065aaa17cc41773ac5865f4bc9218531592 | /GBLUPs/ReplicateReduction/Replicate_Red_F0_GBLUP_iSize_20200222.R | b4bcdb72d9fb4f528e76f8aa2c66731f8fb78f8c | [] | no_license | cks2903/White_Clover_GenomicPrediction_2020 | 94c2b8439e6f1f39984cd1c0fc9b48e1aaaa90e0 | aeb1147d640cf4326a619ed14806b9132db6ba98 | refs/heads/master | 2023-07-22T00:11:52.303852 | 2021-09-06T10:04:06 | 2021-09-06T10:04:06 | 295,721,634 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,704 | r | Replicate_Red_F0_GBLUP_iSize_20200222.R | ###################################################################################################################
###################################################################################################################
### Script to run GBLUP on replicate data, removing one replicate per genotype in each round.                   ###
###################################################################################################################
###################################################################################################################
# Load libraries
{
  library(lme4)
  library(BGLR)
  library("parallel")
  library("methods")
  library("Matrix")
  library("MASS")
}
# Define some variables
# args[1]: path to the GRM csv, args[2]: round label used in output file names.
{
  args=commandArgs(trailingOnly = TRUE)
  print(args)
  round=args[2]
}
# Load data: greenhouse area measurements merged with 2018 weights by Barcode.
{
  d <- read.csv("/home/cks/NChain/faststorage/WHITE_CLOVER/RNASEQ/HyLiTE/LD_filtering_RNASeq/GP_20190919_GPD_onReplicateData/greenhouse_area.csv", header = TRUE, sep = ",")
  f=read.csv("/home/cks/NChain/faststorage/WHITE_CLOVER/RNASEQ/HyLiTE/LD_filtering_RNASeq/GP_20190919_GPD_onReplicateData/2018_weight.csv",header=T,sep=";")
  colnames(f)[1]="Barcode"
  df=merge(d,f,by="Barcode")
  d=df
}
# Calculate growth pr. day (weight divided by days between inoculation and harvest).
{
  d$days_of_growth <- as.Date(d$harvest_date, format = "%d/%m/%y") - as.Date(d$inoculation_date, format = "%d/%m/%y")
  d$growth_per_day <- d$weight.y/as.numeric(d$days_of_growth)
}
# Remove plants that has been found to not show growth between day 10 and 20 dpi and drop in growth from day 10 dpi to the last day it was measured
{
  remove = read.table("Barcodes_removed_based_on_single_Observations_2021-01-06.txt")
  #remove = read.table("/Volumes/NAT_MBG-PMg/Cathrine/Nchain/Genomic_prediction_yield_July2020/V2_LessHarsh_SaraQualityFilter/New_fixation_Trait_20210106/Barcodes_removed_based_on_single_Observations_2021-01-06.txt")
  # NOTE(review): `remove` is a data.frame, so `d$Barcode %in% remove` matches
  # against the list of columns rather than the barcode values; likely
  # intended `remove[[1]]` here - verify, as written this probably always
  # takes the "already been removed" branch.
  removeidx = which(d$Barcode %in% remove)
  if (length(removeidx)==0){
    print("barcodes that showed weird behaviour have already been removed.")
    d005=d
  } else{
    d005 = d[-removeidx,]
  }
}
# Sort out plants that were inoculated with no rhizobium, SM73 or had n_stolon=0. These are errors and don't grow
# (also drops rows with any NA, fixes two mis-capitalised clover names, and
# removes plants with fewer than 4 stolons).
{
  d0=na.omit(d005)
  d0$Clover=as.character(d0$Clover)
  d0$Clover[which(d0$Clover=="AAran_0104")]="Aaran_0104"
  d0$Clover[which(d0$Clover=="AAran_0206")]="Aaran_0206"
  d0$Clover=as.factor(d0$Clover)
  d0=d0[-which(d0$rhizobium=="SM73"),]
  d0=d0[-which(d0$rhizobium=="NO"),]
  d0=d0[-which(d0$n_stolons==0),]
  d0=d0[-which(d0$n_stolons==1),]
  d0=d0[-which(d0$n_stolons==2),]
  d0=d0[-which(d0$n_stolons==3),]
  d2=d0
}
# Load genomic relationship matrix and make sure clover genotypes match data
# (genotype names are truncated to 8 characters to match the GRM column names;
# the bare dim()/length() expressions are interactive sanity checks and have
# no effect when the script is sourced).
{
  GRM=read.table(args[1],sep=",",header=T)
  dim(GRM)
  d2$Clovershort <- strtrim(d2$Clover,8)
  d3=d2[order(d2$Clovershort,decreasing=F),]
  length(unique(d3$Clovershort)) #149
  d4=d3[which(d3$Clovershort %in% colnames(GRM)),]
  length(unique(d4$Clovershort)) #147 unique genotypes with GPD data
  remove=GRM[which(colnames(GRM) %in% d4$Clovershort),which(colnames(GRM) %in% d4$Clovershort)]
  print(remove)
  GRM1=GRM[which(colnames(GRM) %in% d4$Clovershort),which(colnames(GRM) %in% d4$Clovershort)]
  dim(GRM1)
  nrow(GRM1)==length(unique(d4$Clovershort))
  GRM1=data.matrix(GRM1)
  length(colnames(GRM1)==unique(d4$Clovershort))==nrow(GRM1) #check
}
# Aberpearl_07 contribute 700 of the datapoints and thus influence the variance a lot. Cut down Aberpearl_07 data so that we have only 6 different Rhizobia left like the other clovers
# (set.seed(15) makes the 6-strain subsample reproducible; NOTE that `sample`
# and `remove` shadow the base functions of the same names within this script).
{
  Aberpearl_07=which(d4$Clovershort=="Aearl_07")
  Inocolums=unique(d4$Rhizobium[Aberpearl_07])
  set.seed(15)
  sample=sample(Inocolums,6)
  #sample=c("MIX","SM22","SM25","SM149C","SM31","SM155A")
  print(sample)
  which(d4$Rhizobium[Aberpearl_07]==sample[1]) #4
  which(d4$Rhizobium[Aberpearl_07]==sample[2]) #4
  which(d4$Rhizobium[Aberpearl_07]==sample[3]) #4
  which(d4$Rhizobium[Aberpearl_07]==sample[4]) #4
  which(d4$Rhizobium[Aberpearl_07]==sample[5]) #4
  which(d4$Rhizobium[Aberpearl_07]==sample[6]) #4
  remove=which((d4$Rhizobium[Aberpearl_07] %in% sample)==FALSE)
  d4=d4[-Aberpearl_07[remove],]
  nrow(d4)
}
# Build a round_replicate label; the commented-out line shows a previously
# used exclusion of round-1/replicate-2 data.
{
  d4$roundRep <- paste(d4$Round, d4$Replicate, sep='_')
  #d6=d4[-which(d4$roundRep=="1_2"),]
  d6=d4
  nrow(d6)
  length(which(d6$Clovershort=="Aearl_07"))
}
# Clean up
{
  d6$Rhizobium=droplevels(d6$Rhizobium) # removing levels not used in actual data
  d6$Clover=droplevels(d6$Clover) # removing levels not used in actual data
  d6=d6[order(d6$Clovershort),] # make sure it is in alphabetic order like the GRM
}
# Remove all genotypes that has <10 replicates
# (the matching row/column is also dropped from the GRM so data and GRM stay
# aligned).
# NOTE(review): if a genotype were absent from colnames(GRM1), GRMidx would be
# integer(0) and GRM1[-GRMidx,-GRMidx] would wipe the whole matrix; by
# construction d6 genotypes are a subset of the GRM columns, so this should
# not trigger - verify if the inputs change.
Genotypes=(unique(d6$Clovershort))
for (genotype in Genotypes){
  idx=which(d6$Clovershort==genotype)
  if (length(idx)<10){
    d6=d6[-idx,]
    print(paste(genotype,"removed",sep=" "))
    GRMidx = which(colnames(GRM1)==genotype)
    GRM1 = GRM1[-GRMidx,-GRMidx]
  }
}
# Clean up
{
  d6$Rhizobium=droplevels(d6$Rhizobium) # removing levels not used in actual data
  d6$Clover=droplevels(d6$Clover) # removing levels not used in actual data
  d6=d6[order(d6$Clovershort),] # make sure it is in alphabetic order like the GRM
}
# Divide into 6 populations for GP
# (a fresh random permutation of the genotypes is split into 6 roughly equal
# folds; the resulting grouping is written to grouping<round>.txt for the
# record).
set.seed(NULL)
tst=sample(1:length(unique(d6$Clovershort)),size=length(unique(d6$Clovershort)),replace=FALSE)
k=6
testing_pop=split(tst, sort(tst%%k))
tst1=testing_pop[1]$'0'
tst2=testing_pop[2]$'1'
tst3=testing_pop[3]$'2'
tst4=testing_pop[4]$'3'
tst5=testing_pop[5]$'4'
tst6=testing_pop[6]$'5'
testpop1=unique(d6$Clovershort)[tst1]
testpop2=unique(d6$Clovershort)[tst2]
testpop3=unique(d6$Clovershort)[tst3]
testpop4=unique(d6$Clovershort)[tst4]
testpop5=unique(d6$Clovershort)[tst5]
testpop6=unique(d6$Clovershort)[tst6]
grouping=list(testpop1,testpop2,testpop3,testpop4,testpop5,testpop6)
name=paste("grouping",round,".txt",sep="")
sink(name)
print(grouping)
sink()
############################################################
# Now remove replicates so each genotype has a maximum of the desired number (maxreplicates) and calculate mean phenotypes based on replicates left
# Randomly thin each genotype down to at most `maxreplicates` replicate rows,
# then recompute the per-genotype mean initial size from the remaining rows.
#
# Relies on the global vector `Genotypes` (unique Clovershort levels).
#
# Args:
#   maxreplicates: maximum number of replicate rows to keep per genotype.
#   dataframe:     data frame with at least columns Clovershort, InitialSize.
# Returns:
#   list(iSizemeans, dataframe): per-genotype means (columns Clovershort,
#   InitialSize) and the thinned data frame.
removereplicates <- function(maxreplicates, dataframe) {
  set.seed(NULL)  # fresh RNG state so each call draws a new subsample
  for (genotype in Genotypes) {
    replicateidx <- which(dataframe$Clovershort == genotype)
    if (length(replicateidx) > maxreplicates) {
      numbertoremove <- length(replicateidx) - maxreplicates
      remove <- sample(replicateidx, numbertoremove)
      dataframe <- dataframe[-remove, ]
    }
  }
  # Hoisted out of the loop: the original printed this message and recomputed
  # the averages once per genotype; both only need to happen once, after all
  # genotypes have been thinned. Returned values are unchanged.
  print(paste("Number of replicates pr. genotype has been reduced to:", maxreplicates, sep = ""))
  iSizemeans <- aggregate(dataframe$InitialSize, list(dataframe$Clovershort), mean)
  colnames(iSizemeans) <- c("Clovershort", "InitialSize")
  return(list(iSizemeans, dataframe))
}
# For each of the six predefined test populations (globals testpop1..testpop6),
# return the row indices of `dataframe` whose Clovershort genotype belongs to
# that population. Result is a plain list of six integer index vectors, in the
# same order as the original explicit list.
testpop_generator <- function(dataframe) {
  pops <- list(testpop1, testpop2, testpop3, testpop4, testpop5, testpop6)
  lapply(pops, function(pop) which(dataframe$Clovershort %in% pop))
}
# Fit a GBLUP model (BGLR, RKHS kernel = GRM) on all genotypes NOT in
# `testpop`, then predict GEBVs for the `testpop` genotypes from the
# training/test covariance block of the GRM.
#
# Reads globals: dataframe (per-genotype means), GRM1 (genomic relationship
# matrix aligned with dataframe), round (label for BGLR scratch files).
#
# Args:
#   testpop: integer row indices of `dataframe` to hold out and predict.
# Returns:
#   character matrix with columns ID, Observed, GEBV for the test genotypes.
GP_GBLUP<-function(testpop){
  ################ ################ ################ ################
  ##start by estimating GEBVs for training population individuals
  ################ ################ ################ ################
  iSizemeans_training=dataframe[-testpop,] # limit the dataframe to only the individuals allowed for training the model
  # NOTE(review): base na.omit() has no `cols` argument (that is data.table's
  # method), so the extra argument is silently ignored and ALL columns are
  # screened for NA - verify this is the intended behaviour.
  iSizemeans_training_ready=na.omit(iSizemeans_training, cols = c("iSize")) # remember that gpd na inidividuals should be removed whether or not they are in the training pop or not
  ind_not_in_train=dataframe$Clovershort[testpop]
  IndividualsToRemoveGRM=which(colnames(GRM1) %in% ind_not_in_train)
  GRM_trn = GRM1[-IndividualsToRemoveGRM,-IndividualsToRemoveGRM]
  # Run the GBLUP model on full training population to extract GEBVs
  yNA=iSizemeans_training_ready$InitialSize
  ETA=list(list(K=GRM_trn,model="RKHS"))
  GBLUP=BGLR(y=yNA,response_type = "gaussian",ETA=ETA,nIter=20000,burnIn = 5000,verbose=F,saveAt=paste("GBLUP",round))
  matrix=cbind(as.character(iSizemeans_training_ready$Clovershort),as.numeric(iSizemeans_training_ready$InitialSize),as.numeric(GBLUP$ETA[[1]]$u))
  colnames(matrix)=c("ID", "Observed", "GEBV")
  # Training GEBVs, recovered as numerics from the character matrix above.
  GEBV_contribution1data=as.numeric(as.character(matrix[,3]))
  ################ ################
  ## Now predict testing population
  ################ ################
  # NOTE(review): GRMforpred_test is computed but never used below.
  GRMforpred_test = GRM1[testpop,testpop] # GRM for individuals that will be predicted
  GRMforpred_covar = GRM1[testpop,-testpop] # Covariance between training and testing pop.
  #GEBVpred_contr1 = GcloverReps_covar%*%solve(GcloverReps_trn) %*% GEBV_contribution1data
  # Moore-Penrose generalized inverse (MASS::ginv) is used instead of solve()
  # so a (near-)singular training GRM does not abort the prediction.
  GEBVpred = GRMforpred_covar%*%ginv(GRM_trn) %*% GEBV_contribution1data
  #GEBVpred_contr1 = GcloverReps_covar%*%solve(GcloverReps_trn + diag(0.01, 1661, 1661)) %*% GEBV_contribution1data
  # Output matrix with prediction results
  matrix1=cbind(as.character(dataframe$Clovershort[testpop]),as.numeric(dataframe$InitialSize[testpop]),as.numeric(as.character(GEBVpred)))
  colnames(matrix1)=c("ID", "Observed", "GEBV")
  return(matrix1)
}
#Apply so maximum of replicates is 10
{
  # Thin to at most 10 replicates per genotype. Global names are kept because
  # the rest of the script reads them: Only10reps feeds the next reduction,
  # `dataframe` is read by GP_GBLUP, results10 by the summary step below.
  reduced <- removereplicates(10, d6)
  Only10reps_avg <- reduced[[1]]
  Only10reps <- reduced[[2]]
  head(Only10reps_avg)
  tests <- testpop_generator(Only10reps_avg)
  dataframe <- Only10reps_avg
  print("Starting GBLUP prediction")
  results10 <- mclapply(tests, GP_GBLUP)
}
# Summarize and make files with results
{
  # Stack the six folds' prediction matrices (equivalent to rbind of the six
  # list elements), then score accuracy as cor(Observed, GEBV).
  All <- do.call(rbind, results10)
  correlation <- cor(as.numeric(as.character(All[,2])), as.numeric(as.character(All[,3])))
  correlation
  filename <- paste0("Correlation_GBLUP_iSize_10Replicates", round, ".txt")
  write.table(correlation, filename, sep="\t", quote=FALSE, row.names=FALSE, col.names=FALSE)
  filename1 <- paste0("Predictions_GBLUP_iSize_10Replicates", round, ".txt")
  write.table(All, filename1, sep="\t", quote=FALSE, row.names=FALSE)
}
#Apply so maximum of replicates is 9
{
  # Further thin the 10-replicate data down to at most 9 per genotype.
  reduced <- removereplicates(9, Only10reps)
  Only9reps_avg <- reduced[[1]]
  Only9reps <- reduced[[2]]
  tests <- testpop_generator(Only9reps_avg)
  dataframe <- Only9reps_avg
  print("Starting GBLUP prediction")
  results9 <- mclapply(tests, GP_GBLUP)
}
# Summarize and make files with results
{
  All <- do.call(rbind, results9)
  correlation <- cor(as.numeric(as.character(All[,2])), as.numeric(as.character(All[,3])))
  correlation
  filename <- paste0("Correlation_GBLUP_iSize_9Replicates", round, ".txt")
  write.table(correlation, filename, sep="\t", quote=FALSE, row.names=FALSE, col.names=FALSE)
  filename1 <- paste0("Predictions_GBLUP_iSize_9Replicates", round, ".txt")
  write.table(All, filename1, sep="\t", quote=FALSE, row.names=FALSE)
}
#Apply so maximum of replicates is 8
{
  reduced <- removereplicates(8, Only9reps)
  Only8reps_avg <- reduced[[1]]
  Only8reps <- reduced[[2]]
  tests <- testpop_generator(Only8reps_avg)
  dataframe <- Only8reps_avg
  print("Starting GBLUP prediction")
  results8 <- mclapply(tests, GP_GBLUP)
}
# Summarize and make files with results
{
  All <- do.call(rbind, results8)
  correlation <- cor(as.numeric(as.character(All[,2])), as.numeric(as.character(All[,3])))
  correlation
  filename <- paste0("Correlation_GBLUP_iSize_8Replicates", round, ".txt")
  write.table(correlation, filename, sep="\t", quote=FALSE, row.names=FALSE, col.names=FALSE)
  filename1 <- paste0("Predictions_GBLUP_iSize_8Replicates", round, ".txt")
  write.table(All, filename1, sep="\t", quote=FALSE, row.names=FALSE)
}
#Apply so maximum of replicates is 7
{
  reduced <- removereplicates(7, Only8reps)
  Only7reps_avg <- reduced[[1]]
  Only7reps <- reduced[[2]]
  tests <- testpop_generator(Only7reps_avg)
  dataframe <- Only7reps_avg
  print("Starting GBLUP prediction")
  results7 <- mclapply(tests, GP_GBLUP)
}
# Summarize and make files with results
{
  All <- do.call(rbind, results7)
  correlation <- cor(as.numeric(as.character(All[,2])), as.numeric(as.character(All[,3])))
  correlation
  filename <- paste0("Correlation_GBLUP_iSize_7Replicates", round, ".txt")
  write.table(correlation, filename, sep="\t", quote=FALSE, row.names=FALSE, col.names=FALSE)
  filename1 <- paste0("Predictions_GBLUP_iSize_7Replicates", round, ".txt")
  write.table(All, filename1, sep="\t", quote=FALSE, row.names=FALSE)
}
#Apply so maximum of replicates is 6
# Same pipeline with the replicate cap lowered to 6.
{
run6= removereplicates(6,Only7reps)
Only6reps_avg=run6[[1]]
Only6reps=run6[[2]]
tests=testpop_generator(Only6reps_avg)
dataframe = Only6reps_avg
print("Starting GBLUP prediction")
results6=mclapply(tests, GP_GBLUP)
}
# Summarize and make files with results
# Stack the six fold predictions, score accuracy as the Pearson correlation
# between columns 2 and 3, and write the results to files tagged with the
# replicate cap and the global `round` counter.
{
stopifnot(length(results6) >= 6)  # the summary expects six folds
All=do.call(rbind, results6[1:6])  # replaces the hand-unrolled first..sixth rbind
correlation=cor(as.numeric(as.character(All[,2])),as.numeric(as.character(All[,3]))) #means of replicates
correlation  # no-op inside { } (not auto-printed); kept from the original
filename=paste("Correlation_GBLUP_iSize_6Replicates",round,".txt",sep="")
write.table(correlation,filename,sep="\t",quote=FALSE,row.names=FALSE,col.names=FALSE)
filename1=paste("Predictions_GBLUP_iSize_6Replicates",round,".txt",sep="")
write.table(All,filename1,sep="\t",quote=FALSE,row.names=FALSE)
}
#Apply so maximum of replicates is 5
# Same pipeline with the replicate cap lowered to 5.
{
run5 = removereplicates(5,Only6reps)
Only5reps_avg=run5[[1]]
Only5reps=run5[[2]]
tests=testpop_generator(Only5reps_avg)
dataframe = Only5reps_avg
print("Starting GBLUP prediction")
results5=mclapply(tests, GP_GBLUP)
}
# Summarize and make files with results
# Stack the six fold predictions, score accuracy as the Pearson correlation
# between columns 2 and 3, and write the results to files tagged with the
# replicate cap and the global `round` counter.
{
stopifnot(length(results5) >= 6)  # the summary expects six folds
All=do.call(rbind, results5[1:6])  # replaces the hand-unrolled first..sixth rbind
correlation=cor(as.numeric(as.character(All[,2])),as.numeric(as.character(All[,3]))) #means of replicates
correlation  # no-op inside { } (not auto-printed); kept from the original
filename=paste("Correlation_GBLUP_iSize_5Replicates",round,".txt",sep="")
write.table(correlation,filename,sep="\t",quote=FALSE,row.names=FALSE,col.names=FALSE)
filename1=paste("Predictions_GBLUP_iSize_5Replicates",round,".txt",sep="")
write.table(All,filename1,sep="\t",quote=FALSE,row.names=FALSE)
}
#Apply so maximum of replicates is 4
# Same pipeline with the replicate cap lowered to 4.
{
run4= removereplicates(4,Only5reps)
Only4reps_avg=run4[[1]]
Only4reps=run4[[2]]
tests=testpop_generator(Only4reps_avg)
dataframe = Only4reps_avg
print("Starting GBLUP prediction")
results4=mclapply(tests, GP_GBLUP)
}
# Summarize and make files with results
# Stack the six fold predictions, score accuracy as the Pearson correlation
# between columns 2 and 3, and write the results to files tagged with the
# replicate cap and the global `round` counter.
{
stopifnot(length(results4) >= 6)  # the summary expects six folds
All=do.call(rbind, results4[1:6])  # replaces the hand-unrolled first..sixth rbind
correlation=cor(as.numeric(as.character(All[,2])),as.numeric(as.character(All[,3]))) #means of replicates
correlation  # no-op inside { } (not auto-printed); kept from the original
filename=paste("Correlation_GBLUP_iSize_4Replicates",round,".txt",sep="")
write.table(correlation,filename,sep="\t",quote=FALSE,row.names=FALSE,col.names=FALSE)
filename1=paste("Predictions_GBLUP_iSize_4Replicates",round,".txt",sep="")
write.table(All,filename1,sep="\t",quote=FALSE,row.names=FALSE)
}
#Apply so maximum of replicates is 3
# Same pipeline with the replicate cap lowered to 3.
{
run3= removereplicates(3,Only4reps)
Only3reps_avg=run3[[1]]
Only3reps=run3[[2]]
tests=testpop_generator(Only3reps_avg)
dataframe = Only3reps_avg
print("Starting GBLUP prediction")
results3=mclapply(tests, GP_GBLUP)
}
# Summarize and make files with results
# Stack the six fold predictions, score accuracy as the Pearson correlation
# between columns 2 and 3, and write the results to files tagged with the
# replicate cap and the global `round` counter.
{
stopifnot(length(results3) >= 6)  # the summary expects six folds
All=do.call(rbind, results3[1:6])  # replaces the hand-unrolled first..sixth rbind
correlation=cor(as.numeric(as.character(All[,2])),as.numeric(as.character(All[,3]))) #means of replicates
correlation  # no-op inside { } (not auto-printed); kept from the original
filename=paste("Correlation_GBLUP_iSize_3Replicates",round,".txt",sep="")
write.table(correlation,filename,sep="\t",quote=FALSE,row.names=FALSE,col.names=FALSE)
filename1=paste("Predictions_GBLUP_iSize_3Replicates",round,".txt",sep="")
write.table(All,filename1,sep="\t",quote=FALSE,row.names=FALSE)
}
#Apply so maximum of replicates is 2
# Same pipeline with the replicate cap lowered to 2.
{
run2=removereplicates(2,Only3reps)
Only2reps_avg=run2[[1]]
Only2reps=run2[[2]]
tests=testpop_generator(Only2reps_avg)
dataframe = Only2reps_avg
print("Starting GBLUP prediction")
results2=mclapply(tests, GP_GBLUP)
}
# Summarize and make files with results
# Stack the six fold predictions, score accuracy as the Pearson correlation
# between columns 2 and 3, and write the results to files tagged with the
# replicate cap and the global `round` counter.
{
stopifnot(length(results2) >= 6)  # the summary expects six folds
All=do.call(rbind, results2[1:6])  # replaces the hand-unrolled first..sixth rbind
correlation=cor(as.numeric(as.character(All[,2])),as.numeric(as.character(All[,3]))) #means of replicates
correlation  # no-op inside { } (not auto-printed); kept from the original
filename=paste("Correlation_GBLUP_iSize_2Replicates",round,".txt",sep="")
write.table(correlation,filename,sep="\t",quote=FALSE,row.names=FALSE,col.names=FALSE)
filename1=paste("Predictions_GBLUP_iSize_2Replicates",round,".txt",sep="")
write.table(All,filename1,sep="\t",quote=FALSE,row.names=FALSE)
}
#Apply so maximum of replicates is 1
# Final round: a single replicate per entry.
{
run1=removereplicates(1,Only2reps)
Only1reps_avg=run1[[1]]
Only1reps=run1[[2]]
tests=testpop_generator(Only1reps_avg)
dataframe = Only1reps_avg
print("Starting GBLUP prediction")
results1=mclapply(tests, GP_GBLUP)
}
# Summarize and make files with results
# Stack the six fold predictions, score accuracy as the Pearson correlation
# between columns 2 and 3, and write the results to files tagged with the
# replicate cap and the global `round` counter.
{
stopifnot(length(results1) >= 6)  # the summary expects six folds
All=do.call(rbind, results1[1:6])  # replaces the hand-unrolled first..sixth rbind
correlation=cor(as.numeric(as.character(All[,2])),as.numeric(as.character(All[,3]))) #means of replicates
correlation  # no-op inside { } (not auto-printed); kept from the original
filename=paste("Correlation_GBLUP_iSize_1Replicates",round,".txt",sep="")
write.table(correlation,filename,sep="\t",quote=FALSE,row.names=FALSE,col.names=FALSE)
filename1=paste("Predictions_GBLUP_iSize_1Replicates",round,".txt",sep="")
write.table(All,filename1,sep="\t",quote=FALSE,row.names=FALSE)
}
|
151f4a4d259947d03f08c14cd49698a6c2b06ce2 | f042fbdf31a2106bfbe298b32dc0aa551bd3ae84 | /man/netcdf.extract.points.as.sf.Rd | eea82918e0faa7dfa0760ac955a48fc6eccf7159 | [] | no_license | danielbonhaure/weather-generator | c76969967c3a60500a6d90d5931a88fb44570eba | 6a207415fb53cca531b4c6be691ff2d7d221167d | refs/heads/gamwgen | 2023-01-21T17:38:46.102213 | 2020-12-04T21:59:05 | 2020-12-04T21:59:05 | 286,565,700 | 0 | 0 | null | 2020-12-01T13:19:05 | 2020-08-10T19:50:16 | R | UTF-8 | R | false | true | 359 | rd | netcdf.extract.points.as.sf.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\name{netcdf.extract.points.as.sf}
\alias{netcdf.extract.points.as.sf}
\title{Extract specific points from netcdf4, as sf}
\usage{
netcdf.extract.points.as.sf(netcdf_filename, points_to_extract)
}
\description{
Extract specific points from a netcdf4 file, as a sf object.
}
|
e0d469fcd4f86da322f82da56c6854745227a8e7 | 621a7021bf0ce15f789b1b591bce43095cbc54b3 | /model-functions.R | b04f8a94ebb899e9ac47712a921d20a4ba8a7125 | [] | no_license | tivanics/Capstone | bd5a806ff971dace2487b90847156a34153bb147 | f3a07d8fe77942227fbcc3067f1ff923ea32fde3 | refs/heads/main | 2023-04-29T14:11:44.643817 | 2021-05-16T18:36:23 | 2021-05-16T18:36:23 | 366,922,999 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,062 | r | model-functions.R | library(survival)
library(tidyverse)
library(readr)
library(dplyr)
library(ggplot2)
library(tidyr)
library(lubridate)
library(survminer)
library(msm)
library(haven)
library(ggsci)
library(plotly)
library(scales)
options(scipen = 999)  # strongly prefer fixed over scientific notation in printed output
# Load pre-fitted multi-state model objects shipped with the app (used with
# pmatrix.msm below).
#Baseline (no covariates)
#HCC.msm <- readRDS(file = "data/HCC.msm.rds")
#Preop (sex, age)
HCC.preop <- readRDS(file = "data/HCC.preop.rds")
#Postop (age, sex, pathology variables including microvascular invasion, satellite lesion, tumor number tumor size)
HCC.postop <- readRDS(file = "data/HCC.postop.rds")
# This function is from: https://www.r-bloggers.com/2016/07/round-values-while-preserve-their-rounded-sum-in-r/
# The function preserves the overall sum and rounds the number to a specified number of digits
# Round the values in `x` to `digits` decimal places while preserving the
# (rounded) total: floor everything first, then hand the leftover whole units
# back to the entries with the largest fractional remainders.
round_preserve_sum <- function(x, digits = 0) {
  scale_factor <- 10^digits
  scaled <- x * scale_factor
  floored <- floor(scaled)
  # How many whole units were lost by flooring.
  deficit <- round(sum(scaled)) - sum(floored)
  # Give one unit back to each of the `deficit` largest remainders.
  winners <- tail(order(scaled - floored), deficit)
  floored[winners] <- floored[winners] + 1
  floored / scale_factor
}
# Evaluate the pre-operative multi-state model's transition-probability matrix
# at t = 0, steps, 2*steps, ..., last (months) for a patient with the given
# sex and age, and bind the matrices side by side (one square block per
# timepoint).
pmatrix_calculatorpreop <- function(input,
                                    sex,
                                    age,
                                    steps=3, last=60){
  timepoints <- seq(from = 0, to = last, by = steps)
  n_points <- last / steps + 1  # same index bound as the original loop
  blocks <- lapply(seq_len(n_points), function(k) {
    pmatrix.msm(
      input,
      covariates = list(GenderMale1female0 = sex, Age = age),
      t = timepoints[k]
    )
  })
  do.call(cbind, blocks)
}
# Evaluate the post-operative multi-state model's transition-probability
# matrix at t = 0, steps, 2*steps, ..., last (months) and bind the matrices
# column-wise, one square block per timepoint. The covariate names must match
# those the model was fitted with (sex/age plus four pathology variables).
pmatrix_calculatorpostop <- function(input,
                                     sex,
                                     age,
                                     solitary,
                                     satellite,
                                     microvascular,
                                     size,
                                     steps=3, last=60) {
  timepoints <- c(seq(from = 0, to = last, by = steps))
  probestimates <- NULL
  # Grow the result by cbind-ing one pmatrix per timepoint; assumes `steps`
  # divides `last` so the loop bound matches length(timepoints).
  for (i in 1:(last / steps + 1)) {
    probestimates <-
      cbind(probestimates, pmatrix.msm(
        input,
        covariates = list(
          GenderMale1female0 = sex,
          Age = age,
          Path_number_solitary = solitary,
          Satellite_lesion_path = satellite,
          Microvascular_invasionnotabletobeassessedindeterminate9 = microvascular,
          Path_size_5cm = size
        ),
        t = timepoints[i]
      ))
  }
  return(probestimates)
}
# preparePlot: interactive stacked-area chart of state-occupancy probabilities
# over time, starting from `state`, under the pre-operative ("preop") or
# post-operative ("postop") multi-state model.
#
# Arguments:
#   input  -- "preop" (sex/age covariates only) or "postop" (adds pathology).
#   state  -- row name of the transition-probability matrix to condition on.
#   sex, age -- covariates for both models; solitary, satellite,
#               microvascular, size -- pathology covariates, used only when
#               input == "postop".
#   steps, last -- evaluate every `steps` months from 0 up to `last` months.
#   by_year -- accepted for backward compatibility; not used (as originally).
# Returns a plotly object.
preparePlot <- function(input = "preop", state = "Surgery",
                        sex = "Female", age = 18, solitary = 0,
                        satellite = "No", microvascular = 0, size = 0,
                        steps = 3, last = 60, by_year=FALSE) {
  state_levels <- c("No recurrence", "1st intra-hepatic recurrence",
                    "2nd intra-hepatic recurrence", "3rd intra-hepatic recurrence",
                    "4th intra-hepatic recurrence", "5th intra-hepatic recurrence",
                    "Distant recurrence", "Death")

  # FIX: the original called pmatrix_calculatorpreop(HCC.msm, steps, last),
  # which (a) referenced HCC.msm whose readRDS() is commented out at the top
  # of this file and (b) fed `steps`/`last` into that function's `sex`/`age`
  # parameters. Call the loaded pre-op model with arguments in declared order.
  # FIX: `State <- rep(...)` inside data.frame() produced a mangled column
  # name that only worked via $-partial matching; name the column explicitly,
  # and derive the repeat count from steps/last instead of hard-coding 21.
  series <- data.frame(
    Months = as.numeric(rep(seq(0, last, by = steps), each = 8)),
    Probability = pmatrix_calculatorpreop(HCC.preop, sex, age, steps, last)[state, ],
    State = rep(state_levels, last / steps + 1)
  )

  if (input == "postop") {
    series$Probability <- pmatrix_calculatorpostop(HCC.postop,
                                                   sex, age, solitary, satellite,
                                                   microvascular, size,
                                                   steps, last)[state, ]
    # Round to 4 dp while preserving the overall sum of the series.
    series$Probability <- round_preserve_sum(series$Probability, digits = 4)
  }

  series$State <- factor(series$State, levels = state_levels)

  if (input == "postop") {
    print("in filter condition")
    print(state)
    if (state == "First local recurrence") {
      series <- dplyr::filter(series, State != "No recurrence")
      print(head(series))
    } else if (state == "Second local recurrence") {
      series <- dplyr::filter(series,
                              State != "1st intra-hepatic recurrence" &
                                State != "No recurrence")
      print(head(series))
    }
  }

  # Fixed colour mapping so each state keeps its colour even when some states
  # are filtered out. (9 palette colours for 8 levels, as in the original;
  # the 9th colour is left unnamed and unused.)
  myColours <- get_palette(palette = "Reds", 9)
  names(myColours) <- state_levels

  # NOTE(review): the axes hard-code a 0-60 month window; a `last` other than
  # 60 is simulated but not fully displayed.
  seriesPlot <-
    ggplot(series, aes(Months, Probability),
           cex.axis = 3.0) +
    geom_area(aes(fill = State)) +
    scale_x_continuous(limits = c(0, 60), expand = c(0, 1)) +
    scale_y_continuous(labels = scales::percent) +
    coord_cartesian(xlim = c(0, 60), ylim = c(0, 1), expand = F) +
    theme_bw() +
    scale_fill_manual(values = myColours, drop = FALSE) +
    theme(
      panel.grid = element_blank(),
      panel.border = element_blank(),
      text = element_text(size = 14),
      legend.title = element_blank(),
      axis.title = element_text(size = 14, face = "bold")
    )

  seriesPlotly <- ggplotly(seriesPlot) %>%
    layout(legend = list(
      font = list(size = 14),
      title = list(text = '<b>State</b>',
                   font = list(size = 16))
    )) %>%
    layout(xaxis = list(fixedrange = TRUE)) %>%
    layout(yaxis = list(fixedrange = TRUE))

  return(seriesPlotly)
}
|
18467eba1e18d7f488d035eae4d89fd48ce96cc3 | 55e8db068fbb5fae93e946b4d94ca7820a8b88b9 | /man/getConnections.Rd | e151a96c75a81dd92274521b3791e096d4681534 | [
"WTFPL",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | cnxtech/moneRo | cc80786ba5b85d21a1aeaaa5d39f2d8c47d770f1 | f78f82a9714f8dd214e2b556d94615163268c70a | refs/heads/master | 2020-07-02T02:02:34.994096 | 2017-09-24T04:08:43 | 2017-09-24T04:08:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,561 | rd | getConnections.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/monero.R
\name{getConnections}
\alias{getConnections}
\title{getConnections}
\usage{
getConnections(ip = getOption("monerod.ip", "127.0.0.1"),
port = getOption("monerod.port", 18081))
}
\arguments{
\item{ip}{daemon ip address}
\item{port}{daemon port}
}
\value{
connections - List of all connections and their info
\itemize{
\item avg_download unsigned int; Average bytes of data downloaded by node.
\item avg_upload unsigned int; Average bytes of data uploaded by node.
\item current_download unsigned int; Current bytes downloaded by node.
\item current_upload unsigned int; Current bytes uploaded by node.
\item incoming boolean; Is the node getting information from your node?
\item ip string; The node's IP address.
\item live_time unsigned int
\item local_ip boolean
\item localhost boolean
\item peer_id string; The node's ID on the network.
\item port stringl The port that the node is using to connect to the network.
\item recv_count unsigned int
\item recv_idle_time unsigned int
\item send_count unsigned int
\item send_idle_time unsigned int
\item state string
}
}
\description{
Retrieve information about incoming and outgoing connections to your node.
}
\details{
You may need to `Sys.setlocale('LC_ALL','C')` before running this function
because often the data returned contains a string that is not valid in all
locales.
}
\references{
\url{https://getmonero.org/knowledge-base/developer-guides/daemon-rpc#getconnections}
}
\author{
Garrett See
}
|
b50bd3d3715788940a7cd6149848c6374613c626 | 5f413d1ac57354edbb3735265c64fcc20f801b30 | /As1/Rscripts/Oil forecast.R | 2a22f6d2887043acdd187dcd60b101808f5ce681 | [] | no_license | otakoryu/Econometrics-for-Finance | b3f89d050095fcd43853c33ed1835bb5fce24e05 | 9294566a037354b49576b8ccd662eb1812e99fef | refs/heads/master | 2020-04-29T22:32:38.657075 | 2019-03-19T08:22:43 | 2019-03-19T08:22:43 | 176,450,833 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,662 | r | Oil forecast.R | ##Question A(i)
# Load the daily oil price series, convert to percentage log returns, and
# split into a 3184-observation training window and a 1250-observation
# hold-out used by the rolling forecasts below.
oil<-read.delim("oilf.txt",header=T)
# NOTE(review): attach() exposes columns globally and is fragile; prefer
# referencing oil$oilf directly.
attach(oil)
names(oil)
oil=as.ts(oil$oilf)
plot(oil)
# 100 * diff(log(price)) = percentage log returns
oil.rets<-100*diff(log(oil))
plot(oil.rets)
oil.tr<-as.ts(oil.rets[1:3184])
oil.te<-as.ts(oil.rets[3185:4434])
plot(oil.tr)
plot(oil.te)
library(tseries)
# Augmented Dickey-Fuller test for stationarity of the returns
adf.test(oil.rets)
##---Summary: descriptive statistics of the returns----
summary(oil.rets)
library(e1071)
var(oil.rets)
sd(oil.rets)
skewness(oil.rets)
kurtosis(oil.rets)
##--density plot----
plot(density(oil.rets))
hist(oil.rets,freq=FALSE, xlab="log returns",ylab="probability",
     main="distribution of oil return",
     xlim=c(-15,15),ylim=c(0,0.3),
     col="pink",breaks=100,las=1)
# Overlay a normal density with the sample mean/sd for visual comparison
curve(dnorm(x,mean=mean(oil.rets),
            sd=sd(oil.rets)),add=TRUE,
            col="darkblue",lwd=2)
###Normality checks: QQ plot plus Jarque-Bera tests--------------------
qqnorm(oil.rets)
qqline(oil.rets,col="red",lwd=3)
library(tseries)
jarque.bera.test(oil.rets)
library(fBasics)
normalTest(oil.rets,method="jb",na.rm=TRUE)
#oilAR(1)
# Fit an AR(1) by maximum likelihood on the training window, check residual
# whiteness (Ljung-Box via portes, plus tsdiag), then produce a fixed-origin
# 1250-step forecast with no re-estimation.
library(forecast)
oilar1<-arima(oil.tr,order=c(1,0,0),method="ML",include.mean = T)
plot(oilar1)
library(portes);
re_model1 <- portest(oilar1, lags=c(5, 10), test=c("LjungBox"), SquaredQ=FALSE)
tsdiag(oilar1)
resid.model1<-residuals(oilar1)
plot(resid.model1)
#foc without re-estimation
oilar1_foc<-forecast(oilar1,h=1250)
plot(oilar1_foc)
#----rolling window---------------
fun1<-function(x){
  # One-step-ahead AR(1) point forecast: refit on the window `x` and return
  # the forecast for the next observation (used by rollapply below).
  ar1_fit <- arima(x, order = c(1, 0, 0))
  forecast(ar1_fit, h = 1)$mean
}
length(oil.tr)
# Rolling one-step forecasts: slide a 3184-observation window across the full
# return series, refitting the AR(1) at each step (zoo::rollapply,
# right-aligned), then overlay the forecasts on the realised returns and
# score them against the hold-out.
require(zoo)
roll.oilar1<-rollapply(oil.rets,width = 3184,FUN=fun1,align = "right")
length(roll.oilar1)
plot(roll.oilar1)
print(roll.oilar1)
tail(roll.oilar1,2)
plot(oil.rets)
par(new=T)
lines(roll.oilar1, col="pink", lwd=2)
accuracy (roll.oilar1,oil.te[1:1250])
#oilMA(1)
# MA(1) fit on the training window and a fixed-origin 1250-step forecast.
oilma1<-arima(oil.tr,order=c(0,0,1),method="ML",include.mean = T)
plot(oilma1)
#---without re-estimation-----------------
oilma1_foc<-forecast(oilma1,h=1250)
plot(oilma1_foc)
#-----rolling window------------------
fun2<-function(x){
  # One-step-ahead MA(1) point forecast for a rolling window.
  ma1_fit <- arima(x, order = c(0, 0, 1))
  forecast(ma1_fit, h = 1)$mean
}
# Rolling one-step MA(1) forecasts over the same 3184-observation window,
# overlaid on the realised returns.
roll.oilma1<-rollapply(oil.rets,
                       width = 3184,FUN=fun2,
                       align = "right")
plot(roll.oilma1)
plot(oil.rets)
par(new=T)
lines(roll.oilma1,col="pink",lwd=2)
#oilARIMA(1,0,1)
# ARMA(1,1) fit on the training window, residual diagnostics, and a
# fixed-origin 1250-step forecast.
oilarima11<-arima(oil.tr,order=c(1,0,1),method="ML",include.mean = T)
plot(oilarima11)
tsdiag(oilarima11)
#-----without re-estimation------
oilarima11_foc<-forecast(oilarima11,h=1250)
plot(oilarima11_foc)
#-----rolling window-----------
fun3<-function(x){
  # One-step-ahead ARMA(1,1) point forecast for a rolling window.
  arma11_fit <- arima(x, order = c(1, 0, 1))
  forecast(arma11_fit, h = 1)$mean
}
# Rolling one-step ARMA(1,1) forecasts, overlaid on the realised returns.
roll.oilarima11<-rollapply(oil.rets,
                           width = 3184,FUN=fun3,
                           align = "right")
plot(roll.oilarima11)
plot(oil.rets)
par(new=T)
lines(roll.oilarima11,col="pink",lwd=2)
#----naive benchmark: random-walk forecast--------------
naivef1<-rwf(oil.tr,h=1250)
plot(naivef1)
#historical mean benchmark
# NOTE(review): `mean` here becomes a plain number that reuses the base
# function's name (calls to mean() still resolve to the function, but it is
# confusing) -- consider a distinct name such as hist_mean.
mean<-mean(oil.tr)
meanf1<-forecast(mean,h=1250,align="right")
plot(meanf1)
plot(oil.rets)
par(new=T)
lines(mean,col="pink",lwd=2,align="right")
accuracy(mean,oil.te[1:1250])
#SMA
# Simple moving averages of the returns at 20/60/180-observation windows,
# each fed to forecast() for a 1250-step extension.
library(TTR)
#20
# NOTE(review): this SMA is computed on the last 1270 observations of the
# full series (indices 3165:4434), unlike the 60/180 versions below which use
# only the training window -- confirm that is intentional.
sma20 <- SMA(oil.rets[3165:4434], 20)
sma20f <- forecast(sma20, 1250)
plot(sma20f)
print(sma20f)
length(sma20f)
print(sma20)
head(sma20,80)
#60
model6_60<-SMA(oil.tr,60)
model6_60_foc<-forecast(model6_60,1250)
plot(model6_60_foc)
print(model6_60_foc)
#180
model6_180<-SMA(oil.tr,180)
model6_180_foc<-forecast(model6_180,1250)
plot(model6_180_foc)
print(model6_180_foc)
###SMA---
library(forecast)
require(smooth)
require(Mcomp)
|
a0835a999a207e3a586a63655020b8ebbd52b8c9 | 8ff8c39abc5e195fe732ff169c77b643c8f94d06 | /ui.R | 47961919afdfe6b8b32c975f9567aa6467bfddcf | [] | no_license | AngelRy/Ddp-final-project | 732e5f33cff74fd8d69073ac3c3e56cde0ffc2b5 | fc9fece9ab9016269b166ec64d6cba95161fa807 | refs/heads/master | 2021-01-19T23:24:35.888191 | 2017-04-25T11:47:19 | 2017-04-25T11:47:19 | 88,972,206 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 974 | r | ui.R |
library(shiny)
# BMI calculator UI: height/weight inputs in the sidebar, computed BMI and a
# tongue-in-cheek category label in the main panel. Output ids "bmi" and
# "insult" must match the server's render functions.
shinyUI(fluidPage(theme = "bootstrap.css",
  # Application title
  titlePanel("Body Mass Index report"),
  # Sidebar: inputs plus instructions; results go to the main panel
  sidebarLayout(
    sidebarPanel(
      h4("Enter height in cm."),
      numericInput("tall", label = "Height in cm", value = NA, min = 55, max = 272),
      h4("Enter weight in kg."),
      numericInput("fat", label = "Weight in kg", value = NA),
      submitButton("Submit!"), br(), br(),
      h4("Instructions:"),
      h6("Body Mass Index is an indicator of how healthy is a person's weight given their height.
         To check your BMI, just enter your height and weight and press the Submit! button.
         And never mind the insults/compliments - they're for entertainment purposes only.")
    ),
  # Main panel: illustration, BMI value, and category text from the server
  mainPanel(
    img(src="bmimage.png"),
    h2("Your BMI (body mass index) is:"),
    h2(textOutput("bmi")),
    h3("You are "),
    h3(textOutput("insult"))
  )
  )
))
6dbc6f439a7555cac5daf5c53d54cfa1f8676387 | e23cad3cbef43d60803fc3fe2e7bec85ffd2811d | /man/alignAssignHashtag.Rd | 0707423858c39ae6baf3cadd5dbcb5022fa6f8de | [] | no_license | kraaijenbrink/AlignAssign | 95c65b90fc7876ff30664f8e3bcec8a6b2edddf0 | 02978939f977b5047ed032213778a696d9c9c502 | refs/heads/master | 2020-07-01T05:36:53.999981 | 2019-08-07T14:36:59 | 2019-08-07T14:36:59 | 201,063,048 | 0 | 0 | null | 2019-08-07T14:07:11 | 2019-08-07T14:07:10 | null | UTF-8 | R | false | true | 747 | rd | alignAssignHashtag.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/align_assign.R
\name{alignAssignHashtag}
\alias{alignAssignHashtag}
\title{#' Align a highlighted region's assignment operators.
#'
#' @return Aligns the equal sign assignment operators (\code{=}) within a
#' highlighted region.
#' @export
alignAssignEqual <- function() {
alignAssign("=")
}}
\usage{
alignAssignHashtag()
}
\value{
Aligns the hastags (\code{#}) within a
highlighted region.
}
\description{
#' Align a highlighted region's assignment operators.
#'
#' @return Aligns the single caret operators (\code{<-}) within a
#' highlighted region.
#' @export
alignAssignArrow <- function() {
alignAssign("<-")
}
Align a highlighted region's comment hastags.
}
|
6389cb2b49a67fdd8de205f35b9b2bf6aae013b3 | b44032d37210f23d97d40420cf3547daa269ab97 | /R/.workflow.R | d332da85f0cd7ba3227c6057597e2d7ad1d3e76f | [
"MIT"
] | permissive | KWB-R/kwb.qmra | 227c4a5d1ef1eb9bb2c863ab7a299175a46d6109 | 9e096057da37bf574626c60e9ad524dff0b1d89a | refs/heads/master | 2022-07-02T08:21:21.895235 | 2021-06-14T20:28:22 | 2021-06-14T20:28:22 | 68,301,647 | 19 | 3 | MIT | 2022-06-08T14:10:17 | 2016-09-15T14:32:15 | R | UTF-8 | R | false | false | 2,513 | r | .workflow.R | library(kwb.qmra)
#library(ggplot2)
### Create configuration files
# One-off helpers for generating dummy QMRA configuration files; wrapped in
# if (FALSE) so they never run when the script is sourced.
if (FALSE) {
  config_write_dummy()
  config_write_dummy("C:/Users/mrustl/Documents/WC_Server/R_Development/trunk/RPackages/kwb.qmra/inst/extdata/configs/dummy")
}
################################################################################
#### 1) CONFIGURATION
################################################################################
# NOTE(review): absolute Windows paths make this script machine-specific.
confDirs <- dir("C:/Users/mrustl/Desktop/QMRA_configs",full.names = TRUE)
#### DEFINE DIRECTORY ################
configDir <- system.file("extdata/configs/dummy", package = "kwb.qmra")
config <- config_read(configDir)
# Round-trip the bundled dummy config under a new name (unzipped).
config_write(config,
             confName = "dummy1",
             confDir = system.file("extdata/configs", package = "kwb.qmra"),
             zipFiles = FALSE)
#### LOAD ############################
# Overwrites `config` with the second user-supplied configuration directory.
config <- config_read(confDir = confDirs[2])
################################################################################
#### 2) SIMULATE RISK
################################################################################
# Render the workflow Rmd to markdown, then run the QMRA risk simulation.
knitr::knit(input = "C:/Users/mrustl/Documents/WC_Server/R_Development/trunk/RPackages/kwb.qmra/inst/extdata/report/workflow.Rmd",
            output = "C:/Users/mrustl/Documents/WC_Server/R_Development/trunk/RPackages/kwb.qmra/inst/extdata/report/workflow.md")
risk <- simulate_risk(config)
#inflow <- simulate_inflow(config)
################################################################################
#### 3) VISUALIZE
################################################################################
# Diagnostic plots of the simulated risk object: inflow, treatment reduction,
# effluent, event volumes, and dose-response.
plot_inflow(risk)
plot_reduction(risk)
plot_effluent(risk)
plot_event_volume(risk)
plot_doseresponse(risk)
### Exposure: effluent conc * volume #####
plot_event_exposure(risk)
#### Dose: based on exposure discrete dose is calculated by using rpois(), for
#### details see: simulate_risk() function
plot_event_dose(risk)
#### RISK PER EVENT ######################
plot_event_infectionProb(risk)
plot_event_illnessProb(risk)
plot_event_dalys(risk)
#### RISK TOTAL ##########################
plot_total_infectionProb(risk)
plot_total_illnessProb(risk)
plot_total_dalys(risk)
################################################################################
#### 4) Create report
################################################################################
# Fixed seed so the simulated results inside the report are reproducible.
set.seed(seed = 1)
report_workflow(confDirs = "C:/Users/mrustl/Desktop/QMRA_configs")
|
e276483a4f4bdc57f2f093377cb07d53ea1df2aa | 9184d97e18768ba4b410994bdef94df9c122ac78 | /mlflow/R/mlflow/inst/examples/r/simple/train.R | 2b9929851b8ae081479ec7bfaec06501251f3ae5 | [
"Apache-2.0"
] | permissive | kevinykuo/mlflow | e8299fbf91120299e2776231e9a2b9577899a09d | 2f6fa76f0b39a695cfc355c9816666b912a62eea | refs/heads/master | 2020-03-28T18:17:50.816547 | 2018-09-15T00:22:54 | 2018-09-15T00:22:54 | 148,869,029 | 1 | 0 | Apache-2.0 | 2018-09-27T04:17:02 | 2018-09-15T04:21:31 | Python | UTF-8 | R | false | false | 81 | r | train.R | library(mlflow)
# Minimal MLflow example: record one parameter and one metric in the active run.
mlflow_log_param("parameter", 5)
mlflow_log_metric("metric", 0)
|
5b91771912b304be6548bd540123a97351e0ceb7 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/SSDforR/examples/GABrf2.Rd.R | bb487f0295fe5ada8bc6254123cce1c53b8900ab | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 870 | r | GABrf2.Rd.R | library(SSDforR)
### Name: GABrf2
### Title: Autocorrelation for group data
### Aliases: GABrf2
### Keywords: ~kwd1 ~kwd2
### ** Examples
# Example data for GABrf2 (autocorrelation for group data):
# `attend`  -- 0/1 outcome per observation (one NA marks the same position as
#              the NA in the other two vectors),
# `week`    -- week index for each observation (five observations per week),
# `pattend` -- phase label ("A" or "B") aligned with `attend`.
attend<-c(0,0,0,1,0,0,1,0,0,1,0,0,1,0,1,0,0,0,0,0,1,1,0,0,1,NA,
0,1,1,0,1,1,0,1,1,1,0,1,0,0,1,1,1,1,0,0,1,1,0,1,0,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)
week<-c(1,1,1,1,1,2,2,2,2,2,3,3,3,3,3,4,4,4,4,4,5,5,5,5,5,NA,6,6,6,6,6,7,7,7,7,7,
8,8,8,8,8,9,9,9,9,9,10,10,10,10,10,11,11,11,11,11,12,12,12,12,12,13,
13,13,13,13,14,14,14,14,14,15,15,15,15,15)
pattend<-c("A","A","A","A","A","A","A","A","A","A","A","A","A","A","A","A","A",
"A","A","A","A","A",
"A","A","A",NA,"B","B","B","B","B","B","B","B","B","B","B","B","B","B","B"
,"B","B","B",
"B","B","B","B","B","B","B","B","B","B","B","B","B","B",
"B","B","B","B","B","B","B","B","B","B","B","B","B","B","B","B","B","B")
# now run: GABrf2(attend,pattend,week,"A")
|
48aac781085f0574d196c5a7b45bc30875bd96de | fed4409da9801ce1ca986b1814631acb6c8c8aed | /splitdoor/man/splitdoor_causal_estimate.Rd | 571503ef16e95205d1371ea2f020aa7615389997 | [
"MIT"
] | permissive | amit-sharma/splitdoor-causal-criterion | 6b7684b9f752b77aaa3844311d336603249d4421 | 28e22817023e51b4c91205ef4519b4cbd62bf9b6 | refs/heads/master | 2021-01-12T05:02:54.684611 | 2019-12-16T01:08:31 | 2019-12-16T01:08:31 | 77,838,086 | 15 | 5 | MIT | 2019-12-16T01:08:32 | 2017-01-02T14:12:40 | R | UTF-8 | R | false | true | 726 | rd | splitdoor_causal_estimate.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/splitdoor.R
\name{splitdoor_causal_estimate}
\alias{splitdoor_causal_estimate}
\title{Estimate causal effect of multiple treatment variables on outcome variables, given data for their timeseries.}
\usage{
splitdoor_causal_estimate(tseries_df,
fn_independence_test = dcor_independence_test, num_discrete_levels = 4,
independence_threshold = 0.05, ...)
}
\arguments{
\item{independence_threshold}{}
}
\value{
A data.frame containing causal estimates for each pair of (treatment, outcome) variables.
}
\description{
Estimate causal effect of multiple treatment variables on outcome variables, given data for their timeseries.
}
|
f86373f82fca48e37029ede3070fc5f80c007e33 | ef49d1238c49c0b8429c5cf00ac86eba407abbe7 | /man/chapter_7_table_9.Rd | ed5b46a9609e68cad6931b9bc51b1eee65a18afd | [] | no_license | yelleKneK/AMCP | be46c4969bf4e4bb7849a904664d9b3c17e494ef | 72e0e0ff5053d42da9a1c0e2e1ec063586634e8a | refs/heads/master | 2022-11-23T06:39:24.885288 | 2020-07-24T19:57:16 | 2020-07-24T19:57:16 | 282,302,956 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,772 | rd | chapter_7_table_9.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/documentation.R
\docType{data}
\name{chapter_7_table_9}
\alias{chapter_7_table_9}
\alias{C7T9}
\alias{Chapter_7_Table_9}
\alias{c7t9}
\title{The data used in Chapter 7, Table 9}
\format{
An object of class \code{data.frame} with 36 rows and 3 columns.
}
\source{
\url{https://designingexperiments.com/data/}
Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
}
\usage{
data(chapter_7_table_9)
}
\description{
The data used in Chapter 7, Table 9
}
\details{
The following data is a generalization of the blood pressure data given in Table 7.5 (which itself was a generalization of the data given in Table 7.1). After the interaction is found to be significant, a common recommendation is to examine simple main effects. Recall that a simple main effect is the main effect of one factor given a fixed level of another factor. In this case interest is in determining if there are any differences in drugs (a) given biofeedback and (b) given no biofeedback.
}
\section{Variables}{
\describe{
\item{score}{blood pressure}
\item{feedback}{the likelihood of there being a biofeedback or drug main effect}
\item{drug}{the level of the drug factor}}
}
\section{Synonym}{
C7T9
}
\examples{
# Load the data
data(chapter_7_table_9)
# Or, alternatively load the data as
data(C7T9)
# View the structure
str(chapter_7_table_9)
}
\references{
Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
{A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
}
\author{
Ken Kelley \email{kkelley@nd.edu}
}
\keyword{datasets}
|
61b3000807358c3fccb30898afc35a1d560a6680 | 29e1e1848d443227ff4afadda93e96b74c01ad95 | /Arrest3/Arrest3.R | 243568315f789a50f1bb7056d93a8828af99cef9 | [] | no_license | DarcyShu/ArrestData-Whole-Group | 2ef8edd98fdd80b2d771cbb55d0bea028b02dcbf | 6d19075cc351009bbb311acf1aeeb51052238f0b | refs/heads/master | 2021-01-18T19:52:07.470482 | 2017-04-18T02:14:50 | 2017-04-18T02:14:50 | 86,917,837 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,311 | r | Arrest3.R |
four<-read.csv("Four.csv")
library(ggplot2)
library(RColorBrewer)
# Bin AGE into the report's six brackets in one step. Replaces six which()
# index vectors plus six overwrite assignments and a final factor() call:
# cut() with right-closed breaks reproduces the original <=18 / 18-25 /
# 25-34 / 35-44 / 44-54 / >54 logic (NAs stay NA) and returns a factor with
# the levels already in display order.
four$AGE<-cut(four$AGE,
              breaks=c(-Inf,18,25,34,44,54,Inf),
              labels=c('Under18','18to24','25to34','35to44','44to54','above55'))
plotbyage<-function(x){
  # Stacked bar chart of incident counts by age bracket, filled by race and
  # faceted by crime short-code.
  ggplot(x, aes(AGE, fill = factor(RACE))) +
    geom_bar(stat = "count") +
    facet_grid(. ~ SHORTCODE) +
    ylab("Number of Crime") +
    xlab("Age") +
    ggtitle("Distribution of Number of Incidents Over Race and Age") +
    guides(fill = guide_legend(title = "Race", reverse = F)) +
    theme(panel.background = element_rect(colour = "Black")) +
    scale_fill_brewer(palette = 'Set1')
}
plotbyage(four)
plotbygender<-function(x){
  # Stacked bar chart of incident counts by age bracket, filled by gender and
  # faceted by crime short-code.
  ggplot(x, aes(AGE, fill = factor(GENDER))) +
    geom_bar(stat = "count") +
    facet_grid(. ~ SHORTCODE) +
    ylab("Number of Crime") +
    xlab("Age") +
    ggtitle("Distribution of Number of Incidents Over Age and Gender") +
    guides(fill = guide_legend(title = "Gender", reverse = T)) +
    theme(panel.background = element_rect(colour = "Black")) +
    scale_fill_brewer(palette = 'Set1')
}
510ee171015623c052f3676fc40a0dc4da176fdb | ef3e70d51771bcdaa342a2224950b9e6864ff0e7 | /wish/wish_model.R | 2c68e6b22e101a293836d07325122c1d4b60f525 | [] | no_license | kaspardohrin/r | 269aaea3a40cfade1309221ac60f13de01983d95 | a129e0b2941f6d4f986c03430602569d375d7522 | refs/heads/master | 2023-02-24T09:40:47.169016 | 2021-02-02T13:04:34 | 2021-02-02T13:04:34 | 319,960,637 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,996 | r | wish_model.R | # mean of all labels label NOT ROUNDED
wish$mean_label <- (wish$rating_label + wish$unit_sold_label + wish$numberof_tags_label + wish$ratingof_merchant_label) / 4
# verplaats de tags een niveau hoger, i.e., $tags$en wordt $tags
# sub_selection$new_row <- list(c(list(sub_selection$image_tags_list[[1]]$tag, sub_selection$image_tags_list[[1]]$confidence)))
# test with only numbers
wish_model_numbers <- data.frame(unit_sold=wish$unit_sold_label,
rating_label = wish$rating_label,
numberof_tags = wish$numberof_tags_label,
ratingof_merchant = wish$ratingof_merchant_label)
# labels from 1-4 to 0-1
# wish_model_numbers$label <- wish_model_numbers$label/4
# wish_model_numbers$label <- round(wish_model_numbers$label)
# remove rows with na ?
wish_model_numbers <- na.omit(wish_model_numbers)
# add column with confidence level of first 10 tags in categories
for(i in 1:nrow(wish_model_numbers)) {
wish_model_numbers$image_confidence[i] <- mean(wish$image_tags_list[[i]]$confidence[1:10])
}
# verdeel confidence in kwartielen
wish_model_numbers$image_confidence[(wish_model_numbers$image_confidence < 31.88)] <- 1
wish_model_numbers$image_confidence[(wish_model_numbers$image_confidence >= 31.88) & (wish_model_numbers$image_confidence < 37.34)] <- 2
wish_model_numbers$image_confidence[(wish_model_numbers$image_confidence >= 37.34) & (wish_model_numbers$image_confidence <= 44.97)] <- 3
wish_model_numbers$image_confidence[(wish_model_numbers$image_confidence > 44.97)] <- 4
wish_model_numbers <- subset(wish_model_numbers, select= -c(label))
# kNN
library(DMwR)
library(class)
library("imputeTS")
wish_df <- read.csv("/Users/ireneprins/wish_unitssold_df.csv")
wish_df$wish.has_urgency_banner <- na.replace(wish_df$wish.has_urgency_banner, 0)
wish_df_scale <- data.frame(scale(wish_df))
wish_df_scale <- na.omit(wish_df_scale)
idxs <- sample(1:nrow(wish_df_scale),as.integer(0.7*nrow(wish_df_scale)))
wish_df_scale.train <- wish_df_scale[idxs,]
wish_df_scale.test <- wish_df_scale[-idxs,]
cl <- factor(wish_df_scale.train$wish.unit_sold_label)
# prediction
nn3 <- knn(train= wish_df_scale.train, test = wish_df_scale.test, cl= cl, k=3)
nn5 <- knn(train= wish_df_scale.train, test = wish_df_scale.test, cl =cl, k=5)
nn7 <- knn(train= wish_df_scale.train, test = wish_df_scale.test, cl = cl, k=7)
nn10 <- knn(train= wish_df_scale.train, test = wish_df_scale.test, cl =cl, k=10)
acc.3 <- 100 * sum(wish_df_scale.test$wish.unit_sold_label == nn3)/NROW(wish_df_scale.test$wish.unit_sold_label) # 85.7
acc.5 <- 100 * sum(wish_df_scale.test$wish.unit_sold_label == nn5)/NROW(wish_df_scale.test$wish.unit_sold_label) # 83.2
acc.7 <- 100 * sum(wish_df_scale.test$wish.unit_sold_label == nn7)/NROW(wish_df_scale.test$wish.unit_sold_label) # 84.8
acc.10 <- 100 * sum(wish_df_scale.test$wish.unit_sold_label == nn10)/NROW(wish_df_scale.test$wish.unit_sold_label) # 81.0
plot(wish_df_scale)
|
1cee7bc37812c5d1a6dcc0c4cdcdcd60b703de8f | 61125e75a75aa574e1071f9f60d7f2f6b5c3dae7 | /Zhejiang/2 Specialists Selection and Sensitivity Analysis/DAA.R | 8d067d31e20cf756cb8f3c8a827ff3f49f215af8 | [] | no_license | Lujun995/Soil-PyOM-microbiome-studies | 53aa608ba1f24ceafa3a56a061bbc69ac254f340 | 0cac0c7475fca9b2ac7c1407693eeb4b152c8504 | refs/heads/master | 2023-02-01T09:54:11.729154 | 2020-12-14T13:55:17 | 2020-12-14T13:55:17 | 273,355,195 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 8,447 | r | DAA.R | GMPR<-function (comm, intersect.no = 4, ct.min = 2, verbose = FALSE) {
# From Dr. Jun Chen, Chen.Jun2@mayo.edu
# Computes the GMPR size factor
#
# Args:
# comm: a matrix of counts, row - features (OTUs, genes, etc) , column - sample
# intersect.no: the minimum number of shared features between sample pair, where the ratio is calculated
# ct.min: the minimum number of counts required to calculate ratios ct.min = 5 has better results
#
# Returns:
# a list that contains:
# gmpr: GMPR size factors for all samples; Samples with distinct sets of features will be output as NA.
# nss: number of samples with significant sharing (> intersect.no) including itself
# mask counts < ct.min
comm[comm < ct.min] <- 0
if (is.null(colnames(comm))) {
colnames(comm) <- paste0('S', 1:ncol(comm))
}
if (verbose == TRUE)
cat('Begin GMPR size factor calculation ...\n')
comm.no <- numeric(ncol(comm))
gmpr <- sapply(1:ncol(comm), function(i) {
if (i %% 50 == 0) {
if (verbose == TRUE)
cat(i, '\n')
}
x <- comm[, i]
# Compute the pairwise ratio
pr <- x / comm
# Handling of the NA, NaN, Inf
pr[is.nan(pr) | !is.finite(pr) | pr == 0] <- NA
# Counting the number of non-NA, NaN, Inf
incl.no <- colSums(!is.na(pr))
# Calculate the median of PR
pr.median <- colMedians(pr, na.rm=TRUE)
# Record the number of samples used for calculating the GMPR
comm.no[i] <<- sum(incl.no >= intersect.no)
# Geometric mean of PR median
if (comm.no[i] > 1) {
return(exp(mean(log(pr.median[incl.no >= intersect.no]))))
} else {
return(NA)
}
}
)
if (sum(is.na(gmpr))) {
warning(paste0('The following samples\n ', paste(colnames(comm)[is.na(gmpr)], collapse='\n'),
'\ndo not share at least ', intersect.no, ' common taxa with the rest samples! ',
'For these samples, their size factors are set to be NA! \n',
'You may consider removing these samples since they are potentially outliers or negative controls!\n',
'You may also consider decreasing the minimum number of intersecting taxa and rerun the procedure!\n'))
}
if (verbose == TRUE) {
cat('Completed!\n')
cat('Please watch for the samples with limited sharing with other samples based on NSS! They may be outliers! \n')
}
attr(gmpr, 'NSS') <- comm.no
names(gmpr) <- colnames(comm)
return(gmpr * median(colSums(comm)))
}
perm_fdr_adj <- function (F0, Fp) {
  # Permutation-based FDR-adjusted p-values for observed statistics F0,
  # given a matrix Fp of permuted statistics (features x permutations).
  # Returns the adjusted values in the original order of F0.
  perm.no <- ncol(Fp)
  desc <- order(F0, decreasing = TRUE)
  obs <- F0[desc]
  m <- length(obs)
  # Pool the non-missing permuted stats with the observed ones and sort
  # ascending, so a single match() tells us, for each observed stat, how
  # many pooled values are at least as large.
  null.pool <- as.vector(Fp)
  null.pool <- null.pool[!is.na(null.pool)]
  pooled <- sort(c(null.pool, obs), decreasing = FALSE)
  n.pooled <- length(pooled)
  # Count of PERMUTED stats >= each observed stat: everything at or above it
  # in the pooled vector, minus the i observed stats that are themselves >= it.
  fp.count <- (n.pooled + 1L) - match(obs, pooled) - seq_len(m)
  # Estimated FDR at each rank: expected false positives per permutation,
  # divided by the number of discoveries at that rank.
  fdr <- fp.count / perm.no / seq_len(m)
  # Step-up monotonicity enforcement, cap at 1, then restore input order.
  fdr <- pmin(1, rev(cummin(rev(fdr))))
  fdr[order(desc)]
}
perm_fwer_adj <- function (F0, Fp) {
  # Permutation-based FWER-adjusted p-values (single-step max-statistic
  # procedure).
  #
  # Args:
  #   F0: numeric vector of observed test statistics (one per feature).
  #   Fp: numeric matrix of permuted statistics, features x permutations.
  #
  # Returns:
  #   Numeric vector of adjusted p-values, in the original order of F0.
  ord <- order(F0, decreasing = TRUE)
  F0 <- F0[ord]
  # Null distribution of the maximum statistic across features, one value per
  # permutation.  base::apply replaces the original call to colMaxs(), which
  # belonged to matrixStats -- a package this file never loads or namespaces.
  col.max <- apply(Fp, 2, max, na.rm = TRUE)
  # Single-step adjustment: fraction of permutations whose maximum reaches the
  # observed statistic; finally mapped back to the caller's input order.
  p.adj.fwer <- vapply(F0, function(x) mean(col.max >= x), numeric(1))
  p.adj.fwer[order(ord)]
}
na.pad <- function (vec, ind) {
  # Expand `vec` back to length(ind): positions where `ind` is TRUE (the
  # entries that were dropped upstream) become NA, and the remaining
  # positions receive the values of `vec` in order.
  out <- rep(NA_real_, length(ind))
  out[!ind] <- vec
  out
}
permute_differential_analysis<-function (meta.dat, comm, grp.name, adj.name = NULL, size.factor = NULL,
    transform = 'arcsqrt', weights = NULL, strata = NULL, perm.no = 999,
    stage.no = 1, stage.pv = 0.05, stage.max.pct = 0.20, verbose = TRUE) {
  # Permutation-based differential abundance analysis with GMPR normalization
  # and an optional multi-stage re-normalization that excludes likely
  # differential features from size-factor estimation.
  #
  # From Dr. Jun Chen, Chen.Jun2@mayo.edu
  # Args:
  #   meta.dat: a data frame containing the sample information
  #   comm: a matrix of counts, row - features (OTUs, genes, etc) , column - sample
  #   size.factor: a numeric vector of the library sizes; if NULL, GMPR size factors will be used
  #   weights: a vector of the weights; if null, the data will be weighted by size factor
  #   grp.name: a character, variable of interest; it could be numeric or categorical; Should be in meta.dat
  #   adj.name: a character vector, variable(s) to be adjusted; they could be numeric or categorical; Should be in meta.dat
  #   strata: a factor indicating the permutation strata; permutation will be confined to each stratum
  #   perm.no: the number of permutations; If the FDR/FWER-adjusted p values are the major interest,
  #            perm.no could be set to 50 to reduce computation
  #   stage.no: the number of stages if multiple-stage normalization stategy is used
  #   stage.pv: the raw p value cutoff below which the features will be excluded for calculating the size factor
  #   stage.max.pct: the maximum percentage of features that will be excluded
  #   verbose: whether the trace information should be printed out
  #
  # Returns:
  #   a list that contains:
  #        call: the call
  #        R2: a vector of percent explained variance for
  #        p.value: the raw p-values based on permutations
  #        p.adj.fdr: permutation-based FDR-adjusted p.value
  #        p.adj.fwer: permutation-based FWER-adjusted p.value
  #        size.factor: the size.factor used
  #        weights: the weights used
  this.call = match.call()
  if (is.null(size.factor)) {
    size.factor <- GMPR(comm)
  } else {
    # user-supplied size factors are used as-is
  }
  n <- ncol(comm)
  row.names <- rownames(comm)
  for (i in 1:stage.no) {
    if (verbose == TRUE)
      cat('Stage ', i, '...\n')
    if (is.null(weights)) {
      W <- size.factor
    } else {
      W <- weights*size.factor
    }
    # sqrt so that squaring during sum-of-squares computation recovers W.
    W <- sqrt(W)
    # Normalize counts: divide each sample (column) by its size factor.
    Y <- t(t(comm) / size.factor)
    # Variance-stabilizing arcsine-square-root transform of the proportions.
    if (transform == 'arcsqrt') {
      Y[Y <= 0] <- 0
      Y[Y >= 1] <- 1
      Y <- asin(sqrt(Y))
    }
    # NOTE(review): Y is features x samples while W has one entry per sample,
    # so W * Y recycles W along the FEATURE dimension; t(W * t(Y)) would
    # weight per sample -- confirm the intended orientation.
    Y <- W * Y
    # Covariate space (including intercept)
    if (is.null(adj.name)) {
      M0 <- model.matrix(~ 1, meta.dat)
    } else {
      df0 <- meta.dat[, c(adj.name), drop = FALSE]
      M0 <- model.matrix( ~ ., df0)
    }
    M0 <- W * M0
    # Remove covariate effects
    Y <- t(resid(lm(as.formula(paste('t(Y) ~ M0 - 1')), meta.dat)))
    if (!is.null(strata)) {
      strata <- factor(strata)
    }
    # Residual space after adjusting covariate
    df1 <- meta.dat[, c(grp.name), drop = FALSE]
    M1 <- model.matrix( ~ . - 1, df1)
    M1 <- W * M1
    M1 <- as.matrix(resid(lm(M1 ~ M0 - 1)))
    # QR decompostion
    qrX0 <- qr(M0, tol = 1e-07)
    Q0 <- qr.Q(qrX0)
    Q0 <- Q0[, 1:qrX0$rank, drop = FALSE]
    qrX1 <- qr(M1, tol = 1e-07)
    Q1 <- qr.Q(qrX1)
    Q1 <- Q1[, 1:qrX1$rank, drop = FALSE]
    # Per-feature total and model sums of squares via projection onto Q1.
    TSS <- rowSums(Y^2)
    MSS1 <- rowSums((Y %*% Q1)^2)
    # Scaled F-stat
    F0 <- MSS1 / (TSS - MSS1)
    R2 <- MSS1 / TSS
    # Permutation scheme from vegan (unexported helper); honors strata.
    perm.ind <- vegan:::getPermuteMatrix(perm.no, n, strata = strata)
    perm.no <- nrow(perm.ind)
    # Recompute the scaled F-stat under each permutation of the design rows.
    Fp <- sapply(1:perm.no, function(i) {
      if (verbose) {
        if (i %% 100 == 0) cat('.')
      }
      Q1p <- Q1[perm.ind[i, ], , drop = FALSE]
      MSS1p <- rowSums((Y %*% Q1p)^2)
      MSS1p / (TSS - MSS1p)
    })
    if (verbose) {
      cat('\n')
    }
    if (mean(is.na(F0)) >= 0.1) {
      warning('More than 10% observed F stats have NA! Please check! \n')
    }
    if (mean(is.na(Fp)) >= 0.1) {
      warning('More than 10% permuted F stats have NA! Please check! \n')
    }
    na.ind <- is.na(F0)
    F0 <- F0[!na.ind]
    # NOTE(review): if exactly one feature survives, Fp[!na.ind, ] drops to a
    # vector (no drop = FALSE here), which would break the downstream
    # matrix-wise operations -- confirm.
    Fp <- Fp[!na.ind, ]
    # Raw p-value with the +1 correction: (#{permuted >= observed} + 1) / (B + 1).
    p.raw <- cbind(Fp >= F0, 1)
    p.raw <- rowMeans(p.raw)
    if (i == stage.no) {
      break
    } else {
      # recalculating the size factor, excluding features that look
      # differential at this stage (capped at stage.max.pct of features)
      if (mean(p.raw <= stage.pv) > stage.max.pct) {
        ind <- p.raw > quantile(p.raw, stage.max.pct)
      } else {
        ind <- p.raw > stage.pv
      }
      size.factor <- GMPR(comm[ind, ])
    }
  }
  # scaled F stat
  p.adj.fdr <- perm_fdr_adj(F0, Fp)
  p.adj.fwer <- perm_fwer_adj(F0, Fp)
  #Fp <- 1 - (apply(Fp, 1, rank) - 1) / ncol(Fp)
  #Fp <- t(Fp)
  #p.adj.fdr <- perm_fdr_adj(-p.raw, -Fp)
  #p.adj.fwer <- perm_fwer_adj(-p.raw, -Fp)
  # Re-insert NA placeholders for the features dropped above, so the output
  # vectors align with the rows of comm.
  p.raw <- na.pad(p.raw, na.ind)
  p.adj.fdr <- na.pad(p.adj.fdr, na.ind)
  p.adj.fwer <- na.pad(p.adj.fwer, na.ind)
  names(p.raw) <- names(p.adj.fdr) <- names(p.adj.fwer) <- row.names
  if (verbose) cat('Completed!\n')
  return(list(call = this.call, R2 = R2, p.raw = p.raw, p.adj.fdr = p.adj.fdr, p.adj.fwer = p.adj.fwer,
      size.factor = size.factor, weights = weights))
}
|
b01109ee7d1d97524d010a85d8abd24148b75bc0 | d5bc24a322805e42f14c3fdd639a264fdf25aff1 | /man/study_layout.Rd | b919e4c39fa687d66c2cc39e65de17ce552ee987 | [] | no_license | rfinkers/brapi | c97e1170235585598d53245eede2931f0be69085 | 6b16c7ec32e4904556ff9ed4577e02d7c6ce6032 | refs/heads/master | 2020-12-01T13:07:35.132066 | 2016-06-10T11:01:13 | 2016-06-10T11:01:13 | 64,315,252 | 1 | 0 | null | 2016-07-27T14:26:36 | 2016-07-27T14:26:36 | null | UTF-8 | R | false | true | 455 | rd | study_layout.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/study_layout.R
\name{study_layout}
\alias{study_layout}
\title{study layout}
\usage{
study_layout(studyId = NULL)
}
\arguments{
\item{studyId}{integer}
}
\value{
list of study attributes
}
\description{
Gets additional metadata about a study
}
\author{
Reinhard Simon
}
\references{
\url{http://docs.brapi.apiary.io/#reference/study/layout/retrieve-study-details?console=1}
}
|
815ce47a972343f41b4b4c24abd5b5c2f294bb1c | 12a78fa1241d98787284e25f953cb855a2c3eda5 | /man/mod_filtre_control_bar.Rd | 9d297b9f006c378e037c19bd08b8749dc76a567d | [
"MIT"
] | permissive | ove-ut3/ip.resultats | e75386da99ff99fa67ed313900c4555b9e8ded62 | cbec566fc7c6808a2fc77c702d30dc58e85cb609 | refs/heads/master | 2021-01-03T20:52:46.746042 | 2020-05-10T16:09:56 | 2020-05-10T16:09:56 | 240,232,220 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 535 | rd | mod_filtre_control_bar.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mod_filtre_control_bar.R
\name{mod_filtre_control_bar_ui}
\alias{mod_filtre_control_bar_ui}
\alias{mod_filtre_control_bar_server}
\title{mod_filtre_control_bar_ui and mod_filtre_control_bar_server}
\usage{
mod_filtre_control_bar_ui(id)
mod_filtre_control_bar_server(input, output, session, rv)
}
\arguments{
\item{id}{shiny id}
\item{input}{internal}
\item{output}{internal}
\item{session}{internal}
}
\description{
A shiny Module.
}
\keyword{internal}
|
f0ac6a144e9f741c291bf3089875894f7afb5b27 | 3cc888d60aa5e76ccee48be08d1e9849792bf503 | /R/screen.ttest.R | 4c4b5ad0ae9565f40e7919868903c55348b4cb39 | [] | no_license | ecpolley/SuperLearner | 4584cdbe7dccf945689958d20d0ea779a826bbda | 801aa6039460648d4dfd87c1fad77e5f29391cb7 | refs/heads/master | 2023-07-24T21:52:33.665047 | 2023-07-18T13:56:30 | 2023-07-18T13:56:30 | 1,622,048 | 245 | 82 | null | 2019-08-06T14:25:24 | 2011-04-16T05:18:51 | R | UTF-8 | R | false | false | 503 | r | screen.ttest.R | screen.ttest <- function(Y, X, family, obsWeights, id, rank = 2, ...) {
# implemented with colttests from the genefilter package
.SL.require('genefilter')
if (family$family == "gaussian") {
stop('t-test screening undefined for gaussian family, look at screen.corP or screen.corRank')
}
if (family$family == "binomial") {
listP <- genefilter::colttests(x = as.matrix(X), fac = as.factor(Y), tstatOnly = FALSE)$p.value
}
whichVariable <- (rank(listP) <= rank)
return(whichVariable)
} |
1c0bd9589dd849d142d6ee8324c2b7dde85f412d | 886d1d1e048673be7dbced56bcf51e474cc74567 | /cachematrix.R | 6832ea35a53a73174dd954162ef4814650034dff | [] | no_license | jkholtzman/ProgrammingAssignment2 | 55ba0b37128ec6416562a7d8d102c5826a9e42ff | 7b57d481e41686aa822340269ca7cef6d93343f4 | refs/heads/master | 2020-06-13T18:59:47.711781 | 2016-12-04T22:02:23 | 2016-12-04T22:02:23 | 75,566,027 | 0 | 0 | null | 2016-12-04T21:34:38 | 2016-12-04T21:34:38 | null | UTF-8 | R | false | false | 1,128 | r | cachematrix.R | # The purpose of this file is to build a function to perform a potentially expensive computation,
# inverting a matrix, and cache it for future use.
# This file contains two functions, makeCacheMatrix and cacheSolve
# The first function builds a vector that contains several functions for storing the matrix,
# inverting it, and returning the cached copy when called.
# The second function exercises the first.
# Jeff Holtzman, 2016-12-04
# build a vector for managing a matrix and its inverse
makeCacheMatrix <- function(x = matrix()) {
  # Build a "cache-aware matrix": a list of closures that share a matrix and
  # a lazily computed, cached inverse through their enclosing environment.
  inv <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    inv <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(solve) {
    inv <<- solve
  }
  getinverse <- function() {
    inv
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
# Return a matrix that is the inverse of 'x', either computed or cached
cacheSolve <- function(x, ...) {
i <- x$getinverse()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data, ...)
x$setinverse(i)
i
}
|
4b54ce5b79b779c8941571920eb196f45991a859 | f85c3a502acc3e1252b28ca1af2f728a6ca573f0 | /R/get_example_filenames.R | 49fd3f6a123cea9cc05b8ce33bdb5bb9ebda7594 | [] | no_license | cran/pureseqtmr | 9f3d7f4010adfdc6c5b9a8446a13b3cc904dd406 | 9b53165e7dd3cdd0ded2fdf163f5c73b0c5a7fae | refs/heads/master | 2023-04-15T04:44:21.088795 | 2023-04-06T12:40:02 | 2023-04-06T12:40:02 | 284,769,850 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 846 | r | get_example_filenames.R | #' Get the full path to all PureseqTM example files
#' @inheritParams default_params_doc
#' @return a character vector with all PureseqTM example files
#' @examples
#' if (is_pureseqtm_installed()) {
#' get_example_filenames()
#' }
#' @seealso use \link{get_example_filename} to get the full
#' path to a PureseqTM example file
#' @author Richèl J.C. Bilderbeek
#' @export
get_example_filenames <- function(
  folder_name = get_default_pureseqtm_folder()
) {
  # Return the full paths of every example file shipped with the PureseqTM
  # installation under `folder_name`, after verifying the installation and
  # each directory level on the way down.
  pureseqtmr::check_pureseqtm_installation(folder_name)
  package_folder <- file.path(folder_name, "PureseqTM_Package")
  testthat::expect_true(dir.exists(package_folder))
  examples_folder <- file.path(package_folder, "example")
  testthat::expect_true(dir.exists(examples_folder))
  list.files(examples_folder, full.names = TRUE)
}
|
059180043ea522a0d30eb4e280e63d238549071f | e0e96a52e59fcf3ebad6ee22527e5c7f8e2b94f9 | /r/R/ModelBreak.r | 6baa36da2fa5be71ecbf49ccdaede928a1cbf667 | [] | no_license | ajisantoso/directions-api-clients | 4db7e05827afbcedf5fc552b00df1b224e454cb4 | 1a0591d55602a020ef3fa3631d7820d3c2756213 | refs/heads/master | 2021-09-01T00:03:54.102080 | 2017-12-23T16:30:28 | 2017-12-23T16:30:54 | 115,403,581 | 1 | 0 | null | 2017-12-26T08:41:55 | 2017-12-26T08:41:55 | null | UTF-8 | R | false | false | 4,861 | r | ModelBreak.r | # GraphHopper Directions API
#
# You use the GraphHopper Directions API to add route planning, navigation and route optimization to your software. E.g. the Routing API has turn instructions and elevation data and the Route Optimization API solves your logistic problems and supports various constraints like time window and capacity restrictions. Also it is possible to get all distances between all locations with our fast Matrix API.
#
# OpenAPI spec version: 1.0.0
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' ModelBreak Class
#'
#' @field earliest
#' @field latest
#' @field duration
#' @field max_driving_time
#' @field initial_driving_time
#' @field possible_split
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# R6 model for a driver break: time window (earliest/latest), duration,
# driving-time limits, and an optional list of split durations.
# All numeric fields are validated only with is.numeric(), so both integer
# and double values are accepted.
ModelBreak <- R6::R6Class(
  'ModelBreak',
  public = list(
    `earliest` = NULL,
    `latest` = NULL,
    `duration` = NULL,
    `max_driving_time` = NULL,
    `initial_driving_time` = NULL,
    `possible_split` = NULL,
    # Validate and store each supplied field; fields left missing stay NULL.
    initialize = function(`earliest`, `latest`, `duration`, `max_driving_time`, `initial_driving_time`, `possible_split`){
      if (!missing(`earliest`)) {
        stopifnot(is.numeric(`earliest`), length(`earliest`) == 1)
        self$`earliest` <- `earliest`
      }
      if (!missing(`latest`)) {
        stopifnot(is.numeric(`latest`), length(`latest`) == 1)
        self$`latest` <- `latest`
      }
      if (!missing(`duration`)) {
        stopifnot(is.numeric(`duration`), length(`duration`) == 1)
        self$`duration` <- `duration`
      }
      if (!missing(`max_driving_time`)) {
        stopifnot(is.numeric(`max_driving_time`), length(`max_driving_time`) == 1)
        self$`max_driving_time` <- `max_driving_time`
      }
      if (!missing(`initial_driving_time`)) {
        stopifnot(is.numeric(`initial_driving_time`), length(`initial_driving_time`) == 1)
        self$`initial_driving_time` <- `initial_driving_time`
      }
      if (!missing(`possible_split`)) {
        stopifnot(is.list(`possible_split`), length(`possible_split`) != 0)
        lapply(`possible_split`, function(x) stopifnot(is.character(x)))
        self$`possible_split` <- `possible_split`
      }
    },
    # Convert the populated fields to a named list (NULL fields are omitted).
    toJSON = function() {
      ModelBreakObject <- list()
      if (!is.null(self$`earliest`)) {
        ModelBreakObject[['earliest']] <- self$`earliest`
      }
      if (!is.null(self$`latest`)) {
        ModelBreakObject[['latest']] <- self$`latest`
      }
      if (!is.null(self$`duration`)) {
        ModelBreakObject[['duration']] <- self$`duration`
      }
      if (!is.null(self$`max_driving_time`)) {
        ModelBreakObject[['max_driving_time']] <- self$`max_driving_time`
      }
      if (!is.null(self$`initial_driving_time`)) {
        ModelBreakObject[['initial_driving_time']] <- self$`initial_driving_time`
      }
      if (!is.null(self$`possible_split`)) {
        ModelBreakObject[['possible_split']] <- self$`possible_split`
      }
      ModelBreakObject
    },
    # Populate fields from a JSON string (only keys present are assigned).
    fromJSON = function(ModelBreakJson) {
      ModelBreakObject <- jsonlite::fromJSON(ModelBreakJson)
      if (!is.null(ModelBreakObject$`earliest`)) {
        self$`earliest` <- ModelBreakObject$`earliest`
      }
      if (!is.null(ModelBreakObject$`latest`)) {
        self$`latest` <- ModelBreakObject$`latest`
      }
      if (!is.null(ModelBreakObject$`duration`)) {
        self$`duration` <- ModelBreakObject$`duration`
      }
      if (!is.null(ModelBreakObject$`max_driving_time`)) {
        self$`max_driving_time` <- ModelBreakObject$`max_driving_time`
      }
      if (!is.null(ModelBreakObject$`initial_driving_time`)) {
        self$`initial_driving_time` <- ModelBreakObject$`initial_driving_time`
      }
      if (!is.null(ModelBreakObject$`possible_split`)) {
        self$`possible_split` <- ModelBreakObject$`possible_split`
      }
    },
    # Render the object as a JSON string.
    # Fixes over the generated code:
    #  * "%s" instead of "%d": initialize() accepts doubles, and sprintf()
    #    errors when "%d" is given a non-integer numeric.
    #  * possible_split is joined with collapse = "," (the original used
    #    sep = ",", a no-op, and so returned a VECTOR of broken JSON strings
    #    whenever the list had more than one element).
    # NOTE(review): as in the generated original, NULL fields still make
    # sprintf() fail (zero-length argument); callers are expected to have
    # populated every field before serializing.
    toJSONString = function() {
      sprintf(
        '{
           "earliest": %s,
           "latest": %s,
           "duration": %s,
           "max_driving_time": %s,
           "initial_driving_time": %s,
           "possible_split": [%s]
        }',
        self$`earliest`,
        self$`latest`,
        self$`duration`,
        self$`max_driving_time`,
        self$`initial_driving_time`,
        paste(paste0('"', unlist(self$`possible_split`), '"'), collapse = ",")
      )
    },
    # Populate all fields from a JSON string (absent keys become NULL).
    fromJSONString = function(ModelBreakJson) {
      ModelBreakObject <- jsonlite::fromJSON(ModelBreakJson)
      self$`earliest` <- ModelBreakObject$`earliest`
      self$`latest` <- ModelBreakObject$`latest`
      self$`duration` <- ModelBreakObject$`duration`
      self$`max_driving_time` <- ModelBreakObject$`max_driving_time`
      self$`initial_driving_time` <- ModelBreakObject$`initial_driving_time`
      self$`possible_split` <- ModelBreakObject$`possible_split`
    }
  )
)
|
78ce6612fbc7f3c50060f347a840083fd182b82f | 915150da295f7300c3f9df2bbfaedffdce23324f | /00-Misc/sampling_maps/sampling_maps.R | 2da4679a7b16c8a65b1e8487aeb4ba4b6729d8dd | [] | no_license | eboulanger/seaConnect--radFishComp | c60aee6efb82365d2e81ed5774c44117a837c93e | a1eda99b4d35a72a2b04be99f59c7cc4045723bc | refs/heads/master | 2023-02-03T12:51:44.681078 | 2020-12-16T15:15:57 | 2020-12-16T15:15:57 | 191,384,118 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,043 | r | sampling_maps.R | # create maps of sampling cell with size indicating sample size
# libraries
library(png)
library(maps)
library(mapdata)
library(dplyr)
library(ggplot2)
library(scales)
library(stringr)
library(reshape)
source("scale_bar.R")
# import stamps
data_pic <- list.files(path = "../../00-Misc/stamps/", pattern="*.png",recursive = FALSE)
source_pic <- paste0("../../00-Misc/stamps/", data_pic)
pic <- lapply(source_pic,readPNG)
names(pic) <- str_sub(data_pic,1, -5)
# coord data
coord <- read.table("data/coord_seaconnect_tous.txt", sep = "\t", head = TRUE)
pop_dip <- read.table("data/dip_population_map_297ind.txt", sep = "\t", head = TRUE)
pop_mul <- read.table("data/mul_population_map_467ind.txt", sep = "\t", head = TRUE)
# wrangle to one dataset with coords and sample size diplodus and mullus
n_dip <-pop_dip %>%
group_by(STRATA) %>%
summarise(length(INDIVIDUALS))
colnames(n_dip)[2] <- "n_dip"
n_mul <-pop_mul %>%
group_by(STRATA) %>%
summarise(length(INDIVIDUALS))
colnames(n_mul)[2] <- "n_mul"
coord_size <- coord %>%
mutate(STRATA = SamplingCell) %>%
select(STRATA, Longitude, Latitude) %>%
left_join(n_dip, by = "STRATA") %>%
left_join(n_mul, by = "STRATA")
summary(coord_size$n_dip)
summary(coord_size$n_mul)
# how many sites in total? remove rows with twice NA
coord_size[rowSums(is.na(coord_size)) != 2,] %>% nrow()
# separate maps ----
# map D sargus sampling
#pdf(file="sampling_map_diplodus_297ind_cellNum.pdf", width = 16, height = 9)
map("worldHires", xlim=c(-8,37), ylim=c(29.5,47),col = "gray80", boundary = TRUE, interior = FALSE, fill = TRUE, border = NA)
points(coord_size$Longitude, coord_size$Latitude, pch=19, col="#053061", cex=scales::rescale(coord_size$n_dip,to = c(1,4)))
legend("bottomleft",
legend = c(1, 3, 5, 7, 10),
title = "# Diplodus sargus",
pch=20,col="#053061",cex=1.2,
pt.cex = c(1, 1.7, 2.5, 3.3, 4),
ncol=3,
bg = "transparent", bty = "n")
map.axes(cex.axis=1)
map.scale(3, 31, ratio=FALSE, relwidth=0.15, cex=1)
text(labels = coord_size$STRATA[!is.na(coord_size$n_dip)],
coord_size$Longitude[!is.na(coord_size$n_dip)],
coord_size$Latitude[!is.na(coord_size$n_dip)] + 0.5, cex = 0.7)
# map M surmuletus sampling
#pdf(file="sampling_map_mullus_424ind_cellNum.pdf", width = 16, height = 9)
map("worldHires", xlim=c(-8,37), ylim=c(29.5,47),col = "gray80", boundary = TRUE, interior = FALSE, fill = TRUE, border = NA)
points(coord_size$Longitude, coord_size$Latitude, pch=19, col="#67001f", cex=rescale(coord_size$n_mul,to = c(1, 4)))
legend("bottomleft",
legend = c(1, 3, 5, 7, 10),
title = "# Mullus surmuletus",
pch=20,col="#67001f",cex=1.2,
pt.cex = c(1, 1.7, 2.5, 3.3, 4),
ncol=3,
bg = "transparent", bty = "n")
map.axes(cex.axis=1)
map.scale(3, 31, ratio=FALSE, relwidth=0.15, cex=1)
text(labels = coord_size$STRATA[!is.na(coord_size$n_mul)],
coord_size$Longitude[!is.na(coord_size$n_mul)],
coord_size$Latitude[!is.na(coord_size$n_mul)] + 0.5, cex = 0.7)
# both with different symbols
pdf(file="maps/sampling_map_both_triangle.pdf", width = 16, height = 9)
map("world", xlim=c(-8,37), ylim=c(29.5,47),col = "gray80", boundary = TRUE, interior = FALSE, fill = TRUE, border = NA)
points(coord_size$Longitude, coord_size$Latitude, pch=2, col="#67001f", cex=rescale(coord_size$n_mul,to = c(1, 4)))
points(coord_size$Longitude, coord_size$Latitude, pch=6, col="#053061", cex=scales::rescale(coord_size$n_dip,to = c(1,4)))
legend("bottomleft",
legend = c(1, 3, 5, 7, 10),
title = "# individuals",
pch=11,col="black",cex=1.2,
pt.cex = c(1, 1.7, 2.5, 3.3, 4),
ncol=3,
bg = "transparent", bty = "n")
legend("topleft",
legend = c("Diplodus sargus", "Mullus surmuletus"),
pch=c(6,2),col=c("#053061","#67001f"),cex=1.2,
pt.cex = 1.2,
bg = "transparent", bty = "n")
dev.off()
# combined map ----
# two species on one map: pie charts where both present
# Recode the sample-size columns to presence/absence (1/0) per species.
map_data <- coord_size[,c("STRATA", "Longitude", "Latitude")]
map_data$diplodus <- coord_size$n_dip
map_data$mullus <- coord_size$n_mul
map_data$diplodus[is.na(map_data$diplodus)] <- 0
map_data$diplodus[map_data$diplodus>0] <- 1
map_data$mullus[is.na(map_data$mullus)] <- 0
map_data$mullus[map_data$mullus>0] <- 1
# remove empty rows (cells where neither species was sampled)
map_data <- map_data[rowSums(map_data == 0) != 2,]
##### pie maps #####
pie_cell <- map_data[,c("diplodus", "mullus")] %>%
  data.matrix(rownames.force = NA)
# replace 0's with 0.00001 so they are not ignored
pie_cell[pie_cell == 0] <- 0.0001
lon_cell <- map_data$Longitude
lat_cell <- map_data$Latitude
#pdf(file="sampling_map_both_cellNum.pdf", width = 16, height = 9)
map("worldHires", xlim=c(-8,37), ylim=c(29.5,47),col = "gray80", boundary = TRUE, interior = FALSE, fill = TRUE, border = NA)
# NOTE(review): floating.pie() comes from the plotrix package, which is never
# loaded in this script -- confirm it is attached elsewhere in the session.
for(i in 1:nrow(pie_cell)) {
  floating.pie(lon_cell[i], lat_cell[i], pie_cell[i, ], radius = 0.4, col = c("#4393c3", "#d6604d"))
}
legend("bottomleft",
       legend = c("Diplodus sargus", "Mullus surmuletus"),
       pch=19, cex = 1.5, ncol = 1,
       col= c("#4393c3", "#d6604d"),
       bty ="o", bg ="gray90",box.col = "gray90")
map.scale(3, 31, ratio=FALSE, relwidth=0.15, cex=1)
map.axes(cex.axis=0.8)
#rasterImage(pic$diplodus_sargus,
#            xleft = 2, xright = 3.7,
#            ybottom = 31, ytop = 32)
text(labels = map_data$STRATA,
     map_data$Longitude,
     map_data$Latitude+ 0.5, cex = 0.7)
# NOTE(review): the matching pdf() above is commented out, so this dev.off()
# closes the interactive device instead of a file device -- confirm.
dev.off()
dev.set(dev.prev())
##### symbol map #####
# Categorize each cell by which species were sampled there.
map_data$species <- rep(0, nrow(map_data))
map_data$species[map_data$diplodus == 1 & map_data$mullus == 1] <- "both"
map_data$species[map_data$diplodus == 1 & map_data$mullus == 0] <- "diplodus"
map_data$species[map_data$diplodus == 0 & map_data$mullus == 1] <- "mullus"
# map in ggplot2 ----
# tutorial: http://eriqande.github.io/rep-res-web/lectures/making-maps-with-R.html
# NOTE(review): map_data() here is the ggplot2 FUNCTION, even though a data
# frame named map_data also exists in this script -- confusing shadowing.
wH <- map_data("world", xlim=c(-8,37), ylim=c(29.5,47)) # subset polygons surrounding med sea
# further subset dataset so don't plot whole polygons
# wH_sub <- wH[wH$long<37 & wH$long > c(-8) & wH$lat < 47 & wH$lat > 29.5,] # creates weird margins around map. rather set limits in ggplot
# Reusable base map of the Mediterranean used by all ggplot figures below.
med_base <- ggplot() +
  geom_polygon(data = wH, aes(x=long, y = lat, group = group), fill = "gray80", color = "black") +
  coord_fixed( xlim=c(-8,37), ylim=c(29.5,46.5), ratio = 1.3) +
  labs(x= "Longitude (°)", y = "Latitude (°)") +
  #theme_nothing() +
  theme(panel.background = element_rect(fill = "white", colour = "black"),
        panel.border = element_rect(fill = NA, colour = "black")) # add black border on top again
sampling <- med_base +
  geom_point(data = map_data, aes(x=Longitude, y = Latitude, shape = species), cex = 3) +
  geom_text(data = map_data, aes(x=Longitude, y=Latitude + 0.4, label = STRATA), cex = 3) +
  scale_shape_discrete(labels=c("both species", "Diplodus sargus", "Mullus surmuletus")) +
  theme(legend.position = c(0.07, 0.07),
        legend.background = element_blank(),
        legend.title = element_blank())
#sampling
ggsave(sampling, filename= "maps/sampling_map_both_cellNum_shape.pdf", width = 13, height = 7)
# export map data for adding shapes to other figures
write.csv(map_data, "sampling_species_data.csv", row.names = F)
# add fish silhouettes
# install EBImage
library(ggimage)
med_base +
  geom_image(data = map_data, aes(x=Longitude, y = Latitude),
             image="../../00-Misc/stamps/diplodus_sargus.png", size = 0.05)
#### combined but overlapping ####
med_base +
  geom_point(data = select(coord_size, -n_mul), aes(x= Longitude, y= Latitude, size = n_dip), pch = 21,col = "blue") + #, col = "white", alpha = 0.5) +
  geom_point(data = select(coord_size, -n_dip), aes(x= Longitude, y= Latitude, size = n_mul), pch = 21,col = "red" ) + #, col = "white", alpha = 0.5) +
  scale_size_continuous(name = "# of fish", breaks = c(1, 3, 7,10)) +
  theme(legend.position = c(0.06, 0.12))
# NOTE(review): pivot_longer() is from tidyr, which is not loaded in this
# script -- confirm it is attached elsewhere in the session.
ggdata_coord_size <- pivot_longer(coord_size, cols = c(n_dip, n_mul),names_to = "species", values_to = "sample_size")
sampling_both <- med_base +
  geom_point(data = ggdata_coord_size, aes(x= Longitude, y= Latitude, size = sample_size, col = species), pch = 21) +
  scale_size_continuous(name = "# of fish", breaks = c(1, 3, 7,10)) +
  scale_colour_manual(values= c("blue", "red"), labels = c("Diplodus sargus", "Mullus surmuletus")) +
  guides(colour=guide_legend(ncol=2),
         size =guide_legend(ncol=1)) +
  theme(legend.position = c(0.2, 0.12),legend.direction = "horizontal")
sampling_both
ggsave(sampling_both, filename ="maps/sampling_map_both_size_col.pdf", width = 13, height = 7)
#### map EEZs ####
# source : https://www.marineregions.org/downloads.php
# NOTE(review): the shapefile paths below are absolute, user-specific paths;
# the script is not portable without them.
library(sf)
eez.boundaries <- st_read("~/Documents/Data/GIS/MarineRegions/World_EEZ_v11_20191118/eez_boundaries_v11.shp")
class(eez.boundaries)
eez <- st_read("~/Documents/Data/GIS/MarineRegions/World_EEZ_v11_20191118/eez_v11.shp")
# crop to med extent because too large to plot
eez.med <- st_crop(eez, xmin=-13, xmax=42, ymin=25,ymax=50)
# Base map with filled EEZ polygons outlined over the coastline.
med_eez <- ggplot() +
  geom_sf(data = eez.med, fill = NA) +
  geom_polygon(data = wH, aes(x=long, y = lat, group = group), fill = "gray80", color = "black") +
  coord_sf(xlim=c(-8,37), ylim=c(29.5,46.5)) +
  labs(x= "Longitude (°)", y = "Latitude (°)") +
  theme(panel.background = element_rect(fill = "white", colour = "black"),
        panel.border = element_rect(fill = NA, colour = "black")) # add black border on top again
ggsave(med_eez, filename= "maps/eez_shapes_med.pdf", width = 13, height = 7)
# Variant using only the EEZ boundary lines (lighter rendering).
med_eez.b <- ggplot() +
  geom_sf(data = eez.boundaries, color = "gray47") +
  geom_polygon(data = wH, aes(x=long, y = lat, group = group), fill = "gray80", color = "gray47") +
  coord_sf(xlim=c(-8,37), ylim=c(29.5,46.5)) +
  labs(x= "Longitude (°)", y = "Latitude (°)") +
  theme(panel.background = element_rect(fill = "white", colour = "black"),
        panel.border = element_rect(fill = NA, colour = "black")) # add black border on top again
ggsave(med_eez.b, filename= "maps/eez_boundaries_med.pdf", width = 13, height = 7)
# add sampling
sampling_eez <- med_eez.b +
  geom_point(data = map_data, aes(x=Longitude, y = Latitude, shape = species), cex = 3) +
  scale_shape_discrete(labels=c("both species", "Diplodus sargus", "Mullus surmuletus")) +
  theme(legend.position = c(0.07, 0.07),
        legend.background = element_blank(),
        legend.title = element_blank(),
        legend.key = element_blank())
ggsave(sampling_eez, filename= "maps/sampling_both_shape_eez.pdf", width = 13, height = 7)
#### Marine Ecoregions of the World ####
# source: https://www.worldwildlife.org/publications/marine-ecoregions-of-the-world-a-bioregionalization-of-coastal-and-shelf-areas
library(sf)
library(forcats)
meow <- st_read("~/Documents/Data/GIS/MarineRegions/MEOW/meow_ecos.shp")
class(meow)
# crop to med extent because too large to plot
meow.med <- st_crop(meow, xmin=-13, xmax=42, ymin=25,ymax=50)
# remove non-med seas
meow.med.bis <- meow.med %>% filter(ECOREGION %in% c("Alboran Sea", "Western Mediterranean",
                                                     "Adriatic Sea", "Ionian Sea", "Tunisian Plateau/Gulf of Sidra",
                                                     "Aegean Sea", "Levantine Sea"))
# shorten one ecoregion name for cleaner map labels
meow.med.bis$ECOREGION <- fct_recode(meow.med.bis$ECOREGION,'Tunisian Plateau' = "Tunisian Plateau/Gulf of Sidra")
meow.med.bis <- cbind(meow.med.bis, st_coordinates(st_centroid(meow.med.bis))) # add centroid for lables
# coordinates almeria oran front
aof <- data.frame(city = c("Almeria", "Oran"),lon=c(-2.4597400,-0.6416700), lat = c(36.8381400,35.6911100))
# Ecoregion map with default Brewer palette; the per-label angle/nudge vectors
# are position tweaks hand-tuned to the seven regions kept above.
med_meow <- ggplot() +
  geom_sf(data = meow.med.bis, aes(fill = ECOREGION), alpha = 0.6) +
  geom_polygon(data = wH, aes(x=long, y = lat, group = group), fill = "gray80", color = "black") +
  coord_sf(xlim=c(-8,37), ylim=c(29.5,46.5)) +
  scale_fill_brewer() +
  geom_text(data = meow.med.bis, aes(X, Y, label = ECOREGION), size = 3, fontface = "italic",
            angle = c(-40, 0, 0, 0, 0, 0, 0),
            nudge_x = c(0, 0, 0, 0, -1, -0.5, 0),
            nudge_y = c(-1,0,2,0,-2.1,0.9,-1)) +
  #geom_path(data = aof, aes(x=lon, y = lat), linetype = 2) +
  labs(x= "Longitude (°)", y = "Latitude (°)") +
  theme(legend.position = "none",
        panel.background = element_rect(fill = "white", colour = "black"),
        panel.border = element_rect(fill = NA, colour = "black")) # add black border on top again
ggsave(med_meow, filename= "maps/ecoregions_med.pdf", width = 13, height = 7)
# adjust colours
# ecoregion palette
colregion <- c("#FF0000","#5FB7FF","#1A8C18","#8D0000","#34638D","#FFA600","#99CF1C")
med_meow_cadj <- ggplot() +
  geom_sf(data = meow.med.bis, aes(fill = ECOREGION), alpha = 0.8) +
  geom_polygon(data = wH, aes(x=long, y = lat, group = group), fill = "gray80", color = "black") +
  coord_sf(xlim=c(-8,37), ylim=c(29.5,46.5)) +
  scale_fill_manual(values = colregion) +
  geom_text(data = meow.med.bis, aes(X, Y, label = ECOREGION), size = 3, fontface = "italic",
            angle = c(-40, 0, 0, 0, 0, 0, 0),
            nudge_x = c(0, 0, 0, 0, -1, -0.5, 0),
            nudge_y = c(-1,0,2,0,-2.1,0.9,-1)) +
  #geom_path(data = aof, aes(x=lon, y = lat), linetype = 2) +
  labs(x= "Longitude (°)", y = "Latitude (°)") +
  theme(legend.position = "none",
        panel.background = element_rect(fill = "white", colour = "black"),
        panel.border = element_rect(fill = NA, colour = "black")) # add black border on top again
ggsave(med_meow_cadj, filename= "maps/ecoregions_med_coladj.pdf", width = 13, height = 7)
# add sampling
# add sampling
sampling_meow <- med_meow_cadj +
  #geom_point(data = map_data, aes(x=Longitude, y = Latitude, shape = species), cex = 3) +
  #scale_shape_discrete(labels=c("both species", "Diplodus sargus", "Mullus surmuletus")) +
  geom_text(data=map_data, aes(x=Longitude, y=Latitude, label=STRATA))
ggsave(sampling_meow, filename= "maps/sampling_both_shape_meow_coladj.pdf", width = 13, height = 7)
1e79946fecb6d2f6c9599d36f7472073deea4159 | 78d7f3dceec1602722f46a8d6aaad357a9207291 | /qgsub.R | 75e75618e43d13130bc8ac069c1a369d9e31c230 | [
"MIT"
] | permissive | wt12318/r_command_line | 07389aa9df91df0709bd72e9be9a09a615dbbfca | f191268dfab703ec45b0ac9674404005dbd12821 | refs/heads/main | 2023-05-01T11:30:08.695867 | 2021-05-12T02:11:45 | 2021-05-12T02:11:45 | 366,275,950 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,362 | r | qgsub.R | #!/usr/bin/Rscript
# qgsub: stamp out one file per mapping row by substituting placeholders in a
# template. Placeholders (space-separated, given via --replace) are replaced
# with the corresponding columns of the mapping file; each output file is
# named <dir><prefix><first mapping value>.<suffix>.
#
# Load optparse, installing it first if it is missing. The original script
# installed the package when absent but never loaded it afterwards, so every
# subsequent make_option()/parse_args() call failed on a fresh machine.
if (!requireNamespace("optparse", quietly = TRUE)) {
  install.packages("optparse")
}
library("optparse")
option_list <- list(
  make_option(c("-t", "--template"), action = "store", default = NULL,
              help = "File need to be changed"),
  make_option(c("-m", "--mapping"), action = "store", default = NULL,
              help = "Mapping file, each column corresponds to the values to replace"),
  make_option(c("-r", "--replace"), action = "store", default = NULL,
              help = "Character that need to be replaced, if there are more than 1 word, words should been splited by space"),
  make_option(c("-p", "--prefix"), action = "store", default = NULL,
              help = "Prefix of output files"),
  make_option(c("-s", "--suffix"), action = "store", default = NULL,
              help = "Suffix of output files"),
  make_option(c("-d", "--dir"), action = "store", default = NULL,
              help = "output dir")
)
opt <- parse_args(OptionParser(option_list = option_list))
# Read the template once; it is re-substituted for every mapping row.
template_lines <- readLines(opt$template)
# Space-separated placeholder tokens; placeholder j is filled from mapping
# column j. Note gsub() treats each placeholder as a regular expression.
placeholders <- strsplit(opt$replace, split = " ")[[1]]
map <- read.table(opt$mapping, sep = " ")
for (i in seq_len(nrow(map))) {
  out_lines <- template_lines
  for (j in seq_along(placeholders)) {
    out_lines <- gsub(placeholders[j], map[[j]][i], out_lines)
  }
  # Output name uses the first mapping column (V1); opt$dir is concatenated
  # verbatim, so it should end with a path separator.
  outfcon <- file(paste0(opt$dir, opt$prefix, map$V1[i], ".", opt$suffix), open = "wt")
  writeLines(out_lines, con = outfcon)
  close(outfcon)
}
|
3c76579cad4178ebd682675a5c7f380fe49095ca | 8ce03508019eeb4f40e46f5f9dfc15dc9d436282 | /plot4.R | 4aa07525f0f17883e2ca8931a2d6c50d1ee750aa | [] | no_license | JacGu/Exploratory-Data-Analysis | 77e9632f6477614123261d46204e2a4cf48a5bed | 30323380528bdaef8a4bfba50539de60f8f74c3b | refs/heads/master | 2021-01-01T05:23:21.893909 | 2016-05-18T16:26:24 | 2016-05-18T16:26:24 | 59,105,231 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,335 | r | plot4.R | ## Contruct a panelplot
## Construct a 2x2 panel plot of household power data and save it as
## "panelplot.png". Expects "hhpc.txt" (whitespace-delimited, header row)
## with columns Global_active_power, Voltage, Sub_metering_1..3 and
## Global_reactive_power covering two full days at one sample per minute,
## so row 1440 marks the first midnight and row 2880 the second.
# Read data
hhPc <- read.table("hhpc.txt", header = TRUE)
# Set panel dimensions: 2 rows x 2 columns, filled row-wise
par(mfrow = c(2, 2))
# Panel 1: global active power
plot(hhPc$Global_active_power, type = "l", ann = FALSE, xaxt = "n")
axis(side = 1, at = c(0, 1440, 2880), labels = c("Thu", "Fri", "Sat"))
title(ylab = "Global Active Power (kilowatts)")
# Panel 2: voltage (mid tick normalized to 1440; the original used 1450,
# inconsistent with the day boundary used by the other panels)
plot(hhPc$Voltage, type = "l", ann = FALSE, xaxt = "n")
axis(side = 1, at = c(0, 1440, 2880), labels = c("Thu", "Fri", "Sat"))
title(ylab = "Voltage", xlab = "datetime")
# Panel 3: the three energy sub-meterings overlaid
plot(hhPc$Sub_metering_1, type = "l", col = "black", ann = FALSE, xaxt = "n")
lines(hhPc$Sub_metering_2, type = "l", col = "red")
lines(hhPc$Sub_metering_3, type = "l", col = "blue")
axis(side = 1, at = c(0, 1440, 2880), labels = c("Thu", "Fri", "Sat"))
title(ylab = "Energy sub metering")
# Single legend with proper line keys. The original drew two overlapping
# legends using pch = "---" and pch = "--" as a workaround; lty = 1 gives
# the intended line symbols in one call.
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Panel 4: global reactive power
plot(hhPc$Global_reactive_power, type = "l", ann = FALSE, xaxt = "n")
axis(side = 1, at = c(0, 1440, 2880), labels = c("Thu", "Fri", "Sat"))
title(ylab = "Global_reactive_power", xlab = "datetime")
# Copy the screen device to a PNG and close that device. dev.copy() makes
# the new device current, so a bare dev.off() closes it; the original
# hard-coded dev.off(3), which breaks whenever other devices are open.
dev.copy(png, file = "panelplot.png", width = 480, height = 480)
dev.off()
|
41808eab401da4c1e6ece223777361c5b7b43442 | a9876fc0d9a0fcc003cf045a60aae968118de06c | /cachematrix.R | a89a21e9347c430055cbe147d83460dade3fdd9d | [] | no_license | ash2025/ProgrammingAssignment2 | 775da844bde05bcb255216601ee821839188d448 | 0cf305fb0df17c0aba00e336b85b7e66f8f40f12 | refs/heads/master | 2021-08-31T22:47:57.823962 | 2017-12-23T07:29:27 | 2017-12-23T07:29:27 | 115,169,316 | 0 | 0 | null | 2017-12-23T04:06:04 | 2017-12-23T04:06:04 | null | UTF-8 | R | false | false | 1,438 | r | cachematrix.R | ## Matrix inversion is usually a costly computation and there may be some
## benefit to caching the inverse of a matrix rather than compute it
##repeatedly. These two functions are used to cache the inverse
##of a matrix.
## makeCacheMatrix: wrap a matrix together with a cache for its inverse.
## Returns a list of four closures sharing the matrix `A` and the cached
## inverse `AI`:
##   set(B)          -- replace the stored matrix and invalidate the cache
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse in the cache
##   getinverse()    -- return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(A = matrix()) {
  AI <- NULL
  list(
    set = function(B) {
      A <<- B
      AI <<- NULL
    },
    get = function() A,
    setinverse = function(inverse) AI <<- inverse,
    getinverse = function() AI
  )
}
## cacheSolve: return the inverse of the matrix wrapped by a makeCacheMatrix
## object `A`, computing it at most once. A cache hit emits the message
## "getting cached data" and returns the stored inverse; a miss computes the
## inverse with solve(), stores it via A$setinverse(), and returns it.
## Assumes the wrapped matrix is invertible (square, non-singular).
cacheSolve <- function(A, ...) {
  ## Return a matrix that is the inverse of 'A'
  cached <- A$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(A$get(), ...)
  A$setinverse(inv)
  inv
}
##A<-matrix(c(8,2,3,4,7,6,3,8,9),3,3)
##dit(A)
##x<-makeCacheMatrix(A)
## A %*% cacheSolve(x)
|
227fe933ee1adaf89b9149820bb134eb31975a75 | 570547372c4812300599010847c7bf1c0a6fcde1 | /tests/testthat/test-biplot.R | 9cb39558bb7dc7d32483a1a539b87d88ca276370 | [] | no_license | karthy257/gnm | b148e3a8cdb3429640dddd51a63dbd0a8bcc2882 | d98bbfd5ab86d54414b477db0240edd9874d7ee2 | refs/heads/master | 2021-03-06T11:49:57.261844 | 2020-02-03T09:21:22 | 2020-02-03T09:21:22 | 246,198,744 | 1 | 0 | null | 2020-03-10T03:24:07 | 2020-03-10T03:24:07 | null | UTF-8 | R | false | false | 1,836 | r | test-biplot.R | context("datasets [barley]")
# set seed to fix sign (the SVD below is sign-indeterminate); RNGversion
# pins the pre-3.6 sampler so set.seed(1) reproduces historical results
suppressWarnings(RNGversion("3.0.0"))
set.seed(1)
# Gabriel, K R (1998). Generalised bilinear regression. Biometrika 85, 689–700.
test_that("biplot model as expected for barley data", {
    # rank-2 generalized bilinear model of the barley data, quasi-binomial
    # (wedderburn) family, no intercept
    biplotModel <- gnm(y ~ -1 + instances(Mult(site, variety), 2),
                       family = wedderburn, data = barley, verbose = FALSE)
    expect_known_value(biplotModel,
                       file = test_path("outputs/biplotModel.rds"))
    # rotate and scale fitted predictors
    barleyMatrix <- xtabs(biplotModel$predictors ~ site + variety,
                          data = barley)
    barleySVD <- svd(barleyMatrix)
    # biplot coordinates: scale left/right singular vectors by sqrt of the
    # singular values and keep the first two components
    A <- sweep(barleySVD$u, 2, sqrt(barleySVD$d), "*")[, 1:2]
    B <- sweep(barleySVD$v, 2, sqrt(barleySVD$d), "*")[, 1:2]
    rownames(A) <- levels(barley$site)
    rownames(B) <- levels(barley$variety)
    colnames(A) <- colnames(B) <- paste("Component", 1:2)
    # compare vs matrices in Gabriel (1998): allow for sign change
    # 3rd element in fit is 1.425 vs 1.42 in paper
    expect_equivalent(round(A, 2),
                      matrix(c(4.19, 2.76, 1.43, 1.85, 1.27,
                               1.16, 1.02, 0.65, -0.15,
                               -0.39, -0.34, -0.05, 0.33, 0.16,
                               0.4, 0.73, 1.46, 2.13), nrow = 9))
    expect_equivalent(round(B, 2),
                      matrix(c(-2.07, -3.06, -2.96, -1.81, -1.56,
                               -1.89, -1.18, -0.85, -0.97, -0.60,
                               -0.97, -0.51, -0.33, -0.50, -0.08,
                               1.08, 0.41, 1.15, 1.27, 1.40), nrow = 10))
    # chi-square statistic approx equal to that reported
    expect_equal(round(sum(residuals(biplotModel, type = "pearson")^2)), 54)
    expect_equal(df.residual(biplotModel), 56)
})
|
34175f248a36f9dc8b7749a0cec38afeb4ab0ea9 | e037d1fb00eea42605f0558bb0ac31359ae0d56b | /scratch_plot.R | 24e1f4169680129100ca8b671b9f8cf93aa4a6a8 | [
"MIT"
] | permissive | momdadok/process_temp | 7ecdd284b362d4561a5bf950ef5eb8a03d5c3c74 | 8041d96481c5d4c2e28464ea110667d6e48493bf | refs/heads/master | 2021-01-09T20:09:11.348495 | 2016-08-11T22:06:20 | 2016-08-11T22:06:20 | 64,982,681 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,003 | r | scratch_plot.R | if(exists("scratch_gap")==FALSE){
scratch_gap<-data.frame(time=character(),temp=numeric(),gap=numeric(),date=character(),Time=character(),state=numeric())
scratch_gap$time<-as.POSIXct(scratch_gap$time)
scratch_gap$date<-as.POSIXct(scratch_gap$date)
scratch_gap$Time<-as.POSIXct(scratch_gap$Time)
}
# --- Interactive scratcher temperature review --------------------------------
# Prompts for one or more daily log dates, reads the matching composite/gap
# files, converts temperatures C -> F, accumulates rows in `scratch_gap`,
# summarises temperatures per day, and finally plots everything faceted by
# date. NOTE(review): the ingestion code below is duplicated inside the
# while-loop; consider factoring it into a function.
first_date<-as.POSIXct(readline("enter file date in yyyy-mm-dd format: "))
path<-"C:\\Users\\Clai\\Documents\\Line_2\\"
# file names encode the date as mmddyy
first_filedate<-format(first_date,"%m%d%y")
all_data_file_path<-paste(path,"composite_data_",first_filedate,".txt",sep="")
gap_data_file_path<-paste(path,"scratch_in_gap_",first_filedate,".txt",sep="")
all_data<-read.delim(all_data_file_path)
gap_data<-read.delim(gap_data_file_path)
all_data$time<-as.POSIXct(all_data$time)
gap_data$time<-as.POSIXct(gap_data$time)
# keep only the scratcher-inlet sensor rows
new_scratch_data<-all_data[all_data$loc=="scratcher_in",]
new_scratch_data$time<-as.POSIXct(new_scratch_data$time)
# align the gap data to start at the first scratcher timestamp
start_time<-new_scratch_data[1,1]
new_gap_data<-gap_data[match(start_time,gap_data$time):dim(gap_data)[1],]
new_gap_data$date<-first_date
new_gap_data$Time<-as.POSIXct(new_gap_data$time)
# Celsius -> Fahrenheit
new_gap_data$temp<-new_gap_data$temp*1.8+32
# copy the machine state column across (positional; assumes the two files
# are row-aligned from start_time onward -- TODO confirm)
new_gap_data$state<-new_scratch_data$state[1:dim(new_gap_data)[1]]
scratch_gap<-rbind(scratch_gap, new_gap_data)
# summarise temperatures (column 2) for rows with state == 2
median_temp_data<-na.omit(new_gap_data[new_gap_data$state==2,2])
median_temp<-data.frame(date=character(),mean=numeric(),stdev=numeric())
individual_median_temp<-data.frame(date=first_date,t(quantile(median_temp_data)),mean=mean(median_temp_data),stdev=sd(median_temp_data))
colnames(individual_median_temp)[2:6]<-c("min","1qr","median","3qr","max")
median_temp<-rbind(median_temp,individual_median_temp)
View(median_temp)
# keep ingesting additional days until the user answers anything but "n"
input_complete<-readline("input completed? y/n ")
while(input_complete=="n"){
  date<-as.POSIXct(readline("enter file date in yyyy-mm-dd format: "))
  path<-"C:\\Users\\Clai\\Documents\\Line_2\\"
  filedate<-format(date,"%m%d%y")
  all_data_file_path<-paste(path,"composite_data_",filedate,".txt",sep="")
  gap_data_file_path<-paste(path,"scratch_in_gap_",filedate,".txt",sep="")
  all_data<-read.delim(all_data_file_path)
  gap_data<-read.delim(gap_data_file_path)
  all_data$time<-as.POSIXct(all_data$time)
  gap_data$time<-as.POSIXct(gap_data$time)
  new_scratch_data<-all_data[all_data$loc=="scratcher_in",]
  new_scratch_data$time<-as.POSIXct(new_scratch_data$time)
  start_time<-new_scratch_data[1,1]
  new_gap_data<-gap_data[match(start_time,gap_data$time):dim(gap_data)[1],]
  new_gap_data$date<-date
  new_gap_data$Time<-as.POSIXct(new_gap_data$time)
  new_gap_data$temp<-new_gap_data$temp*1.8+32
  new_gap_data$state<-new_scratch_data$state[1:dim(new_gap_data)[1]]
  # shift later days back onto the first day's clock so facets share an axis
  if(first_date<date){
    new_gap_data$Time<-new_gap_data$Time-(date-first_date)
  }
  scratch_gap<-rbind(scratch_gap, new_gap_data)
  median_temp_data<-na.omit(new_gap_data[new_gap_data$state==2,2])
  individual_median_temp<-data.frame(date=date,t(quantile(median_temp_data)),mean=mean(median_temp_data),stdev=sd(median_temp_data))
  colnames(individual_median_temp)[2:6]<-c("min","1qr","median","3qr","max")
  median_temp<-rbind(median_temp,individual_median_temp)
  View(median_temp)
  input_complete<-readline("input completed? y/n ")
}
# Final plot: temperature vs time (shifted back 4 h -- presumably a
# timezone adjustment, confirm), coloured by gap status, one facet per date
library(ggplot2)
scratch_plot<-ggplot(data=scratch_gap)+geom_point(aes(x=Time-3600*4,y=temp,color=factor(gap)),size=1)+theme_bw()
scratch_plot<-scratch_plot+scale_x_datetime(date_breaks="15 min",date_labels = "%H:%M")
scratch_plot<-scratch_plot+scale_y_continuous(breaks=c(100,125,150,175,200,225,250,275,300,325))
scratch_plot<-scratch_plot+scale_color_discrete(name="gap in stock?",labels=c("no video","startup/shutdown","no gap","gap"))
scratch_plot<-scratch_plot+theme(axis.text.x=element_text(angle=90))+facet_grid(date~.)
scratch_plot<-scratch_plot+guides(color=guide_legend(override.aes=list(size=3)))+xlab("time")
print(scratch_plot)
6fa0afd00c9511fef7507ba5f2990c72aca12b60 | c2605a05ea9f80096aa85f60d6545268f0bc774a | /R/theme_soe.r | 9b31bbecd15e78aa9cb84e8da60fd6d1094f9837 | [
"Apache-2.0"
] | permissive | jayrbrown/envreportutils | 836d67b4c8d24f4bfa3e73229bcca73b7043e734 | e90b8075ec76d194b90ba98ae97593c82e6ba261 | refs/heads/master | 2021-01-14T10:23:36.576327 | 2015-04-24T22:24:57 | 2015-04-24T22:24:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,966 | r | theme_soe.r | # Copyright 2015 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#' Default theme for EnvReportBC graphs and plots
#'
#' A thin wrapper around the shared SOE theme foundation.
#'
#' @import ggplot2 ggthemes extrafont
#' @param base_size base font size (default = 12)
#' @param base_family base font family (default = Verdana)
#' @param use_sizes use relative font sizes (logical; passed through to
#'   \code{ggthemes::theme_foundation})
#' @export
#' @keywords plotting theme
#' @return a ggplot2 theme
theme_soe <- function(base_size = 12, base_family = "Verdana", use_sizes = TRUE) {
  theme_soe_foundation(base_size = base_size, base_family = base_family,
                       use_sizes = use_sizes)
}
#' SOE plot theme for facetted graphs
#'
#' Same foundation as \code{theme_soe}, plus panel spacing, black panel
#' borders and grey strip backgrounds suited to facetted plots.
#'
#' @import ggplot2 ggthemes
#' @param base_size base font size (default = 12)
#' @param base_family base font family (default = Verdana)
#' @param use_sizes use relative font sizes (logical; passed through to
#'   \code{ggthemes::theme_foundation})
#' @export
#' @keywords plotting theme
#' @return a ggplot2 theme
theme_soe_facet <- function(base_size=12, base_family="Verdana", use_sizes=TRUE) {
  theme_soe_foundation(base_size = base_size, base_family = base_family,
                       use_sizes = use_sizes) +
    theme(
      panel.margin.x = unit(.6,"lines"),
      panel.margin.y = unit(.6,"lines"),
      panel.border = element_rect(colour = "black", fill = NA),
      strip.background = element_rect(colour = "black", fill = "grey85"))
}
# Shared foundation for the SOE themes (internal; used by theme_soe() and
# theme_soe_facet()). Builds on ggthemes::theme_foundation and applies the
# common EnvReportBC styling: black text and axis lines, no ticks, blank
# backgrounds, light-grey major grid lines only.
theme_soe_foundation <- function(base_size, base_family, use_sizes, facet) {
  # `facet` is accepted for interface compatibility but is unused; the
  # facet-specific tweaks live in theme_soe_facet().
  thm <- ggthemes::theme_foundation(base_size = base_size,
                                    base_family = base_family,
                                    use_sizes = use_sizes)
  thm +
    theme(
      text = element_text(color="black"),
      axis.line = element_line(colour="black"),
      axis.text = element_text(color = 'black'),
      axis.text.y = element_text(hjust = 1),
      axis.ticks = element_blank(),
      axis.title.y = element_text(vjust=1, angle = 90),
      axis.title.x = element_text(vjust=0),
      plot.title = element_text(vjust=2),
      legend.title = element_text(face="plain"),
      legend.key = element_blank(),
      panel.background = element_blank(),
      # panel.border was listed twice in the original theme() call;
      # a single element_blank() is sufficient
      panel.border = element_blank(),
      panel.grid.minor = element_blank(),
      panel.grid.major = element_line(colour = "grey80",size=0.5),
      panel.margin.x = unit(0, "lines"),
      panel.margin.y = unit(0, "lines"),
      plot.background = element_blank())
}
|
5d11fd5632d7cc5d212a0f5a9f7b5697852cf289 | 624e163c8bb48436986a61dcada147d269908a19 | /code/functions.R | 62c37a76f10e017e4c8c986218e5fdc47ce962ca | [] | no_license | commfish/2016_survey | f491dd7044c7aee4af6323c697531fc1813e12b5 | 4c9b77f356fded97dc56fe4b6ddca5a9ee4e11e6 | refs/heads/master | 2020-05-29T08:50:42.254100 | 2017-02-16T02:49:29 | 2017-02-16T02:49:29 | 69,285,788 | 0 | 1 | null | 2016-12-14T00:00:46 | 2016-09-26T19:35:10 | R | UTF-8 | R | false | false | 3,483 | r | functions.R | ####### 2016 Scallop Statewide Survey
####### Ben Williams / Katie Palof
####### ben.williams@alaska.gov / katie.palof@alaska.gov
# Functions to bootstrap scallop survey by numbers, weight, and meat weight ratio
# Function to summarize survey densities (does NOT include bootstrap):
# one output row per year x District x Bed x variable group.
f.sum <- function(x){
  # coerce the list element to a data frame before using dplyr.
  # Expected columns from upstream prep: n (tows per group), area_nm2
  # (bed area, nm^2), di (density per tow) -- TODO confirm against caller.
  # Within summarise(), later expressions reuse earlier results (dbar feeds
  # var_dbar, N, varN), so the order of the expressions matters:
  #   dbar            mean density per tow
  #   var_dbar, cv    variance and CV (%) of dbar
  #   N, varN, cvN    abundance scaled by bed area, its variance and CV (%)
  x = as.data.frame(x)
  x %>%
    group_by(year, District, Bed, variable)%>%
    summarise(n=mean(n),
              area = mean(area_nm2) ,
              dbar = (1/n*sum(di)),
              var_dbar=1/((n)-1)*sum((di-dbar)^2) ,
              cv=sqrt(var_dbar)/dbar*100,
              ss=sum((di-dbar)^2),
              N=area*dbar,
              varN=(area^2)*1/n*1/(n-1)*ss,
              cvN=sqrt(varN)/N*100) -> out
  out
}
# bootstrap ----
# used for numbers and weights-------------
# Nonparametric bootstrap of mean density (dbar) and abundance (N) for one
# group: resamples tows (rows) with replacement, 1000 replicates.
f.it <- function(x){
  # coerce the list element to a data frame.
  # y keeps the identifier columns of the first row (positions 2:4 and 8,
  # positional -- TODO confirm these are the group identifiers) so they can
  # be bound onto every bootstrap replicate.
  x = as.data.frame(x)
  y = x[1,c(2:4,8)]
  # one bootstrap statistic: mean density and area-scaled abundance
  boot.it <- function(x){
    d_bar = sum(x$di)/mean(x$n)
    N=mean(x$area_nm2)*d_bar
    c(d_bar, N)
  }
  # draw one row-resample (with replacement) and evaluate the statistic
  f.do <- function(x){
    x %>% sample_n(nrow(.), replace=TRUE) -> x
    boot.it(x)
  }
  # 1000 replicates -> data frame with one row per replicate
  as.data.frame(t(replicate(1000,f.do(x)))) -> out
  names(out) <- c('dbar','N')
  cbind(out,y)
}
# bootstrap II----
# used for meat weight ratio
# Bootstraps the mean meat-weight ratio at the bed level: whole rows are
# resampled, then averaged by year x District x Bed (not by individual event).
f.wt <- function(x){
  # coerce the list element to a data frame
  x = as.data.frame(x)
  # group means of `ratio` for one bootstrap sample
  f.do <- function(y){
    y %>%
      group_by(year,District,Bed) %>%
      summarise(ratio = mean(ratio))
  }
  # 1000 resamples with replacement -> stacked group means, tagged by row.
  # NOTE(review): `replicate = 1:n()` numbers rows of the stacked frame, so
  # with more than one bed per sample it is a row counter rather than a
  # bootstrap-sample id -- confirm this is intended downstream.
  replicate(1000,sample_n(x, nrow(x), replace=T), simplify=FALSE) %>%
    lapply(., f.do) %>%
    bind_rows %>%
    mutate(replicate=1:n())
}
# Clapper density summarisation function --------------------
# Summarises clapper densities per year x District x Bed (no bootstrap).
# The *_c block mirrors f.sum() for counts (di); the *_wt block repeats the
# same statistics for weights (di_wt).
f.clap <- function(x){
  # coerce the list element to a data frame; summarise() expressions reuse
  # earlier results (dbar_c feeds var_dbar_c, N_c, etc.), so order matters
  x = as.data.frame(x)
  x %>%
    group_by(year, District, Bed)%>%
    summarise(n=mean(n),
              area = mean(area_nm2) ,
              dbar_c = (1/n*sum(di)),
              var_dbar_c=1/((n)-1)*sum((di-dbar_c)^2) ,
              cv=sqrt(var_dbar_c)/dbar_c*100,
              ss=sum((di-dbar_c)^2),
              N_c=area*dbar_c,
              varN_c = (area^2)*1/n*1/(n-1)*ss,
              cvN_c = sqrt(varN_c)/N_c*100,
              dbar_wt = (1/n*sum(di_wt)) ,
              var_dbar_wt=1/((n)-1)*sum((di_wt-dbar_wt)^2) ,
              cv_wt=sqrt(var_dbar_wt)/dbar_wt*100 ,
              ss_wt=sum((di_wt-dbar_wt)^2) ,
              Wt_c=area*dbar_wt ,
              varWt_c=(area^2)*1/n*1/(n-1)*ss_wt ,
              cvWt_c=sqrt(varWt_c)/Wt_c*100) -> out
  out
}
# K-S Function ----
# Two-sample Kolmogorov-Smirnov test comparing the `height` distributions of
# the two m_weight groups; returns a one-row result with n and the p-value.
ks_func <- function(x){
  # coerce the list element to a data frame
  x = as.data.frame(x)
  # reshape long -> wide: one column per distinct m_weight value.
  # The lines below read columns `ht` and `mw`, so m_weight is assumed to
  # take exactly the values "ht" and "mw" -- TODO confirm upstream coding.
  x %>% spread(m_weight, height, fill=NA) ->inter
  # row count of the widened table, reported alongside the p-value
  inter %>% summarise(n=n()) ->n
  ks <-ks.test(inter$ht, inter$mw)
  p.value <- ks$p.value
  out <- cbind(n, p.value)
  out
}
|
12c2cf7fbbbe7ed3aa663a6ea860212e1e9b76cc | 1c9c642a22f017cf9b1b097439ebe9609240c48a | /Graphical_Model/TimeVaryingGraphicalModel/R/3d_Case/3dexample.R | d6e743a89cb7d754d96164ce240b8f5dd7cadb42 | [] | no_license | MeileiJiang/machine-learning-study | df1d1a7d9cd400a42c78ae0eb3bfda25bec63c7b | 1c1b2a612f75d98916625e5d9282f451812bd5c3 | refs/heads/master | 2020-04-16T14:42:50.664315 | 2016-09-01T19:15:00 | 2016-09-01T19:15:00 | 37,352,735 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,809 | r | 3dexample.R | #############################################################################
## 3dexample.R
## Three dimensional graphical model. Only node 1 and node2 has interaction
## Author: Meilei
## Date: April 2016
#############################################################################
library(igraph)
library(ggplot2)
library(dplyr)
library(Matrix)
library(mvtnorm)
# Piecewise-linear coefficient function on [0, 1]: triangular bumps of
# height +/-0.6 and half-width 0.05 centred at t = 0.25 (positive),
# t = 0.55 (negative) and t = 0.85 (positive); zero everywhere else.
# Scalar in, scalar out (applied element-wise by the caller).
cfun <- function(t) {
  if (t >= 0.2 && t <= 0.3) {
    0.6 - 12 * abs(t - 0.25)
  } else if (t >= 0.5 && t <= 0.6) {
    -0.6 + 12 * abs(t - 0.55)
  } else if (t > 0.8 && t <= 0.9) {
    0.6 - 12 * abs(t - 0.85)
  } else {
    0
  }
}
# Evaluate cfun() over a regular grid on [0, 1]. cfun() is scalar-valued,
# so it is applied element-wise (vapply) rather than via a manual fill loop.
grid <- seq(0, 1, by = 0.01)
y <- vapply(grid, cfun, numeric(1))
# c.df = data.frame( y, grid)
# # The ture coefficient functions
# c1.df = data.frame(beta12 = c.df$y, beta13 = rep(0, length(c.df$grid)), grid = c.df$grid)
# mc1.df = melt(c1.df, id.vars = "grid")
# c2.df = data.frame(beta21 = c.df$y, beta23 = rep(0.6, length(c.df$grid)), grid = c.df$grid)
# mc2.df = melt(c2.df, id.vars = "grid")
# c3.df = data.frame(beta31 = rep(0, length(c.df$grid)), beta32 = rep(0.6, length(c.df$grid)), grid = c.df$grid)
# mc3.df = melt(c3.df, id.vars = "grid")
#
# mc.df = rbind(mc1.df, mc2.df, mc3.df)
#
# ggplot(data = mc.df, aes(x = grid, y = -value)) +
# facet_wrap(~variable, ncol = 2) +
# geom_line(col = "blue") +
# scale_y_continuous(limits = c(-1,1)) +
# labs(x = "time", y = "partical correlation", title = "Partial correlation over time for each node")
#
# save(c.df, mc.df, file = "R/3d_Case/3dexample.RData")
# Build the 3x3 matrix of the three-node model at time t: nodes 1 and 2
# interact with time-varying strength cfun(t), nodes 2 and 3 with constant
# strength 0.6, and nodes 1 and 3 do not interact. Rows and columns are
# named X1..X3.
Omegafun <- function(t, cfun) {
  c12 <- cfun(t)
  M <- matrix(c(1,   c12, 0,
                c12, 1,   0.6,
                0,   0.6, 1),
              nrow = 3, ncol = 3)
  labels <- paste0("X", 1:3)
  dimnames(M) <- list(labels, labels)
  M
}
|
c88e5f0829d7d59893b9393d1d850d0ba54b15c3 | e55ac3e80a26f45269edc83fe37fc6c7ce084448 | /plot4.R | ddff3438185a0cc14cb3c8844dc564abadb2f812 | [] | no_license | mfcr/ExData_Plotting1 | 47a5d74b86cd670701bedfbd9417ff0730d69d77 | 66d0a4e52ad49d281fc4888090d22f3a05512133 | refs/heads/master | 2021-01-19T18:17:48.901463 | 2014-05-07T22:48:48 | 2014-05-07T22:48:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,637 | r | plot4.R | #CODE THAT IS REPEATED ACROSS AL FILES OF ASSIGMENT
# CODE THAT IS REPEATED ACROSS ALL FILES OF THE ASSIGNMENT
# Read the full dataset; "?" marks missing values.
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# Parse the Date column (dd/mm/yyyy) into Date objects.
data[, 1] <- as.Date(data[, 1], "%d/%m/%Y")
# Keep only 2007-02-01 and 2007-02-02. The column now holds Date values, so
# compare against Date values: the original compared against the strings
# "1/2/2007"/"2/2/2007" on an undefined object `consumption`, which both
# errored and (had it run) matched nothing after the conversion above.
data <- data[data$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]
# Join date and time into one POSIXct column named Date_Time and bind it to
# the measurement columns. POSIXct stores cleanly in a data frame (unlike
# the list-based POSIXlt from strptime()), and naming the column is what
# makes the data$Date_Time references below work.
data <- cbind(Date_Time = as.POSIXct(strptime(paste(data[, 1], data[, 2]), "%Y-%m-%d %H:%M:%S")),
              data[, c(3:9)])
# END OF REPEATED CODE
# Open the "png" graphics device; the plot is written to "plot4.png".
png(file = "plot4.png")
# Transparent background; 2x2 grid of panels filled column-wise.
par(bg = "transparent", mfcol = c(2, 2))
# Panel 1: Global active power.
plot(data$Date_Time, data$Global_active_power, xlab = "", ylab = "Global Active Power", type = "n")
lines(data$Date_Time, data$Global_active_power)
# Panel 2: Energy sub metering, three series on one panel.
plot(data$Date_Time, data$Sub_metering_1, xlab = "", ylab = "Energy sub metering", type = "n")
lines(data$Date_Time, data$Sub_metering_1, col = "black")
lines(data$Date_Time, data$Sub_metering_2, col = "red")
lines(data$Date_Time, data$Sub_metering_3, col = "blue")
legend("topright", col = c("black", "red", "blue"), bty = "n", lty = c(1, 1, 1),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Panel 3: Voltage.
plot(data$Date_Time, data$Voltage, xlab = "datetime", ylab = "Voltage", type = "n")
lines(data$Date_Time, data$Voltage)
# Panel 4: Global reactive power.
plot(data$Date_Time, data$Global_reactive_power, xlab = "datetime", ylab = "Global_reactive_power", type = "n")
lines(data$Date_Time, data$Global_reactive_power)
# Close the graphics device so the file is flushed to disk.
dev.off()
|
5a4ea927142b7109ffd1d01b436847cebd455672 | 753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed | /service/paws.mediastoredata/man/delete_object.Rd | b5d2826ec131bf1b6d427ccc497848fe24bde46a | [
"Apache-2.0"
] | permissive | CR-Mercado/paws | 9b3902370f752fe84d818c1cda9f4344d9e06a48 | cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983 | refs/heads/master | 2020-04-24T06:52:44.839393 | 2019-02-17T18:18:20 | 2019-02-17T18:18:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 540 | rd | delete_object.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.mediastoredata_operations.R
\name{delete_object}
\alias{delete_object}
\title{Deletes an object at the specified path}
\usage{
delete_object(Path)
}
\arguments{
\item{Path}{[required] The path (including the file name) where the object is stored in the container. Format: <folder name>/<folder name>/<file name>}
}
\description{
Deletes an object at the specified path.
}
\section{Accepted Parameters}{
\preformatted{delete_object(
Path = "string"
)
}
}
|
ed41458a6befec0567a55827c94ea88810cac1f0 | 9d486d1318cf5df4a820caf1d4498ba16067c478 | /Scripts/01-data-overview.r | 4e84295d8ffe3fd84464fd6f209db24dc2dcb047 | [] | no_license | ihar/BSU-Workshop-2012 | 9fe8ebe6f0e40bad311ca7d98b1cc3780b191882 | fdd511c5f5af2a79940e2d86c5374431c0431287 | refs/heads/master | 2020-05-18T16:15:24.595701 | 2012-09-10T21:58:23 | 2012-09-10T21:58:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,587 | r | 01-data-overview.r | #
# Preliminary data analysis. Visualisation.
#
## Visualise one object from the test or training set as an image
# \param digit.data one row of the data file, without the class label
#   (784 pixel intensities)
# \result draws the digit as a 28x28 grayscale image
display.digit <- function(digit.data) {
  # reshape the 784 pixel values into a 28x28 matrix (column-wise fill)
  an.object <- matrix(as.numeric(digit.data), nrow=28)
  # ylim = c(1, 0) flips the y axis so the digit is drawn upright;
  # gray(255:0/255) maps high intensities to dark pixels
  image(an.object, ylim=c(1, 0), col = gray(255:0/255), axes = F)
}
# Load the training data
train <- read.csv("../Data/train.csv")
# Structure of the training set
str(train)
# Number of objects in the training set (= number of rows)
dim(train)[1]
# Class labels (first column)
labels <- as.numeric(train[,1])
# Feature vectors without the labels
train <- train[,-1]
# Length of a single feature vector
length(train[1,])
# Number of objects in each of the 10 classes.
# The classes are not perfectly balanced, but their sizes are
# roughly comparable
table(labels)
# What a single feature vector represents.
# Index of an object in the training set
object.num <- 1090
# Which digit this particular object is
labels[object.num]
# What it looks like as an image
display.digit(train[object.num,])
# For visualisation, take the first 10 representatives of each class.
vis.data <- c()
for (curr.class in 0:9) {
  # first ten elements of class curr.class
  curr.class.set <- train[labels == curr.class,][1:10,]
  vis.data <- rbind(vis.data, curr.class.set)
}
# Graphics settings for a tidier layout of many objects on one page
op <- par(no.readonly = TRUE)
par(mfrow=c(10,10))
par(mar=c(0, 0, 0, 0))
for (i in 1:100) {
  display.digit(vis.data[i,])
}
par(op)
# Look at the average representative of each class
par(mfrow=c(2, 5))
par(mar=c(0, 0, 0, 0))
for (curr.class in 0:9){
  average.class <- colMeans(train[labels == curr.class,])
  display.digit(average.class)
}
par(op)
0f56fdfcc4625d1af7cc8ca436c0cb1decd6c182 | 75a243536c9da0cd920a961ce58474670211fa67 | /code/Figure 1/Figure1D.R | efce0aeede8da3cb2fcbdc0b3bbc1dd62199f468 | [] | no_license | BatadaLab/scID_manuscript_figures | ab2ed0f6798c49cb1051be1c2daa6bb1ba6e072b | 75ea9c6f0bdc9844fb515da86c25bbc1f1ce84b0 | refs/heads/master | 2021-06-27T06:40:08.861832 | 2020-11-30T20:36:47 | 2020-11-30T20:36:47 | 169,872,160 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,823 | r | Figure1D.R | library(ggplot2)
library(scID)
# Compute the adjusted Rand index (ARI) of scID "self-projection" (the same
# dataset used as reference and target) for four published datasets, then
# plot the ARIs as a bar chart. The four per-dataset sections of the
# original were copy-pasted; they are factored into one helper driven by a
# small configuration table, preserving the original dataset order and
# logFC thresholds.

# Run scID with one dataset as both reference and target, and return the
# ARI between the published labels and the scID assignments, computed over
# the cells scID does not leave "unassigned".
self_projection_ari <- function(gem_file, labels_file, logFC) {
  gem <- readRDS(gem_file)
  labels <- readRDS(labels_file)
  scID_output <- scid_multiclass(target_gem = gem, reference_gem = gem,
                                 reference_clusters = labels, logFC = logFC,
                                 only_pos = TRUE, estimate_weights_from_target = TRUE)
  scID_labels <- scID_output$labels
  assigned <- names(which(scID_labels != "unassigned"))
  mclust::adjustedRandIndex(labels[assigned], scID_labels[assigned])
}

# One row per dataset: input files and the logFC threshold used originally.
datasets <- data.frame(
  name = c("Tirosh", "Montoro", "Hu", "Shekhar"),
  gem = c("~/scID_manuscript_figures/data/Figure1/Tirosh2016_gem.rds",
          "~/scID_manuscript_figures/data/Figure1/Montoro2018_gem.rds",
          "~/scID_manuscript_figures/data/Figure1/Hu2017_gem.rds",
          "~/scID_manuscript_figures/data/Figure2/Reference_gem.rds"),
  labels = c("~/scID_manuscript_figures/data/Figure1/Tirosh2016_labels.rds",
             "~/scID_manuscript_figures/data/Figure1/Montoro2018_labels.rds",
             "~/scID_manuscript_figures/data/Figure1/Hu2017_labels.rds",
             "~/scID_manuscript_figures/data/Figure2/Reference_clusters.rds"),
  logFC = c(0.7, 0.5, 0.4, 0.8),
  stringsAsFactors = FALSE
)

ARI <- mapply(self_projection_ari, datasets$gem, datasets$labels, datasets$logFC,
              USE.NAMES = FALSE)

# Plot results (top-level ggplot expression auto-prints, as in the original)
df <- data.frame(value = ARI, dataset = datasets$name)
ggplot(df, aes(y = value, x = dataset)) + scale_y_continuous(limits = c(0, 1)) +
  geom_bar(stat = "identity", position = "dodge") +
  theme(legend.position = "none", text = element_text(size = 10),
        plot.margin = unit(c(0, 0, 0, 0), "cm")) +
  labs(title = "", x = "", y = "")
|
edbf2edff01960c3605808179e3be97c6d9c29db | 360057961e6d4f30cb475463e2caf6e46c2b6b10 | /evaluation/workers/evaluatePerformance.R | c8d5de706b0f3a43814c25573da21aedef4afbe4 | [
"Apache-2.0"
] | permissive | rsanchezgarc/BIPSPI | 3f5611569d12f67f13e2124732d1170d5bb1a4de | 1d9801a176323ba238c8d10e673cf2055f83a4b6 | refs/heads/master | 2023-03-21T10:46:16.167852 | 2023-03-13T10:44:04 | 2023-03-13T10:44:04 | 134,728,928 | 9 | 3 | null | null | null | null | UTF-8 | R | false | false | 1,620 | r | evaluatePerformance.R | suppressMessages(library(pROC))
# Rank (1-based) of the first true positive (categ == 1) when the rows of
# scoreDf are sorted by `prediction`, highest score first. Returns NA if
# there is no positive row.
getRankFirstPos <- function(scoreDf) {
  sorted_categ <- scoreDf$categ[order(scoreDf$prediction, decreasing = TRUE)]
  match(1, sorted_categ)
}
# Number of true positives (categ == 1) among the numPairs highest-scoring
# rows. categ is expected to be 1 for positives and 0 or -1 for negatives;
# -1 entries are mapped to 0 before summing.
getNumHits <- function(scoresDf, numPairs = 500) {
  ranked <- scoresDf$categ[order(scoresDf$prediction, decreasing = TRUE)]
  top <- ranked[1:numPairs]
  top[top == -1] <- 0
  sum(top)
}
# Precision among the numPairs highest-scoring rows: the fraction whose
# categ equals 1. Accepts categ coded as 1/0 or 1/-1 (-1 is treated as 0).
getPrecisionTopPairs <- function(scoresDf, numPairs = 500) {
  ranked <- scoresDf$categ[order(scoresDf$prediction, decreasing = TRUE)]
  top <- ranked[1:numPairs]
  top[top == -1] <- 0
  sum(top) / numPairs
}
# Area under the ROC curve for `prediction` against the binary labels in
# `categ`, via pROC::roc. direction = "<" pins the orientation (higher
# scores => positive class) so pROC does not silently flip the AUC for
# poor predictors.
getAUC_ROC <- function(scoresDf){
  return(roc(scoresDf$categ,scoresDf$prediction,direction = "<")$auc)
}
getFullEvaluation <- function(scoresDf, numPairs = 500){
  # One-row data.frame summarising a set of scored pairs: rank of the first
  # true positive, precision among the top `numPairs` pairs, and ROC AUC.
  #
  # scoresDf: data.frame with columns `prediction` (score) and `categ` (label).
  # numPairs: how many of the top-ranked pairs to use for the precision.
  data.frame(RankFirstPos = getRankFirstPos(scoresDf),
             # BUGFIX: numPairs was previously hard-coded to 500 here,
             # silently ignoring the caller's argument.
             PrecisionTopPairs = getPrecisionTopPairs(scoresDf, numPairs = numPairs),
             # BUGFIX: getAUC_ROC() takes no numPairs argument; passing one
             # raised an "unused argument" error at run time.
             AUC_ROC = getAUC_ROC(scoresDf)
  )
}
getFullComparation <- function(scoresDf1, scoresDf2,
                               numPairs = 500, numPairs2 = numPairs){
  # Side-by-side evaluation of two scored pair sets (e.g. two methods on the
  # same complex): rank of first positive, top-pair precision and ROC AUC
  # for each, returned as a one-row data.frame.
  #
  # numPairs / numPairs2: top-pair cutoffs for set 1 and set 2 respectively
  # (numPairs2 defaults to numPairs).
  data.frame(RankFirstPos1 = getRankFirstPos(scoresDf1),
             RankFirstPos2 = getRankFirstPos(scoresDf2),
             Precision1 = getPrecisionTopPairs(scoresDf1, numPairs = numPairs),
             Precision2 = getPrecisionTopPairs(scoresDf2, numPairs = numPairs2),
             # BUGFIX: getAUC_ROC() takes only the score data.frame; the
             # extra numPairs argument previously caused an
             # "unused argument" error at run time.
             AUC_ROC1 = getAUC_ROC(scoresDf1),
             AUC_ROC2 = getAUC_ROC(scoresDf2)
  )
}
d8f4e43da8732843a561825bec5356b5f385d2d1 | 831b461ddb4c2f9b2d4f973ea64826b48badd90c | /man/geo2utm.Rd | f821d024db0183fc3afc07dcefbc00889c1f8cf9 | [] | no_license | UCANR-IGIS/uavimg | 533a1b817a6da0cd6413087b185855e8ff657697 | 6e7c411f100780b7d8a373ccc13b3c6310c5285f | refs/heads/master | 2021-06-03T10:49:42.122819 | 2020-07-05T02:57:44 | 2020-07-05T02:57:44 | 123,742,652 | 2 | 1 | null | null | null | null | UTF-8 | R | false | true | 485 | rd | geo2utm.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geo2utm.R
\name{geo2utm}
\alias{geo2utm}
\title{Look up UTM zone}
\usage{
geo2utm(x, lat = NULL)
}
\arguments{
\item{x}{Longitude in decimal degrees. Can also be a numeric vector of length 2 containing longitude and latitude values.}
\item{lat}{Latitude in decimal degrees}
}
\value{
A \code{CRS} object containing the UTM zone
}
\description{
Finds the UTM zone for a geographic coordinate
}
\details{
}
|
f9c9f028c1423c5b5e08cf5fe24436a9186c682b | 6279d28d4f39868e29312bb711c9f74260563502 | /survivalInR/code/utils.R | 35cb97c656c407a6fd9f97260be8c3749d75fd56 | [] | no_license | OpenIntroOrg/stat-online-extras | d0369ce901dac7ab4b7da7ed3afa4a381edd3cde | 6314fff2d3ba25acb7de9996248ff83c07bc5e99 | refs/heads/master | 2021-01-01T15:28:47.430568 | 2017-07-18T18:01:00 | 2017-07-18T18:01:00 | 97,627,708 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 512 | r | utils.R | Cbind <- function(m1, m2){
  # Cbind: column-bind two matrices/vectors of possibly different heights,
  # padding the shorter one with NA rows at the bottom first.
  # Promote plain vectors to single-column matrices so dim() works below.
  if(is.vector(m1)){
    m1 <- matrix(m1)
  }
  if(is.vector(m2)){
    m2 <- matrix(m2)
  }
  # Row counts (l1, l2) and column counts (d1, d2) of both inputs.
  l1 <- dim(m1)[1]
  l2 <- dim(m2)[1]
  d1 <- dim(m1)[2]
  d2 <- dim(m2)[2]
  # Pad the shorter matrix with NA rows so both have the same height.
  if(l1 > l2){
    m2 <- rbind(m2, matrix(NA, l1-l2, d2))
  }
  if(l1 < l2){
    m1 <- rbind(m1, matrix(NA, l2-l1, d1))
  }
  # Last expression is the return value: the NA-padded column binding.
  cbind(m1, m2)
}
mendMatrix <- function(m){
  # Push all NA entries in each column of `m` to the top, bottom-aligning the
  # non-NA values while keeping their relative order. Returns the modified
  # matrix (the input is not changed in place).
  d <- dim(m)
  # seq_len() instead of 1:d[2]: safe when the matrix has zero columns
  # (1:0 would iterate over c(1, 0)).
  for(i in seq_len(d[2])){
    temp <- m[,i]
    temp <- temp[!is.na(temp)]
    lt <- length(temp)
    skip <- d[1] - lt
    m[,i] <- NA
    # seq_len() instead of 1:lt: when a column is entirely NA (lt == 0) this
    # assigns an empty vector to an empty index set, whereas skip + 1:0
    # produced invalid indices and a "replacement has length zero" error.
    m[skip + seq_len(lt), i] <- temp
  }
  m
}
|
95826e40a71f594a5269bb82435c8281be3bf65f | 57744ab6fedc2d4b8719fc51dce84e10189a0a7f | /rrdfqbpresent/R/PresentQbAsHtml.R | 2d314923e8fc67ce373d613c5af78ef492a1a061 | [] | no_license | rjsheperd/rrdfqbcrnd0 | 3e808ccd56ccf0b26c3c5f80bec9e4d1c83e4f84 | f7131281d5e4a415451dbd08859fac50d9b8a46d | refs/heads/master | 2023-04-03T01:00:46.279742 | 2020-05-04T19:10:43 | 2020-05-04T19:10:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,054 | r | PresentQbAsHtml.R | ##' Create HTML file for RDF data cube given by turtle file
##'
##' Loads an RDF data cube from a Turtle file, extracts its DSD name, domain,
##' dimensions and attributes via SPARQL, and renders the cube as an HTML
##' table through MakeHTMLfromQb.
##'
##' @param dataCubeFile path to the Turtle file containing the RDF data cube
##' @param htmlfile target HTML file passed through to MakeHTMLfromQb
##' @param rowdim dimension to lay out on the table rows (assumed to be a
##'   dimension identifier understood by MakeHTMLfromQb -- TODO confirm)
##' @param coldim dimension to lay out on the table columns (same assumption)
##' @param idrow row identifier forwarded to MakeHTMLfromQb (semantics not
##'   visible here -- TODO confirm)
##' @param idcol column identifier forwarded to MakeHTMLfromQb (same caveat)
##' @return the value returned by MakeHTMLfromQb (presumably the path of the
##'   generated HTML file -- TODO confirm)
##' @examples
PresentQbAsHtml<- function( dataCubeFile, htmlfile, rowdim, coldim, idrow, idcol ) {
  store <- new.rdf() # Initialize an empty rrdf triple store
  cat("Loading ", dataCubeFile, "\n")
  temp<-load.rdf(dataCubeFile, format="TURTLE", appendTo= store)
  # Pull the DSD and domain names from the cube, then build the SPARQL
  # prefix block used by all subsequent queries.
  dsdName<- GetDsdNameFromCube( store )
  domainName<- GetDomainNameFromCube( store )
  forsparqlprefix<- GetForSparqlPrefix( domainName )
  dimensions<- sparql.rdf(store, GetDimensionsSparqlQuery( forsparqlprefix ) )
  # NOTE(review): attributesDf is computed but never used below.
  attributesDf<- sparql.rdf(store, GetAttributesSparqlQuery( forsparqlprefix ))
  outhtmlfile<- MakeHTMLfromQb( store, forsparqlprefix, dsdName, domainName,
                                dimensions, rowdim, coldim, idrow, idcol,
                                htmlfile, useRDFa=TRUE, compactDimColumns=FALSE,
                                debug=FALSE)
  outhtmlfile
}
|
b5b823a4b42faedc2b7f135449a351b0888733bf | 7a180654ef4c6cffbacc1e9919f5834aae20e06f | /samsung/samsung.R | 26a15b3b162a0b6d0edfdd2e103eb40847971438 | [] | no_license | parkkuri/-project- | 6fd5856f26c66416f066555f3dde5e3256d06ccb | 1a2060e9562d29c6d217dc43d477c492cb5239e1 | refs/heads/master | 2020-05-09T14:28:26.798786 | 2019-04-13T16:08:08 | 2019-04-13T16:08:08 | 181,195,421 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 833 | r | samsung.R | setwd("C:\\Users\\myungjun\\Desktop\\명준\\2017-1\\경영프로그래밍\\project\\samsung")
# Build a token-frequency table from RHINO-tokenised Samsung news text and
# drop stopwords; the final CSV is written by the line that follows this block.
library(stringr)
# One line per document; tokens were joined with ", " by the RHINO
# (Korean morphological analyser) output step.
samsung <- readLines("samsunglist.txt", encoding = "UTF-8")
splt_sm <- str_split(samsung, ", ")
# Frequency table of all tokens, most frequent first.
sort.sm <- sort(table(splt_sm), decreasing = TRUE)
write.csv(sort.sm, "samsung_freq_result.csv", row.names = FALSE)
smg <- read.csv("samsung_freq_result.csv", stringsAsFactors = FALSE)
stopword <- readLines("stopwords.txt", encoding = "UTF-8")
# Remove stopword rows by exact set membership. This replaces the original
# grep(paste0("^", w, "$")) loop: %in% is a single vectorised pass, needs no
# regex escaping for stopwords containing metacharacters, and -- unlike the
# old negative indexing `smg[-aa, ]` -- also works when no stopword occurs
# in the table (the old code errored on an empty/NULL index vector).
smg <- smg[!(smg$splt_sm %in% stopword), ]
write.csv(smg,"samsung_final.csv", row.names = F) |
101bc0764ea6056fbeb15a65d8c8f1bce6ac0f94 | 4012b414b3e84f143f3cfe7a416d674124f067f2 | /shiny/ui/S1_grd2rtc_tab_ui.R | a114f6fd6fb8a5c0b2dc9bc4dce6aea8c029ae66 | [
"MIT"
] | permissive | IvanLJF/opensarkit | aa80aa91679dffe3fd485cac5c1cfcca1c3bca2d | 8934c9a617ecf65ce21fd65dfe01df1df8db108f | refs/heads/master | 2020-12-30T15:41:19.407263 | 2017-04-10T14:04:04 | 2017-04-10T14:04:04 | 91,157,560 | 1 | 0 | null | 2017-05-13T07:34:39 | 2017-05-13T07:34:39 | null | UTF-8 | R | false | false | 11,359 | r | S1_grd2rtc_tab_ui.R | #-----------------------------------------------------------------------------
# S1 Tab
# shinydashboard tab definition for the Sentinel-1 GRD-to-RTC processor.
# All inputId/outputId values (s1_g2r_*) are read/written by the matching
# server-side handlers (not visible in this file).
tabItem(tabName = "s1_grd2rtc",
        fluidRow(
   # Include the line below in ui.R so you can send messages
   tags$head(tags$script(HTML('Shiny.addCustomMessageHandler("jsCode",function(message) {eval(message.value);});'))),
   # for busy indicator
   useShinyjs(),
   tags$style(appCSS),
   #----------------------------------------------------------------------------------
   # Processing Panel Sentinel-1
   box(
     # Title
     title = "Processing Panel", status = "success", solidHeader= TRUE,
     tags$h4("Sentinel-1 GRD to RTC processor"),
     hr(),
     # Input type choice: single zip file, batch folder, or OST inventory
     # shapefile (local path or uploaded zip archive)
     radioButtons("s1_g2r_input_type", "Input type:",
                  c("Original File" = "file",
                    "Folder (batch processing)" = "folder",
                    "OST inventory shapefile (local/on server)" = "inventory",
                    "OST inventory shapefile (upload zipped archive)" = "zipfile")),
     # Panel shown when a single Sentinel-1 zip file is selected
     conditionalPanel(
       "input.s1_g2r_input_type == 'file'",
       shinyFilesButton("s1_g2r_zip","Choose a Sentinel-1 zip file","Choose a Sentinel-1 zip file",FALSE),
       br(),
       br(),
       verbatimTextOutput("s1_g2r_zip_filepath"),
       hr(),
       tags$b("Output directory:"),
       br(),
       br(),
       shinyDirButton("s1_g2r_outdir","Browse","Choose a directory",FALSE),
       br(),
       br(),
       verbatimTextOutput("s1_g2r_outfolder"),
       hr(),
       # NOTE(review): the inputId "s1_g2r_res" is repeated in all four
       # conditionalPanels below. Duplicated Shiny input IDs are invalid HTML
       # and only one widget's value is reliably read -- confirm against the
       # server code and consider unique IDs per panel.
       radioButtons("s1_g2r_res", "Choose the output resolution:",
                    c("Medium Resolution (30m)" = "med_res",
                      "Full resolution (10m)" = "full_res")
       )
     ),
     # Panel shown for batch processing of a whole project DATA folder
     conditionalPanel(
       "input.s1_g2r_input_type == 'folder'",
       shinyDirButton("s1_g2r_inputdir","Choose S1 DATA folder in your project directory","Choose the DATA folder inside your project directory",FALSE),
       br(),
       br(),
       verbatimTextOutput("s1_g2r_inputfolder"),
       hr(),
       radioButtons("s1_g2r_res", "Choose the output resolution:",
                    c("Medium Resolution (30m)" = "med_res",
                      "Full resolution (10m)" = "full_res")
       )
     ),
     # Panel shown for an OST inventory shapefile on the local file system
     conditionalPanel(
       "input.s1_g2r_input_type == 'inventory'",
       shinyFilesButton("s1_g2r_shp","Choose S1 DATA file","Choose one or more files",FALSE),
       br(),
       br(),
       verbatimTextOutput("s1_g2r_shp_filepath"),
       hr(),
       tags$b("Output directory"),
       br(),
       shinyDirButton("s1_g2r_outdir2","Browse","Choose a directory",FALSE),
       br(),
       br(),
       verbatimTextOutput("s1_g2r_outfolder2"),
       hr(),
       radioButtons("s1_g2r_res", "Choose the output resolution:",
                    c("Medium Resolution (30m)" = "med_res",
                      "Full resolution (10m)" = "full_res")
       ),
       hr(),
       # NASA Earthdata credentials; the s1_asf_* IDs suggest they feed the
       # ASF/Earthdata download step -- confirm in the server code.
       "NASA Earthdata username/password. If you are not in possess of a user account: ",
       a(href = "https://urs.earthdata.nasa.gov/", target="_blank","Click Here!"),
       textInput(inputId = "s1_asf_uname3",
                 label = "Username",
                 value = "Type in your username"
       ),
       passwordInput(inputId = "s1_asf_piwo3",
                     label = "Password",
                     value = "Type in your password"
       )
     ),
     # Panel shown for an uploaded (zipped) OST inventory shapefile
     conditionalPanel(
       "input.s1_g2r_input_type == 'zipfile'",
       fileInput('S1_grd2rtc_zipfile_path', label = 'Browse',accept = c(".zip")),
       hr(),
       tags$b("Output directory"),
       br(),
       shinyDirButton("s1_g2r_outdir3","Browse","Choose a directory",FALSE),
       br(),
       br(),
       verbatimTextOutput("s1_g2r_outfolder3"),
       hr(),
       radioButtons("s1_g2r_res", "Choose the output resolution:",
                    c("Medium Resolution (30m)" = "med_res",
                      "Full resolution (10m)" = "full_res")
       ),
       hr(),
       "NASA Earthdata username/password. If you are not in possess of a user account: ",
       a(href = "https://urs.earthdata.nasa.gov/", target="_blank","Click Here!"),
       textInput(inputId = "s1_asf_uname4",
                 label = "Username",
                 value = "Type in your username"
       ),
       passwordInput(inputId = "s1_asf_piwo4",
                     label = "Password",
                     value = "Type in your password"
       )
     ),
     hr(),
     # Button that kicks off processing; wrapped for the busy indicator.
     withBusyIndicatorUI(
       actionButton("s1_g2r_process", "Start processing")
     ),
     br(),
     #"Output:",
     textOutput("processS1_G2R")
   ), #close box
   # #----------------------------------------------------------------------------------
   # # Info Panel
   # Static documentation shown next to the processing panel.
   # NOTE(review): several user-facing strings below contain typos
   # ("autmatically", "processign", "Preceise", "mstrices", "rathen"); they
   # are runtime strings and left untouched here -- fix as copy edits.
   box(
     title = "Info Panel", status = "success", solidHeader= TRUE,
     # NOTE(review): tabBox() width is a Bootstrap grid width (1-12);
     # 700 looks like a pixel value -- confirm the intended layout.
     tabBox(width = 700,
            tabPanel("General Info",
                     hr(),
                     p("Sentinel-1 Ground Range Detected (GRD) products are operationally generated by the
                       Payload Data Ground Segment (PDGS) of ESA. From all available products, those images have undergone the most preprocessing
                       steps, including azimuth and range compression (i.e. SAR focusing), slant to ground range during which the phase information
                       is lost. Therefore advanced interferometric and polarimetric data analysis are not possible.
                       On the other hand, the products exhibit only 1/7th of the size of an Single-Look Complex (SLC) image
                       and further processign time is considerably reduced."),
                     p("This processor autmatically applies the missing steps to generate Radiometrically-Terrain-Corrected (RTC) Products that
                       are suited for land cover classification. ")
            ),
            tabPanel("Processing",
                     tags$h4("Processing Chain"),
                     hr(),
                     tags$b("1. Apply Orbit File"),
                     p("Precise orbit state vectors are necessary for geolocation and radiometric correction. The orbit state vectors provided
                       in the metadata of a SAR product are generally
                       not accurate and can be refined with the precise orbit files which are available days-to-weeks
                       after the generation of the product."),
                     p("The orbit file provides accurate satellite position and velocity information.
                       Based on this information, the orbit state vectors in the abstract metadata of the product are updated."),
                     p("For Sentinel-1, Restituted orbit files and Preceise orbit files may be applied. Precise orbits are produced
                       a few weeks after acquisition. Orbit files are automatically downloaded."),
                     hr(),
                     tags$b("2. Thermal noise removal"),
                     p("Level-1 products provide a noise LUT for each measurement data set. The values in the de-noise LUT, provided in linear power,
                       can be used to derive calibrated noise profiles matching the calibrated GRD data."),
                     hr(),
                     tags$b("3. GRD Border Noise Removal"),
                     p("The Sentinel-1 (S-1) Instrument Processing Facility (IPF) is responsible for generating the complete family of Level-1
                       and Level-2 operation products. The processing of the RAW data into L1 products features number of processing
                       steps leading to artefacts at the image borders. These processing steps are mainly the azimuth and /range compression and
                       the sampling start time changes handling that is necessary to compensate for the change of earth curvature.
                       The latter is generating a number of leading and trailing “no-value” samples that depends on the data-take length
                       that can be of several minutes. The former creates radiometric artefacts complicating the detection of the “no-value” samples.
                       These “no-value” pixels are not null but contain very low values which complicates the masking based on thresholding.
                       This operator implements an algorithm allowing masking the \"no-value\" samples efficiently with thresholding method."),
                     hr(),
                     tags$b("4. Terrain Flattening"),
                     p("When land cover classification is applied to terrain that is not flat, inaccurate classification result is produced.
                       This is because that terrain variations. affect not only the position of a target on the Earth's surface, but also the
                       brightness of the radar return. Without treatment, the radiometric biases caused by terrain variations are introduced
                       into the coherency and covariance mstrices. It is often seen that the classification result mimic the radiometry rathen
                       than the actual land cover. This operator removes the radiometric variability associated with topography using the
                       terrain flattening method proposed by Small [1] while leaving the radiometric variability associated with land cover.")
            ),
            tabPanel("References",
                     p("Small, D. (2011): Flattening Gamma: Radiometric Terrain Correction for SAR imagery. in:
                       IEEE Transaction on Geoscience and Remote Sensing, Vol. 48, No. 8, ")
            )
     )
   )
) # close fluid row
) # close tabitem
|
0741fddd376719ba9c79dabd7c299e3785ab12bb | 613d08fbfa4a938342c308857a378ce41ef98b38 | /man/list_deps.Rd | 8da0c4f9b5bac146fe9848693488042b8b336c70 | [] | no_license | cran/reportfactory | 14d8cb298eac6c5906f45faad3ae0edbce11bc46 | eaba631bf91e8cf76a9016e49ca12d55e3cdb2ef | refs/heads/master | 2023-07-05T21:25:41.721322 | 2021-08-09T11:30:02 | 2021-08-09T11:30:02 | 334,202,917 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,521 | rd | list_deps.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/list_deps.R
\name{list_deps}
\alias{list_deps}
\title{List dependencies of reports within a factory}
\usage{
list_deps(
factory = ".",
missing = FALSE,
check_r = TRUE,
exclude_readme = TRUE,
parse_first = FALSE
)
}
\arguments{
\item{factory}{The path to the report factory or a folder within the desired
factory. Defaults to the current directory.}
\item{missing}{A logical indicating if only missing dependencies should be
listed (\code{TRUE}); otherwise, all packages needed in the reports are listed;
defaults to \code{FALSE}.}
\item{check_r}{If true, R scripts contained within the factory will also be
checked. Note that this will error if the script cannot be parsed.}
\item{exclude_readme}{If TRUE (default) README files will not be checked for
dependencies.}
\item{parse_first}{If \code{TRUE} code will first be parsed for validity and
unevaluated Rmd chunks will not be checked for dependencies. The default
value is \code{FALSE} and, in this case, files will simply be checked line by
line for calls to \code{library}, \code{require} or use of double, \code{::}, and triple,
\code{:::} function calls.}
}
\value{
A character vector of package dependencies.
}
\description{
List package dependencies based on the reports and scripts within the
report_sources and scripts directories respectively.
}
\note{
This function requires that any R scripts present in the factory have
valid syntax; otherwise the function will error.
}
|
0b2b2a485bbcaee3e7cef6e42465c4d8ff8999b1 | 6f539c275d5f6c0325d76a580ac5b029290a5b77 | /R/regression_spline.r | 35bb5a64c00858806d861c0a6b97279f290d2c90 | [] | no_license | wenbo5565/misc | 2a2ee34972fcb005b68e831cc5132c42494fb406 | 9eeebe4f64ffbf2f56a1734f002c9730d190ce31 | refs/heads/master | 2020-03-26T07:02:43.360520 | 2018-08-20T00:56:01 | 2018-08-20T00:56:01 | 144,633,856 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,573 | r | regression_spline.r | # Read in Data
# Load the yield-curve data: row 1 holds the times to maturity, later rows
# are dated term structures (column 1 = date, columns 2..19 = rates).
# NOTE(review): "Diabold" looks like a misspelling of "Diebold" but must
# match the actual data file name on disk -- do not change blindly.
yy=matrix(scan("Diabold_Li_data.txt"),ncol=19,byrow=T)
x=yy[1,2:19] ## x is the time to maturity (1 month, 2 months, etc.)
date = 19900531
y=yy[yy[,1]==date,2:19] ## term structure at 1990.05.31
##=======data====================
plot(x,y,xlab="time to maturity",ylab="interest rate")
##=======local polynomial========
library(KernSmooth)
dpill(x,y)  # plug-in bandwidth selector; value printed for reference
out.localPoly = locpoly(x,y,bandwidth=dpill(x,y))
lines(out.localPoly$x,out.localPoly$y,lty=1,col='blue')
##=======Basis Spline============
library(splines)
xx = bs(x,knots=2)  # B-spline basis with an interior knot at maturity 2
summary(lm(y~xx))
lines(x,lm(y~xx)$fit,lty=2,col='red')
##=======smoothing spline========
out.s=smooth.spline(x,y,cv=TRUE) ## ordinary Cross Validation
lines(out.s$x,out.s$y,lty=3,col='green')
##=======optimal lambda N-S curve approach===========
lam11 = 0.1975 ## optimal lambda obtained from hw 7
##======== function to obtain Nelson-Siegel curve on a dense set of points
plotNScurve <- function(min = 1, max = 120, lam = 0.057, coef){
  # Evaluate a Nelson-Siegel yield curve on 1000 equally spaced maturities
  # in (min, max], using decay parameter `lam` and the three fitted
  # coefficients in `coef` (level, slope, curvature).
  # Returns list(x = maturities, y = interest rates).
  maturities <- min + seq_len(1000) / 1000 * (max - min)
  slopeBasis <- (1 - exp(-lam * maturities)) / (lam * maturities)
  curvatureBasis <- slopeBasis - exp(-lam * maturities)
  rates <- coef[1] + coef[2] * slopeBasis + coef[3] * curvatureBasis
  list(x = maturities, y = rates)
}
##======N-S model and plot=========================
# Nelson-Siegel regressors at the observed maturities, using the fixed decay
# parameter lam11; the three coefficients are then fitted by OLS.
zz1=(1-exp(-lam11*x))/lam11/x
zz2=zz1-exp(-lam11*x)
xx=cbind(zz1,zz2)
outNS1=lm(y~xx)
plotNS1=plotNScurve(min=1,max=120,lam=lam11,coef=outNS1$coef)
# NOTE(review): `main=` is not an argument of lines(); it is typically
# ignored (with a warning) -- harmless but misleading.
lines(plotNS1$x,plotNS1$y,main="N-S Curve",lty=4,col='black')
# NOTE(review): the legend draws every entry with lty=1 while the curves
# above use lty 1-4 -- confirm whether lty=1:4 was intended.
legend('bottomright',cex=1,lty=1,legend=c("local polynomial","basis spline","smoothing spline","N-S optimal lambda"),col=c("blue","red","green","black"))
|
f5b83b136eb9d992666274cb16f36f9fd6fb3979 | 8a0fa0382c47572f82484eace2a3330d85b3c4fb | /man/dna_convert.Rd | 9c3848124fcf95cc1f94c3eb4e363b69a7f83922 | [
"MIT"
] | permissive | herrmannrobert/GenArt | 62409d482eeb66ffc52f075b976303054ad71985 | cd9dac81e3b2d9a22bb7c43ae024c89cb4e92551 | refs/heads/master | 2020-05-31T07:19:33.019300 | 2019-06-11T12:42:52 | 2019-06-11T12:42:52 | 190,163,049 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 884 | rd | dna_convert.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions.R
\name{dna_convert}
\alias{dna_convert}
\title{Convert image DNA to PNG format}
\usage{
dna_convert(dna, maxXY, tempf, pngWH, bg = "white")
}
\arguments{
\item{dna}{matrix or character, untangled or tangled image DNA of any size.}
\item{tempf}{temporary file, generated by default or given as a file path.}
\item{pngWH}{vector, width and height of reconstructed image. If missing, width and height of original image are used.}
\item{bg}{character, color or RGB code indicating the background color of PNG.}
}
\description{
Function converts image DNA to array object including RGB or gray scale for each pixel.
}
\details{
See example...
}
\examples{
dna <- dna_untangle(dna_in(rgb = FALSE))
for(i in 1:20){
dna <- dna_mutate(dna)
}
test <- dna_convert(dna)
grid::grid.raster(test)
test[1,1,]
}
|
5355a88752d1c1df98d8261ff6a39982fd6ef670 | 17a8c230b33a3167179628573096d4b6ec3957a4 | /man/nice_vertex_labels.Rd | ade6cf6a9afb034f2fddf9bf3a7212e2638e26b1 | [
"MIT"
] | permissive | NirvanaNimbusa/shortestpath | 4066c6c431c334156aad43be5379d8e6f9c45a9a | 3ff827490c2fcb514853ba5eb2c6dd4129bfac85 | refs/heads/master | 2023-01-30T16:48:24.137655 | 2020-12-14T12:20:11 | 2020-12-14T12:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 364 | rd | nice_vertex_labels.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{nice_vertex_labels}
\alias{nice_vertex_labels}
\title{Produce "name (current min dist)" labels for all vertices.}
\usage{
nice_vertex_labels(graph)
}
\arguments{
\item{graph}{The spgraph object.}
}
\description{
Produce "name (current min dist)" labels for all vertices.
}
|
ec134b12fba71ee9c70ecb6196a4073fbcc8674f | 73e638dc549babb1034d2c103aa9b6fcdc5d7322 | /examples/r2d2marg.R | 863674154a33686c9ddf634a419d44daab51545c | [] | no_license | yandorazhang/R2D2 | b11e8c46902949a55ab4a1a0fe4f65687c915cc0 | e734639929abb60e616c114ac7fe4e2beb5c7f9d | refs/heads/master | 2023-01-14T08:39:13.803273 | 2020-11-18T13:43:30 | 2020-11-18T13:43:30 | 282,605,509 | 6 | 0 | null | null | null | null | UTF-8 | R | false | false | 633 | r | r2d2marg.R | rho <- 0.5
# Number of predictors
p <- 25
# Number of observations
n <- 60
# Construct beta: sparse vector with 5 scaled t(3) draws at positions 11..15
n_nonzero <- 5
beta <- rep(0, p)
set.seed(1)
beta[11:(10 + n_nonzero)] <- stats::rt(n_nonzero, df = 3) * sqrt(0.5/(3 * n_nonzero/2))
# Construct x: rows drawn from N(0, V) where V has AR(1)-type decay rho^|i-j|
# (rho is set at the top of this script)
sigma <- 1
times <- 1:p
H <- abs(outer(times, times, "-"))
V <- sigma * rho^H
x <- mvtnorm::rmvnorm(n, rep(0, p), V)
x <- scale(x, center = TRUE, scale = FALSE)  # centre columns, keep scale
# Construct y from the linear model with standard normal noise
y <- x %*% beta + stats::rnorm(n)
# Gibbs sampling
mcmc.n <- 10000
fit.new <- r2d2marg(x = x, y = y, mcmc.n = mcmc.n, print = FALSE)
# Discard the early samples as burn-in.
# NOTE(review): burnIn:10000 keeps draw number 5000 as well (5001 draws) and
# hard-codes mcmc.n; the conventional choice would be
# (burnIn + 1):mcmc.n -- confirm intent.
burnIn <- 5000
beta.new <- fit.new$beta[burnIn:10000, ]
|
b758dde521463f2d748abcb26da7c4f99cffd90c | 3ea724d02946007e84fd73ad410e3f2c2379f434 | /scratch/demo 1.R | 5ce6daced5bf943db96f10fed03e1f13cab5850a | [] | no_license | SamEdwardes/location-predictions | c0516960074ea601f7ee4150cc91c971f283a19b | 8a689191de03fd75d8e934a34c05a7e867e4d2bd | refs/heads/master | 2020-06-19T18:08:17.232928 | 2019-07-15T00:49:10 | 2019-07-15T00:49:10 | 196,815,091 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 590 | r | demo 1.R | library(httr)
library(jsonlite)
# NOTE(security): an API token is hard-coded and committed to source
# control; it should be revoked and read from an environment variable
# (e.g. Sys.getenv("NOAA_TOKEN")) instead.
my_token <- "oumhlAJjYfVbHlnFZrdfsHSLuQWWFhMU"
email <- "edwardes.s@gmail.com"
# base <- "https://www.ncdc.noaa.gov/cdo-web/api/v2"
# endpoint <- "data"
# NOAA CDO request: GHCND daily data for ZIP 28801 on 2010-05-01.
call <- "https://www.ncdc.noaa.gov/cdo-web/api/v2/data?datasetid=GHCND&locationid=ZIP:28801&startdate=2010-05-01&enddate=2010-05-01"
# The CDO API authenticates via a "token" request header.
get_weather <- GET(call, add_headers(token = my_token))
get_weather_text <- content(get_weather, "text")
get_weather_text
# convert to JSON
get_weather_json <- fromJSON(get_weather_text, flatten = TRUE)
get_weather_df <- as.data.frame(get_weather_json)
get_weather_df |
062a68f74ab0c685cf8799a651712f2f656e264b | 3a6fa2e7370f06fefc35b327a157e11cb40fb7a7 | /man/prior_check.Rd | 0cb2c3c36e0a63e59b1e83b24bc1053324328503 | [
"MIT"
] | permissive | JHart96/bisonR | 70e5294ea3cc08d80e8815d9a9ee64100cda53db | f1d1b0731fe63c4c6e01f877e6040f313cdfabb5 | refs/heads/main | 2023-08-18T03:18:45.911681 | 2023-07-28T18:06:39 | 2023-07-28T18:06:39 | 471,447,630 | 4 | 1 | NOASSERTION | 2023-07-28T18:06:41 | 2022-03-18T16:52:44 | R | UTF-8 | R | false | true | 920 | rd | prior_check.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/priors.R
\name{prior_check}
\alias{prior_check}
\title{Prior checks}
\usage{
prior_check(priors, model_type, type = "density")
}
\arguments{
\item{priors}{List of priors for a model, can be retrieved using \code{get_default_prior()}.}
\item{model_type}{Type of model the priors will be used for (same as the argument for \code{get_default_prior()}).}
\item{type}{Type of prior check to run, \code{"value"} or \code{"prediction"}. Details below.}
}
\description{
Prior checks
}
\details{
The parameter \code{type} determines what type of prior check to run. \code{type="value"} will
plot the prior probability over the parameter value on the original scale. \code{type="prediction"}
will run a prior predictive plot, where predictions from the model are generated using only
prior probabilities (the model when not updated from the data).
}
|
2a0c01f8e6ae53ae17605e2ce65a73a1674ca386 | e9aed7e25b138c033460e2e434f8e34905fe55ff | /zadania_5.R | 688a4e20efa6e6f42d771518f0f3d2d3347546d8 | [] | no_license | stepien-j/tipn-r-projects | ac3e8dc430ed33af92eca04f28c11ae7395ef252 | 71e1ea56202701dbc946b06ed3fd246ff1807d96 | refs/heads/master | 2020-03-18T21:20:05.482904 | 2018-05-05T16:02:38 | 2018-05-05T16:02:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 366 | r | zadania_5.R | library(ggplot2)
library(dplyr)
# Zad_5-1 (Task 5-1): read movies.csv with readr and inspect it
library(readr)
movies <- read_csv("movies.csv")
View(movies)
# Zad_5-2 (Task 5-2): movies released in 2005
filter(movies, year == 2005)
# Zad_5-3 (Task 5-3): title/year/budget sorted by descending budget
movies %>% select(title, year, budget) %>% arrange(desc(budget))
# Zad_5-4 (Task 5-4): Animation column for films from 1990
movies %>% select(Animation, year) %>% filter(year == 1990)
# Zad_5-5 (Task 5-5): Drama column and length, longest films first
movies %>% select(Drama, length) %>% arrange(desc(length)) |
6ef128363fa0d18b0c72cbba18d4b507c79e5274 | 58efa400972c747e26801b24252aea5bbe08d6a0 | /R/plotly_methods.R | 2c37071c2df72a056bb682286d02d157ac69390a | [] | no_license | shaoyoucheng/tidyseurat | 07a5095e936fb529a66348c6fd954592f53e31d2 | 784066fb6dcdb019b86c885253633bede0aee0b8 | refs/heads/master | 2023-07-20T14:02:49.004900 | 2021-08-19T02:06:20 | 2021-08-19T02:06:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,017 | r | plotly_methods.R | #' Initiate a plotly visualization
#'
#' This function maps R objects to [plotly.js](https://plot.ly/javascript/),
#' an (MIT licensed) web-based interactive charting library. It provides
#' abstractions for doing common things (e.g. mapping data values to
#' fill colors (via `color`) or creating [animation]s (via `frame`)) and sets
#' some different defaults to make the interface feel more 'R-like'
#' (i.e., closer to [plot()] and [ggplot2::qplot()]).
#'
#' @details Unless `type` is specified, this function just initiates a plotly
#' object with 'global' attributes that are passed onto downstream uses of
#' [add_trace()] (or similar). A [formula] must always be used when
#' referencing column name(s) in `data` (e.g. `plot_ly(mtcars, x=~wt)`).
#' Formulas are optional when supplying values directly, but they do
#' help inform default axis/scale titles
#' (e.g., `plot_ly(x=mtcars$wt)` vs `plot_ly(x=~mtcars$wt)`)
#'
#' @param data A data frame (optional) or [crosstalk::SharedData] object.
#' @param ... Arguments (i.e., attributes) passed along to the trace `type`.
#' See [schema()] for a list of acceptable attributes for a given trace `type`
#' (by going to `traces` -> `type` -> `attributes`). Note that attributes
#' provided at this level may override other arguments
#' (e.g. `plot_ly(x=1:10, y=1:10, color=I("red"), marker=list(color="blue"))`).
#' @param type A character string specifying the trace type
#' (e.g. `"scatter"`, `"bar"`, `"box"`, etc).
#'   If specified, it *always* creates a trace; otherwise the trace type is inferred from the data.
#' @param name Values mapped to the trace's name attribute. Since a trace can
#' only have one name, this argument acts very much like `split` in that it
#' creates one trace for every unique value.
#' @param color Values mapped to relevant 'fill-color' attribute(s)
#' (e.g. [fillcolor](https://plot.ly/r/reference#scatter-fillcolor),
#' [marker.color](https://plot.ly/r/reference#scatter-marker-color),
#' [textfont.color](https://plot.ly/r/reference/#scatter-textfont-color), etc.).
#' The mapping from data values to color codes may be controlled using
#' `colors` and `alpha`, or avoided altogether via [I()]
#' (e.g., `color=I("red")`).
#' Any color understood by [grDevices::col2rgb()] may be used in this way.
#' @param colors Either a colorbrewer2.org palette name
#' (e.g. "YlOrRd" or "Blues"),
#' or a vector of colors to interpolate in hexadecimal "#RRGGBB" format,
#' or a color interpolation function like `colorRamp()`.
#' @param stroke Similar to `color`, but values are mapped to relevant 'stroke-color' attribute(s)
#' (e.g., [marker.line.color](https://plot.ly/r/reference#scatter-marker-line-color)
#' and [line.color](https://plot.ly/r/reference#scatter-line-color)
#' for filled polygons). If not specified, `stroke` inherits from `color`.
#' @param strokes Similar to `colors`, but controls the `stroke` mapping.
#' @param alpha A number between 0 and 1 specifying the alpha channel applied to `color`.
#' Defaults to 0.5 when mapping to [fillcolor](https://plot.ly/r/reference#scatter-fillcolor) and 1 otherwise.
#' @param alpha_stroke Similar to `alpha`, but applied to `stroke`.
#' @param symbol (Discrete) values mapped to [marker.symbol](https://plot.ly/r/reference#scatter-marker-symbol).
#' The mapping from data values to symbols may be controlled using
#' `symbols`, or avoided altogether via [I()] (e.g., `symbol=I("pentagon")`).
#' Any [pch] value or [symbol name](https://plot.ly/r/reference#scatter-marker-symbol) may be used in this way.
#' @param symbols A character vector of [pch] values or [symbol names](https://plot.ly/r/reference#scatter-marker-symbol).
#' @param linetype (Discrete) values mapped to [line.dash](https://plot.ly/r/reference#scatter-line-dash).
#' The mapping from data values to symbols may be controlled using
#' `linetypes`, or avoided altogether via [I()] (e.g., `linetype=I("dash")`).
#' Any `lty` (see [par]) value or [dash name](https://plot.ly/r/reference#scatter-line-dash) may be used in this way.
#' @param linetypes A character vector of `lty` values or [dash names](https://plot.ly/r/reference#scatter-line-dash)
#' @param size (Numeric) values mapped to relevant 'fill-size' attribute(s)
#' (e.g., [marker.size](https://plot.ly/r/reference#scatter-marker-size),
#' [textfont.size](https://plot.ly/r/reference#scatter-textfont-size),
#' and [error_x.width](https://plot.ly/r/reference#scatter-error_x-width)).
#' The mapping from data values to symbols may be controlled using
#' `sizes`, or avoided altogether via [I()] (e.g., `size=I(30)`).
#' @param sizes A numeric vector of length 2 used to scale `size` to pixels.
#' @param span (Numeric) values mapped to relevant 'stroke-size' attribute(s)
#' (e.g.,
#' [marker.line.width](https://plot.ly/r/reference#scatter-marker-line-width),
#' [line.width](https://plot.ly/r/reference#scatter-line-width) for filled polygons,
#' and [error_x.thickness](https://plot.ly/r/reference#scatter-error_x-thickness))
#' The mapping from data values to symbols may be controlled using
#' `spans`, or avoided altogether via [I()] (e.g., `span=I(30)`).
#' @param spans A numeric vector of length 2 used to scale `span` to pixels.
#' @param split (Discrete) values used to create multiple traces (one trace per value).
#' @param frame (Discrete) values used to create animation frames.
#' @param width Width in pixels (optional, defaults to automatic sizing).
#' @param height Height in pixels (optional, defaults to automatic sizing).
#' @param source a character string of length 1. Match the value of this string
#' with the source argument in [event_data()] to retrieve the
#' event data corresponding to a specific plot (shiny apps can have multiple plots).
#' @author Carson Sievert
#' @references <https://plotly-r.com/overview.html>
#' @seealso \itemize{
#' \item For initializing a plotly-geo object: [plot_geo()]
#' \item For initializing a plotly-mapbox object: [plot_mapbox()]
#' \item For translating a ggplot2 object to a plotly object: [ggplotly()]
#' \item For modifying any plotly object: [layout()], [add_trace()], [style()]
#' \item For linked brushing: [highlight()]
#' \item For arranging multiple plots: [subplot()], [crosstalk::bscols()]
#' \item For inspecting plotly objects: [plotly_json()]
#' \item For quick, accurate, and searchable plotly.js reference: [schema()]
#' }
#'
#' @return A plotly
#'
#' @importFrom plotly plot_ly
#'
#' @export
#' @examples
#' \dontrun{
#' # plot_ly() tries to create a sensible plot based on the information you
#' # give it. If you don't provide a trace type, plot_ly() will infer one.
#' plot_ly(economics, x=~pop)
#' plot_ly(economics, x=~date, y=~pop)
#' # plot_ly() doesn't require data frame(s), which allows one to take
#' # advantage of trace type(s) designed specifically for numeric matrices
#' plot_ly(z=~volcano)
#' plot_ly(z=~volcano, type="surface")
#'
#' # plotly has a functional interface: every plotly function takes a plotly
#' # object as its first input argument and returns a modified plotly object
#' add_lines(plot_ly(economics, x=~date, y=~ unemploy / pop))
#'
#' # To make code more readable, plotly imports the pipe operator from magrittr
#' economics %>%
#' plot_ly(x=~date, y=~ unemploy / pop) %>%
#' add_lines()
#'
#' # Attributes defined via plot_ly() set 'global' attributes that
#' # are carried onto subsequent traces, but those may be over-written
#' plot_ly(economics, x=~date, color=I("black")) %>%
#' add_lines(y=~uempmed) %>%
#' add_lines(y=~psavert, color=I("red"))
#'
#' # Attributes are documented in the figure reference -> https://plot.ly/r/reference
#' # You might notice plot_ly() has named arguments that aren't in this figure
#' # reference. These arguments make it easier to map abstract data values to
#' # visual attributes.
#' p <- plot_ly(iris, x=~Sepal.Width, y=~Sepal.Length)
#' add_markers(p, color=~Petal.Length, size=~Petal.Length)
#' add_markers(p, color=~Species)
#' add_markers(p, color=~Species, colors="Set1")
#' add_markers(p, symbol=~Species)
#' add_paths(p, linetype=~Species)
#' }
#'
# Generic entry point: dispatches on the class of `data` (see the tbl_df and
# Seurat methods below). All arguments are forwarded unchanged to the method.
plot_ly <- function(data=data.frame(), ..., type=NULL, name=NULL,
                    color=NULL, colors=NULL, alpha=NULL,
                    stroke=NULL, strokes=NULL, alpha_stroke=1,
                    size=NULL, sizes=c(10, 100),
                    span=NULL, spans=c(1, 20),
                    symbol=NULL, symbols=NULL,
                    linetype=NULL, linetypes=NULL,
                    split=NULL, frame=NULL,
                    width=NULL, height=NULL, source="A") {
  UseMethod("plot_ly")
}
#' @export
#'
plot_ly.tbl_df <- function(data=data.frame(), ..., type=NULL, name=NULL,
                           color=NULL, colors=NULL, alpha=NULL,
                           stroke=NULL, strokes=NULL, alpha_stroke=1,
                           size=NULL, sizes=c(10, 100),
                           span=NULL, spans=c(1, 20),
                           symbol=NULL, symbols=NULL,
                           linetype=NULL, linetypes=NULL,
                           split=NULL, frame=NULL,
                           width=NULL, height=NULL, source="A") {
  # Strip the tibble class first so the call below reaches plotly's own
  # method instead of looping back into this one.
  plain_data <- drop_class(data, "tbl_df")
  plotly::plot_ly(
    plain_data, ...,
    type = type, name = name,
    color = color, colors = colors, alpha = alpha,
    stroke = stroke, strokes = strokes, alpha_stroke = alpha_stroke,
    size = size, sizes = sizes,
    span = span, spans = spans,
    symbol = symbol, symbols = symbols,
    linetype = linetype, linetypes = linetypes,
    split = split, frame = frame,
    width = width, height = height, source = source
  )
}
#' @export
plot_ly.Seurat <- function(data = data.frame(), ..., type = NULL, name= NULL,
                           color= NULL, colors = NULL, alpha = NULL,
                           stroke= NULL, strokes = NULL, alpha_stroke = 1,
                           size= NULL, sizes = c(10, 100),
                           span= NULL, spans = c(1, 20),
                           symbol= NULL, symbols = NULL,
                           linetype= NULL, linetypes = NULL,
                           split= NULL, frame= NULL,
                           width = NULL, height = NULL, source = "A") {
  # Convert the Seurat object to a tibble and re-dispatch; the resulting
  # tbl_df routes the call through the tbl_df method above.
  tbl <- as_tibble(data)
  plot_ly(tbl, ...,
          type = type, name = name,
          color = color, colors = colors, alpha = alpha,
          stroke = stroke, strokes = strokes, alpha_stroke = alpha_stroke,
          size = size, sizes = sizes,
          span = span, spans = spans,
          symbol = symbol, symbols = symbols,
          linetype = linetype, linetypes = linetypes,
          split = split, frame = frame,
          width = width, height = height, source = source)
}
|
097b76dfa94da749d27e7142444ddf66a605c7cd | be673221c51b37608c57173c02c13e54b106d590 | /ejemplos.R | 5921c29dcdda7d148346c45bf3143d0ccf7e5f74 | [] | no_license | rulits/Data-Science | c2e5b7feac8751809c8e4b8e0b97ab2de1f3885d | 913f6a6fe3ed7348ae241e7cfca5dae73738df81 | refs/heads/master | 2022-11-05T15:52:25.554072 | 2020-06-23T04:57:51 | 2020-06-23T04:57:51 | 273,156,622 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 813 | r | ejemplos.R | library(tidyverse)
# Exploratory examples: citation-data summaries and a binomial sampling demo.
# Load the citation data set (adjust the path to your machine).
papers <- as_tibble(read_csv("C:/Users/o/Documents/CitesforSara.csv"))
papers_select <- select(papers, journal, year, cites, title, au1)
# Number of papers with at least 100 citations.
count(filter(papers_select, cites >= 100))
# Total citations for papers published in Econometrica.
papers2 <- group_by(papers_select, journal)
econometrica <- filter(papers2, journal == 'Econometrica')
sum(econometrica$cites)
# First-author column kept as a plain vector.
distinct_vector <- papers_select$au1
x <- c(1, 5, 4, 9, 0)
# Draw 1000 binomial samples (8 trials, success probability 0.2).
successes <- rbinom(1000, 8, 0.2)
hist(successes)
# Binomial probabilities for X ~ Bin(10, 0.65): P(X = 7), P(X <= 7), P(X >= 6).
dbinom(7, size=10, prob=0.65)
pbinom(7, size=10, prob=0.65)
1-pbinom(6, size=10, prob=0.65)+dbinom(6, size=10, prob=0.65)
# Estimate the probability mass function from the simulated draws.
# The fill-in-the-blank placeholders in the original exercise are completed:
# group by the number of successes, count draws, normalize to frequencies.
binom_draws <- as_tibble(data.frame(successes))
estimated_pf <- binom_draws %>%
  group_by(successes) %>%
  summarise(n=n()) %>%
  mutate(freq=n/sum(n))
ggplot(estimated_pf, aes(x=successes, y=freq)) +
  geom_col() +
  ylab("Estimated Density")
159d813614d3e25e2aa5d086c21a0fe485ecd003 | 296935c1096701aafd6d848c26fc3846c2acf0f4 | /ETF_Returns_RScript.R | ed01f5c7b4022f7285008ac89a10a142752f4264 | [] | no_license | nickdani197/Predicting-ETF-Returns | 7146848c24c7dc979525630c733771f67df2ff23 | acacab57b7e7ce442f59f32c9a16431619eea463 | refs/heads/main | 2023-01-14T09:20:51.555199 | 2020-11-19T00:41:25 | 2020-11-19T00:41:25 | 314,090,839 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,254 | r | ETF_Returns_RScript.R | #Asma Karedia, Gunner West, Noumik Thadani, Som Jadhav
#Group39
#change this line to reflect the path to the csv file on your own computer
#etfs=read.csv("\\Users\\somja\\Documents\\College\\STA 371G\\ETFs_Updated_1.csv",row.names=1)
#etfs=read.csv("/Users/asmakaredia/Downloads/ETFs_Updated_1.csv",row.names=1)
#etfs=read.csv("\\Users\\Gunner\\Documents\\UTexas\\Classes\\(STA 371G) Statistics and Modeling\\Project\\ETFs_Updated_1.csv",row.names=1)
#etfs=read.csv("/Users/Noumik/Downloads/ETFs_Updated_1.csv",row.names=1)
#get rid of ETFs that don't fit into any investment type category
etfs_cleaned=etfs[!(etfs$investment==""),]
#want etfs which have somewhat even distribution of investments (industry wise)
etfs_cleaned=etfs_cleaned[!(etfs_cleaned$financial_services>80 | etfs_cleaned$technology>80 | etfs_cleaned$energy>80
| etfs_cleaned$industrials>80 | etfs_cleaned$healthcare>80 | etfs_cleaned$consumer_cyclical>80
| etfs_cleaned$basic_materials>80 | etfs_cleaned$real_estate>80 | etfs_cleaned$consumer_defensive>80
| etfs_cleaned$utilities>80 | etfs_cleaned$communication_services>80),]
#get rid of extraneous columns
etfs_cleaned<-etfs_cleaned[,-c(2,12,15,16,18,19,24)]
#get rows with missing data
etfs_na=etfs_cleaned[!complete.cases(etfs_cleaned), ]
#replace missing values for net_assets and fund_yield with mean of columns
etfs_cleaned$net_assets[is.na(etfs_cleaned$net_assets)]<-mean(etfs_cleaned$net_assets, na.rm=T)
etfs_cleaned$fund_yield[is.na(etfs_cleaned$fund_yield)]<-mean(etfs_cleaned$fund_yield, na.rm=T)
#remove 3 rows with NA for every column
etfs_cleaned=na.omit(etfs_cleaned)
#collinearity test for P/E, P/B, P/CF
pairs(~etfs_cleaned$price_book+etfs_cleaned$price_cashflow+etfs_cleaned$price_earnings)
cor(etfs_cleaned$price_book,etfs_cleaned$price_earnings,use="complete.obs")
cor(etfs_cleaned$price_earnings,etfs_cleaned$price_cashflow,use="complete.obs")
#collinearity check for portfolio stocks vs. sector/industry allocations
cor(etfs_cleaned$portfolio_stocks,etfs_cleaned$financial_services)
cor(etfs_cleaned$portfolio_stocks,etfs_cleaned$consumer_cyclical)
cor(etfs_cleaned$portfolio_stocks,etfs_cleaned$healthcare)
cor(etfs_cleaned$portfolio_stocks,etfs_cleaned$technology)
cor(etfs_cleaned$portfolio_stocks,etfs_cleaned$energy)
cor(etfs_cleaned$portfolio_stocks,etfs_cleaned$industrials)
#change column names
names(etfs_cleaned)[6]<-"expense_ratio"
names(etfs_cleaned)[7]<-"pct_stocks"
names(etfs_cleaned)[12]<-"finance"
names(etfs_cleaned)[16]<-"tech"
names(etfs_cleaned)[17]<-"return_5_yr"
names(etfs_cleaned)[18]<-"beta_5_yr"
View(etfs_cleaned)
#regsubsets
#install leaps
install.packages("leaps")
library(leaps)
plot(regsubsets(return_5_yr~net_assets+fund_yield+investment+expense_ratio+pct_stocks+price_earnings
+consumer_cyclical+Inverse,data=etfs_cleaned),scale="adjr2",ylab="Adjusted R Squared")
dev.copy(png,'Regsubsets.png')
dev.off()
sum <- lm(return_5_yr ~ investment + pct_stocks + consumer_cyclical + Inverse , data=etfs_cleaned)
summary(sum)
#Outliers
#Categorical variables cannot be searched for outliers
#Investment and Inverse will remain as they are
#Consumer Cyclical Outliers
boxplot(etfs_cleaned$consumer_cyclical , xlab="Consumer Cyclical")
summary(etfs_cleaned$consumer_cyclical)
dev.copy(png,'consumerCyclical.png')
dev.off()
lessthan13CC <- subset(etfs_cleaned,etfs_cleaned$consumer_cyclical < 13)
summary(lessthan13CC$consumer_cyclical)
boxplot(lessthan13CC$consumer_cyclical , xlab="Consumer Cyclical Without Outliers")
dev.copy(png,'consumerCyclical-noOutliers.png')
dev.off()
#Percent Stocks Outliers
boxplot(etfs_cleaned$pct_stocks , xlab="Percent of portfolio in stocks")
summary(etfs_cleaned$pct_stocks)
dev.copy(png,'percentStocks.png')
dev.off()
#Graphical and Numerical Summaries
#Graphical and numerical summary for y variable
boxplot(etfs_cleaned$return_5_yr, xlab="5 Year Return")
hist(etfs_cleaned$return_5_yr, col="grey")
summary(etfs_cleaned$return_5_yr)
mean(etfs_cleaned$return_5_yr)
sd(etfs_cleaned$return_5_yr)
#Graphical and numerical summaries for x variables
#Investments
plot(etfs_cleaned$investment, xlab="Investment Types", ylab="Number of Funds")
summary(etfs_cleaned$investment)
#Consumer Cyclical
boxplot(etfs_cleaned$consumer_cyclical , xlab="Consumer Cyclical")
summary(etfs_cleaned$consumer_cyclical)
mean(etfs_cleaned$consumer_cyclical)
sd(etfs_cleaned$consumer_cyclical)
#Pct Stocks
hist(etfs_cleaned$pct_stocks , xlab="Percent of portfolio in stocks", col="grey")
summary(etfs_cleaned$pct_stocks)
mean(etfs_cleaned$pct_stocks)
sd(etfs_cleaned$pct_stocks)
#Inverse
plot(etfs_cleaned$Inverse, xlab="Inverse ETF")
summary(etfs_cleaned$Inverse)
#Graphical and numerical summaries for each y~x
#Investments
plot(etfs_cleaned$investment, etfs_cleaned$return_5_yr, xlab="Investment Types" , ylab="5 Year Return")
investment <- lm(return_5_yr ~ investment , data = etfs_cleaned)
summary(investment)
plot(investment)
#Consumer Cyclical
plot(etfs_cleaned$consumer_cyclical, etfs_cleaned$return_5_yr, xlab="Consumer Cyclical" , ylab="5 Year Return")
plot(lessthan13CC$consumer_cyclical,etfs_cleaned$return_5_yr)
cyclical <- lm(return_5_yr ~ consumer_cyclical , data = etfs_cleaned)
summary(cyclical)
plot(cyclical)
#Pct Stocks
plot(etfs_cleaned$pct_stocks, etfs_cleaned$return_5_yr, xlab="Percentage Stocks" , ylab="5 Year Return")
stocks <- lm(return_5_yr ~ pct_stocks , data = etfs_cleaned)
summary(stocks)
plot(stocks)
#Inverse
plot(etfs_cleaned$Inverse)
plot(etfs_cleaned$Inverse, etfs_cleaned$return_5_yr, xlab="Inverse" , ylab="5 Year Return")
inverse <- lm(return_5_yr ~ Inverse , data = etfs_cleaned)
summary(inverse)
plot(inverse)
#Multiple Regression Model
final.model <- lm(return_5_yr ~ investment + consumer_cyclical + pct_stocks + Inverse, data = etfs_cleaned)
summary(final.model)
plot(predict(final.model), residuals(final.model))
plot(final.model)
#Prediction Example
predict(final.model, list(investment="Growth", consumer_cyclical=8, pct_stocks=90, Inverse="No"))
|
ae5e0aabc1678bda796dbf7aea9c2ba4925b2479 | b35f8b98770ae2b4ab90edf4dfa1354acbc0d6c4 | /2020/April/TidyTuesday - 15-4-2020.R | f3568cb62c3b5254a121fb2d5d23364e52a17638 | [] | no_license | JuanmaMN/TidyTuesday | d87aff4f27e14dd2a78eb82603206f1478694abf | 1f373d84f5cf0c829bd1944b5f1c7f0b8c4d39e9 | refs/heads/master | 2023-07-24T22:53:45.688036 | 2023-07-11T20:02:31 | 2023-07-11T20:02:31 | 181,566,002 | 11 | 6 | null | null | null | null | UTF-8 | R | false | false | 4,963 | r | TidyTuesday - 15-4-2020.R | # Upload the packages -----------------------------------------------------
library(scales)
library(tidyverse)
library(patchwork)
# Raw data ----------------------------------------------------------------
rankings <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-04-14/rankings.csv')
View(rankings)
# Prepare the data --------------------------------------------------------
rankings_chart_year<-rankings%>% group_by(year) %>%
summarise(total_points=sum(points))
rankings_chart_year_2<-rankings%>% group_by(year,gender) %>% filter(gender !="mixed")%>%
summarise(avg_points=sum(points)/sum(n))
# ribbon ------------------------------------------------------------------
g1<-ggplot(rankings_chart_year, aes(x = year, y = total_points)) +
geom_ribbon(aes(ymax = total_points, ymin = 0),
fill = "#ade6d8", alpha = 0.7) +
geom_line(color = "#6F213F") +
scale_x_continuous(
breaks = c(1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015),
limits = c(1979, 2019),
expand = c(0, 0)
) +
scale_y_continuous(limits = c(0, 350),
expand = c(0, 0)) +
labs(x = "",y = "",
title = "Total number of points",
subtitle = " ",
caption = "") +
guides(fill = NULL) +
theme(
plot.title = element_text(margin = margin(b = 8),
color = "#22222b",face = "bold",size = 14,
hjust = 0.5,
family = "Arial"),
plot.subtitle = element_text(margin = margin(t=10,b = 25),
color = "#22222b", size = 9, family = "Arial",
hjust = 0.5),
plot.caption = element_text(margin = margin(t = 20),
color = "#22222b", size = 10, family = "Arial",
hjust = 0.95),
axis.title.x = element_text(margin = margin(t = 10),
color = "#22222b"),
axis.title.y = element_text(margin = margin(r = 15),
color = "#22222b"),
legend.position = "none",
axis.text.x = element_text(color = "#22222b"),
axis.text.y = element_text(color = "#22222b"),
panel.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.major.y = element_blank(),
panel.grid.minor = element_blank(),
plot.background = element_rect(fill = "#f7f7f7"),
#plot.margin = unit(c(1, 2, 2, 1), "cm"),
axis.ticks = element_blank()
) +
geom_point(x= 1994, y = 308,size=4, shape=21, fill="#CB454A") +
annotate("text", x = 2000, y =265,fontface =2,
hjust = 0.5, color = "#CB454A",
size = 2.5, label = paste0("The Notorious B.I.G. - 140 points \n Nas - 46 points \n Nas ft. A.Z. - 20 points")) +
annotate("text", x = 2000, y = 300,fontface =2,
hjust = 0.5, color = "#000000",
size = 2.5, label = paste0("1994 - Highest number of points - 308"))
g2<-ggplot(rankings_chart_year_2, aes(x = year, y = avg_points)) +
geom_ribbon(aes(ymax = avg_points, ymin = 0),
fill = "#add8e6", alpha = 0.7) +
geom_line(color = "#6F213F") +
scale_y_continuous(expand = expand_scale(mult = 0)) +
scale_x_continuous(
breaks = c(1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015),
limits = c(1979, 2019),
expand = c(0, 0)
)+
labs(x = "",y = "",
title = "Average number of points per vote",
subtitle = "",
caption = "Source:Tidy Tuesday\nVisualization: JuanmaMN (Twitter @Juanma_MN)") +
guides(fill = NULL) +
theme(
plot.title = element_text(margin = margin(b = 8),
color = "#22222b",face = "bold",size = 14,
hjust = 0.5,
family = "Arial"),
plot.subtitle = element_text(margin = margin(t=10,b = 25),
color = "#22222b", size = 9, family = "Arial",
hjust = 0.5),
plot.caption = element_text(margin = margin(t = 20),
color = "#22222b", size = 10, family = "Arial",
hjust = 0.95),
axis.title.x = element_text(margin = margin(t = 10),
color = "#22222b"),
axis.title.y = element_text(margin = margin(r = 15),
color = "#22222b"),
legend.position = "none",
axis.text.x = element_text(color = "#22222b"),
axis.text.y = element_text(color = "#22222b"),
panel.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.major.y = element_blank(),
panel.grid.minor = element_blank(),
plot.background = element_rect(fill = "#f7f7f7"),
axis.ticks = element_blank()
) +
geom_hline(yintercept = 5, color = "red1", size = 0.7)
patchwork <- g1 / g2
patchwork
|
8b0341260cd53fdf6c84cf902a9ef249e0558cf2 | ea7ed2c3dbba844bf10f8f258906da537d5b52fe | /R/tutorials/GA_OULU/ouluWorkshop.R | ac9635a469a9b12fac5ddec78b439141f9b2dc82 | [] | no_license | lifecycle-project/analysis-tutorials | 6fc1cb8f5a58d245520682ee470610fe7b649a00 | 831e1aeba521d24b0aececbf9a0a4c039a3b7912 | refs/heads/master | 2021-06-10T22:42:53.896961 | 2021-03-10T15:17:55 | 2021-03-10T15:17:55 | 136,932,628 | 3 | 1 | null | 2021-03-10T15:17:56 | 2018-06-11T13:48:07 | R | UTF-8 | R | false | false | 5,447 | r | ouluWorkshop.R | # Load the necessary libraries
# General R-packages
library(metafor)
# Specific DataSHIELD packages
library(opal)
library(dsBaseClient)
library(dsStatsClient)
library(dsGraphicsClient)
library(dsModellingClient)
# Setup servers
server <- c("test-opal1", "test-opal2")
url <- c("https://opal1.domain.org", "https://opal2.domain.org")
username <- c("usr1", "usr2")
password <- c("pwd1", "pwd2")
table <- c("Tutorials.tutorial_novice", "Tutorials.tutorial_novice")
logindata <- data.frame(server,url,username,password,table)
#hello
# log out
datashield.logout(opals)
# log in
opals <- datashield.login(logins=logindata1,assign=TRUE)
# what is there?
ds.ls()
# detail of table
ds.summary('D')
#describe the studies:
ds.dim(x='D')
#the "combine" comand allows us to identify the total number of observations and variables pooled across
#all studies:
ds.dim('D', type='combine')
# 1) Multiple linear regression (wide format) examining the association between
# smoking in pregnancy and gestational age at birth in singleton pregnancies.
# Outcome: gestational age in weeks at birth of child, limited to singleton pregnancies and live births
# Exposure: smoking in pregnancy (yes/no)
# Covariates: mother's age at birth, maternal education at birth
# First step - limit to singleton pregnancies and live births
ds.subset(x = 'D', subset = 'D2', logicalOperator = 'plurality==', threshold = 1)
ds.subset(x = 'D2', subset = 'D3', logicalOperator = 'outcome==', threshold = 1)
# check something happened
ds.table1D('D3$plurality')
ds.table1D('D3$outcome')
# create a cohort variable
ds.assign(toAssign = "(D3$cohort_id/D3$cohort_id)", newobj = 'cohort', datasources = opals['test-opal1'])
ds.assign(toAssign = "((D3$cohort_id/D3$cohort_id)+1)", newobj = 'cohort', datasources = opals['test-opal2'])
ds.cbind(x=c('D3', 'cohort'), newobj = 'D4', datasources = opals)
#tabulate the new variable separately for each cohort:
ds.table1D(x='D4$cohort', type='split')
#check the distribution of the outcome variable is approximately normal:
ds.histogram(x='D4$ga_bj')
#Examine whether there is evidence that hgestational age
#is affected by smoking in pregnancy:
ds.meanByClass(x='D4$ga_bj~D4$preg_smk')
#"preg_smk" needs to be a factor variable for this function to work;
#"preg_smk" is currently not a factor variable
#we can check the class (i.e. integer, character, factor etc.)
#of by using the "ds.class" function:
ds.class(x='D4$preg_smk')
#we can us the "ds.asFactor" function to create a new pregnancy smoking variable
#which is a factor variable:
ds.asFactor(x='D4$preg_smk', newobj = 'preg_smk_fact', datasources = opals)
#This new variable/vector is not attached to a data frame (default name D ).
#We can bind it to a data frame using the "cbind" function.
#To do this, the dataframe and the variable we want to attach must be the same length
#We can check their lengths using the command "ds.length"
ds.length (x='preg_smk_fact')
ds.cbind(x=c('D4', 'preg_smk_fact'), newobj = 'D5', datasources = opals)
mean_by_class = ds.meanByClass(x='D5$ga_bj~D5$preg_smk_fact')
mean_by_class
#computation of the standard error of the mean among non-exposed:
sem0 = as.numeric(gsub(".*\\((.*)\\).*", "\\1", mean_by_class[2,1]))/ sqrt(as.numeric(mean_by_class[1,1]))
#95% confidence intervals of the mean
CI_95_0 = c(as.numeric(sub(" *\\(.*", "", mean_by_class[2,1])) - 2*sem0, as.numeric(sub(" *\\(.*", "", mean_by_class[2,1])) + 2*sem0)
#computation of the standard error of the mean among exposed:
sem1 = as.numeric(gsub(".*\\((.*)\\).*", "\\1", mean_by_class[2,2]))/ sqrt(as.numeric(mean_by_class[1,2]))
#95% confidence intervals of the mean
CI_95_1 = c(as.numeric(sub(" *\\(.*", "", mean_by_class[2,2])) - 2*sem1, as.numeric(sub(" *\\(.*", "", mean_by_class[2,2])) + 2*sem1)
CI_95_0
CI_95_1
#Contour plots or heat map plots are used in place of scatter plots
#(which cannot be used as they are potentially disclosive)
# in DataSHIELD to visualize correlation patterns
#For e.g.:
ds.contourPlot(x='D4$ga_bj', y='D4$agebirth_m_d')
ds.heatmapPlot(x='D4$ga_bj', y='D4$agebirth_m_d')
#mean centre maternal age:
mean_cen = ds.mean(x='D4$agebirth_m_d')
my_str = paste0('D4$agebirth_m_d-', mean_cen)
ds.assign(toAssign=my_str, newobj='agebirth_m_d_c')
ds.histogram('agebirth_m_d_c')
ds.cbind(x=c('D4', 'agebirth_m_d_c'), newobj = 'D6', datasources = opals)
# fit the model. This is fitting one model to both datasets as if they were pooled together
ds.glm(formula = 'D6$ga_bj~D6$preg_smk+D6$agebirth_m_d_c+D6$edu_m_0+D6$cohort', data = 'D6', family = 'gaussian')
#the help function gives you an explanation of the commands:
help(ds.glm)
# alternatively you can fit a model to each cohort and then meta analyse the results to allow between cohort variation
st1 = ds.glm(formula = 'D6$ga_bj~D6$preg_smk+D6$agebirth_m_d_c+D6$edu_m_0', data = 'D6', family = 'gaussian', datasources = opals['test-opal1'])
st2 = ds.glm(formula = 'D6$ga_bj~D6$preg_smk+D6$agebirth_m_d_c+D6$edu_m_0', data = 'D6', family = 'gaussian', datasources = opals['test-opal2'])
#yi is a vector with the effect size estimates (B coeffecients)
#sei is a vector with the individual cohort standard errors
yi <- c(st1$coefficients["preg_smk","Estimate"], st2$coefficients["preg_smk","Estimate"])
sei <- c(st1$coefficients["preg_smk","Std. Error"], st2$coefficients["preg_smk","Std. Error"])
#Random effects model:
res <- rma(yi, sei=sei)
res
forest(res)
|
bc7e3bdf7f852e68a96b0515d5713c9d94e62ccc | da3bbf05c5cd587fac21dc1ae000ac09254e6006 | /R/hdf5_to_df.R | e7d115f8fcb8bb19702a7e1de7062b22581ea81b | [] | no_license | rhlee12/Noble | 1804fba5c2ed10fc5dcaa28f5bcb1b12e3e1ea5f | 87243480555931dbd1414b82019c07ad4166a66c | refs/heads/master | 2020-04-24T11:10:05.474520 | 2019-05-15T21:06:38 | 2019-05-15T21:06:38 | 171,916,801 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,739 | r | hdf5_to_df.R | ############################################################################################
#' @title Convert NEON Eddy Covaraince Data From hdf5 to Data Frames
#' @author Robert Lee \email{rlee@battelleecology.org}\cr
#' @description This function will extract a given dataset ('meas.name') from the nested hdf5
#' data structure, and convert it to a data frame. If a save location is specified, a csv of the
#' data will also be saved.
#'
#' @param site Parameter of class character. The 4-letter NEON site code that the data is for.
#' @param hdf5.file Parameter of class character. The path to the hdf5 file to convert.
#' @param meas.name Parameter of class character. The name of the measurement in the hdf5 file
#' to be extracted.
#' @param time.agr What the time difference between sequence values should be, in minutes.
#' @param save.dir Optional. If specified a CSV of the extracted data will be saved to the
#' input directory.
#'
#' @return A data table of mesurements for the requested data product.
#'
#' @keywords eddy covariance, hdf5, process quality, data quality, gaps, commissioning
#' @export
# changelog and author contributions / copyrights
# Robert Lee (2018-03-21)
# original creation
#
##############################################################################################
hdf5.to.df=function(site, files, data.type, meas.name, var.name, bgn.month, end.month, time.agr, save.dir, overwrite=FALSE){
library(magrittr)
### INPUT CHECKING
ok.time=c(1, 30)
ok.meas=c("amrs", "co2Stor", "co2Turb", "fluxHeatSoil", "h2oSoilVol", "h2oStor",
"h2oTurb", "isoCo2", "isoH2o", "presBaro", "radiNet", "soni",
"tempAirLvl", "tempAirTop", "tempSoil")
#ok.vars=c()
if(!meas.name %in% ok.meas){
message("Invalid measurement name selected. Please enter one of the following:")
stop(print(ok.meas))
}
if(!time.agr %in% ok.time){
stop("Invalid temporal aggregation input. Please enter either 1 or 30.")
}
### FILE NAME PARAMETERS
start.date=paste0(bgn.month, "-01")
end.date=Noble::last.day.time(end.month = end.month, time.agr = time.agr)
file.out=paste0(save.dir, "/", "EC_", data.type,"_", meas.name, "_", var.name, "_", start.date, "-", substr(end.date, start = 1, stop = 10), ".csv")
print(file.out)
### GENERATE NEW FLAT DF
if(!file.exists(file.out)|all(file.exists(file.out), overwrite)){
top.ml=Noble::tis_site_config$num.of.mls[Noble::tis_site_config$site.id==site]
# GENERATE H.V.T GROUP MEETING
hor.ver.tmi=paste0("000_0", top.ml, "0_", stringr::str_pad(string = time.agr, width = 2, side = "left", pad = "0"), "m")
troubleshoot=function(hdf5.file){
print(hdf5.file)
try(rhdf5::h5read(file=hdf5.file, paste0(site,'/dp01/', data.type, '/',meas.name,'/', hor.ver.tmi, "/", var.name)))
}
ec.list=lapply(files, troubleshoot)
ec.list=ec.list[lapply(ec.list, class)=="data.frame"]
ec.data=do.call(plyr::rbind.fill, ec.list)
clean.times=function(x){
x %>%
gsub(pattern = "T|Z", replacement = " ") %>%
trimws() %>%
as.POSIXct(tz = "UTC", format="%Y-%m-%d %H:%M:%S") -> out
return(out)
}
ec.data$timeBgn=clean.times(ec.data$timeBgn)
ref.seq=data.frame(startDateTime=Noble:::help.time.seq(from = start.date, to = end.date, time.agr = time.agr))
out=merge(x=ref.seq, y = ec.data, by.x = "startDateTime", by.y = "timeBgn", all.x = TRUE)
write.csv(x = out, file = file.out, row.names = FALSE)
}else{
out=read.csv(file.out, stringsAsFactors = FALSE)
}
rhdf5::h5closeAll()
return(out)
}
|
b028252bf8e90b03ea14340464b92d212fae1700 | a6bd2ecf8481bd78771357635443295270098efd | /papers/lit-review/src/r/summary_stats.R | bce8f45fa392f4693f80c0aacc7848dc00e82505 | [] | no_license | shawes/thesis | d6f8c7e6d6f8fdd99133d2a56cdadafc56c345a1 | c1b853e8329581b3ccc6bfe2efc008b71e2609c3 | refs/heads/master | 2021-03-19T17:18:36.414001 | 2018-12-11T08:12:16 | 2018-12-11T08:12:16 | 54,354,111 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 540 | r | summary_stats.R | library("tidyverse")
library("readr")
library("corrr")
library("ggplot2")
library("dplyr")
clean_dataset <- read_csv("data/lit_review_cleaned.csv")
spec(clean_dataset)
summary(clean_dataset)
isNum <- sapply(clean_dataset, is.numeric)
numeric_data <- select(clean_dataset, which(isNum))
correlated <- numeric_data %>% correlate() # finds all the correlations between numeric values
ggplot(settlement, aes(sr, ss))
geom_point(aes(size = count), alpha = 1/2) +
geom_smooth() +
scale_size_area()
journals <- factor(clean_dataset$Journal) |
b18ec430a8f409148315d3e2fb2a35ab73de6e88 | 8ae74fb56be72eef39a0214a04c29218949ba3fe | /R/internals.R | 6f04e8ed7ca6d968837243f71fc1a3e130942872 | [] | no_license | amrei-stammann/alpaca | 469a26d71683da1deb29cdcc89f544e121ca4258 | c9ce131d949327e8b261f1df9a7d02823c5343ff | refs/heads/master | 2022-09-28T00:22:10.217075 | 2022-09-19T08:23:09 | 2022-09-19T08:23:09 | 116,491,542 | 40 | 8 | null | 2020-11-11T08:56:01 | 2018-01-06T14:58:16 | R | UTF-8 | R | false | false | 9,634 | r | internals.R | ### Internal functions (not exported)
# Coerce `x` to a factor; inputs that already are factors get their unused
# levels dropped instead of being re-coded.
checkFactor <- function(x) {
  if (!is.factor(x)) {
    return(factor(x))
  }
  droplevels(x)
}
# Fitting algorithm (similar to glm.fit)
# Maximizes the GLM log-likelihood by iteratively reweighted least squares,
# with the fixed effects concentrated out: each iteration centers the working
# variables within the fixed-effects groups (centerVariables(), implemented
# elsewhere in the package) before computing the weighted least-squares update
# of the structural parameters.
#
# Args:
#   beta    - starting values for the structural coefficients
#   eta     - starting values for the linear predictor
#   y       - response vector
#   X       - regressor matrix (no intercept column)
#   wt      - prior weights
#   k.list  - per-fixed-effect lists of observation indexes (see getIndexList())
#   family  - a stats 'family' object
#   control - control list; entries read here: center.tol, dev.tol, iter.max,
#             trace, keep.mx
#
# Returns a list with coefficients, eta, weights, Hessian, deviance,
# null.deviance, conv, iter and, if keep.mx is TRUE, the centered regressor
# matrix MX.
feglmFit <- function(beta, eta, y, X, wt, k.list, family, control) {
  # Extract control arguments
  center.tol <- control[["center.tol"]]
  dev.tol <- control[["dev.tol"]]
  # Tolerance handed to the rank-revealing qr.solve() below
  epsilon <- max(min(1.0e-07, dev.tol / 1000.0), .Machine[["double.eps"]])
  iter.max <- control[["iter.max"]]
  trace <- control[["trace"]]
  keep.mx <- control[["keep.mx"]]
  # Compute initial quantities for the maximization routine
  nt <- length(y)
  mu <- family[["linkinv"]](eta)
  dev <- sum(family[["dev.resids"]](y, mu, wt))
  null.dev <- sum(family[["dev.resids"]](y, mean(y), wt))
  # Generate temporary variables; Mnu starts at zero so the first iteration
  # centers nu itself (later iterations warm-start, see the end of the loop)
  Mnu <- as.matrix(numeric(nt))
  MX <- X
  # Start maximization of the log-likelihood
  conv <- FALSE
  for (iter in seq.int(iter.max)) {
    # Store \eta, \beta, and deviance of the previous iteration
    eta.old <- eta
    beta.old <- beta
    dev.old <- dev
    # Compute weights and dependent variable (IRLS working residual nu)
    mu.eta <- family[["mu.eta"]](eta)
    w <- (wt * mu.eta^2) / family[["variance"]](mu)
    w.tilde <- sqrt(w)
    nu <- (y - mu) / mu.eta
    # Centering variables (projects out all fixed effects)
    Mnu <- centerVariables((Mnu + nu), w, k.list, center.tol)
    MX <- centerVariables(MX, w, k.list, center.tol)
    # Compute update step and update \eta
    beta.upd <- as.vector(qr.solve(MX * w.tilde, Mnu * w.tilde, epsilon))
    eta.upd <- nu - as.vector(Mnu - MX %*% beta.upd)
    # Step-halving with three checks
    # 1. finite deviance
    # 2. valid \eta and \mu
    # 3. improvement as in glm2
    rho <- 1.0
    for (inner.iter in seq.int(50L)) {
      eta <- eta.old + rho * eta.upd
      beta <- beta.old + rho * beta.upd
      mu <- family[["linkinv"]](eta)
      dev <- sum(family[["dev.resids"]](y, mu, wt))
      dev.crit <- is.finite(dev)
      val.crit <- family[["valideta"]](eta) && family[["validmu"]](mu)
      imp.crit <- (dev - dev.old) / (0.1 + abs(dev)) <= - dev.tol
      if (dev.crit && val.crit && imp.crit) break
      rho <- rho / 2.0
    }
    # Check if step-halving failed (non-finite deviance or invalid \eta / \mu)
    if (!dev.crit || !val.crit) {
      stop("Inner loop failed; cannot correct step size.", call. = FALSE)
    }
    # Stop if we do not improve (restore the previous iterate)
    if (!imp.crit) {
      eta <- eta.old
      beta <- beta.old
      dev <- dev.old
      mu <- family[["linkinv"]](eta)
    }
    # Progress information
    if (trace) {
      cat("Deviance=", format(dev, digits = 5L, nsmall = 2L), "Iterations -", iter, "\n")
      cat("Estimates=", format(beta, digits = 3L, nsmall = 2L), "\n")
    }
    # Check convergence (relative change of the deviance)
    dev.crit <- abs(dev - dev.old) / (0.1 + abs(dev))
    if (trace) cat("Stopping criterion=", dev.crit, "\n")
    if (dev.crit < dev.tol) {
      if (trace) cat("Convergence\n")
      conv <- TRUE
      break
    }
    # Update starting guesses for acceleration: subtracting nu keeps only the
    # previous projection, warm-starting the next centerVariables() call
    Mnu <- Mnu - nu
  }
  # Information if convergence failed
  if (!conv && trace) cat("Algorithm did not converge.\n")
  # Update weights at the final estimates
  mu.eta <- family[["mu.eta"]](eta)
  w <- (wt * mu.eta^2) / family[["variance"]](mu)
  # Center variables
  MX <- centerVariables(X, w, k.list, center.tol)
  # Recompute Hessian
  H <- crossprod(MX * sqrt(w))
  # Generate result list
  reslist <- list(
    coefficients = beta,
    eta = eta,
    weights = wt,
    Hessian = H,
    deviance = dev,
    null.deviance = null.dev,
    conv = conv,
    iter = iter
  )
  # Update result list
  if (keep.mx) reslist[["MX"]] <- MX
  # Return result list
  reslist
}
# Efficient offset algorithm to update the linear predictor
# Given a fitted 'feglm' object and a fixed offset, iterates the same
# IRLS-with-centering scheme as feglmFit() for a model that contains only the
# fixed effects (no structural regressors) plus the offset.
# Fix: removed the unused local extraction of object[["formula"]].
#
# Args:
#   object - a fitted model object of class 'feglm'
#   offset - numeric vector added to the linear predictor of every observation
#
# Returns the converged linear predictor \eta.
feglmOffset <- function(object, offset) {
  # Check validity of 'object'
  if (!inherits(object, "feglm")) {
    stop("'feglmOffset' called on a non-'feglm' object.")
  }
  # Extract required quantities from result list
  control <- object[["control"]]
  data <- object[["data"]]
  wt <- object[["weights"]]
  family <- object[["family"]]
  lvls.k <- object[["lvls.k"]]
  nt <- object[["nobs"]][["nobs"]]
  k.vars <- names(lvls.k)
  # Extract dependent variable
  y <- data[[1L]]
  # Extract control arguments
  center.tol <- control[["center.tol"]]
  dev.tol <- control[["dev.tol"]]
  iter.max <- control[["iter.max"]]
  # Generate auxiliary list of indexes to project out the fixed effects
  k.list <- getIndexList(k.vars, data)
  # Compute starting guess for \eta (family-specific weighted-mean transform,
  # analogous to the initialization in glm.fit)
  if (family[["family"]] == "binomial") {
    eta <- rep(family[["linkfun"]](sum(wt * (y + 0.5) / 2.0) / sum(wt)), nt)
    # eta <- rep(mean(family[["linkfun"]]((y + 0.5) / 2.0)), nt)
  } else if (family[["family"]] %in% c("Gamma", "inverse.gaussian")) {
    eta <- rep(family[["linkfun"]](sum(wt * y) / sum(wt)), nt)
    # eta <- rep(mean(family[["linkfun"]](y)), nt)
  } else {
    eta <- rep(family[["linkfun"]](sum(wt * (y + 0.1)) / sum(wt)), nt)
    # eta <- rep(mean(family[["linkfun"]](y + 0.1)), nt)
  }
  # Compute initial quantities for the maximization routine
  mu <- family[["linkinv"]](eta)
  dev <- sum(family[["dev.resids"]](y, mu, wt))
  Myadj <- as.matrix(numeric(nt))
  # Start maximization of the log-likelihood
  for (iter in seq.int(iter.max)) {
    # Store \eta and deviance of the previous iteration
    eta.old <- eta
    dev.old <- dev
    # Compute weights and dependent variable (working response net of offset)
    mu.eta <- family[["mu.eta"]](eta)
    w <- (wt * mu.eta^2) / family[["variance"]](mu)
    yadj <- (y - mu) / mu.eta + eta - offset
    # Centering dependent variable and compute \eta update
    # (re-centering Myadj + yadj warm-starts the centering algorithm)
    Myadj <- centerVariables((Myadj + yadj), w, k.list, center.tol)
    eta.upd <- yadj - as.vector(Myadj) + offset - eta
    # Step-halving with three checks
    # 1. finite deviance
    # 2. valid \eta and \mu
    # 3. improvement as in glm2
    rho <- 1.0
    for (inner.iter in seq.int(50L)) {
      eta <- eta.old + rho * eta.upd
      mu <- family[["linkinv"]](eta)
      dev <- sum(family[["dev.resids"]](y, mu, wt))
      dev.crit <- is.finite(dev)
      val.crit <- family[["valideta"]](eta) && family[["validmu"]](mu)
      imp.crit <- (dev - dev.old) / (0.1 + abs(dev)) <= - dev.tol
      if (dev.crit && val.crit && imp.crit) break
      rho <- rho / 2.0
    }
    # Check if step-halving failed
    if (!dev.crit || !val.crit) {
      stop("Inner loop failed; cannot correct step size.", call. = FALSE)
    }
    # Check termination condition (relative change of the deviance)
    if (abs(dev - dev.old) / (0.1 + abs(dev)) < dev.tol) break
    # Update starting guesses for acceleration
    Myadj <- Myadj - yadj
  }
  # Return \eta
  eta
}
# Generate auxiliary list of indexes for different sub panels
# For every fixed-effects variable in 'k.vars', returns the zero-based row
# indexes of 'data' grouped by that variable's levels (zero-based, presumably
# for consumption by compiled code).
getIndexList <- function(k.vars, data) {
  row.idx <- seq.int(0L, nrow(data) - 1L)
  lapply(k.vars, function(kv) split(row.idx, data[[kv]]))
}
# Compute the score matrix (per-observation gradient contributions) of a
# fitted model object: centered regressors scaled by weighted working
# residuals evaluated at the stored linear predictor.
getScoreMatrix <- function(object) {
  # Unpack the pieces of the fit required for the score
  ctrl <- object[["control"]]
  dat <- object[["data"]]
  lp <- object[["eta"]]
  pwt <- object[["weights"]]
  fam <- object[["family"]]
  # Recompute working weights and working residuals at the linear predictor;
  # the response is taken from the first column of the model data
  resp <- dat[[1L]]
  fit <- fam[["linkinv"]](lp)
  dmu <- fam[["mu.eta"]](lp)
  ww <- (pwt * dmu^2) / fam[["variance"]](fit)
  wres <- (resp - fit) / dmu
  if (ctrl[["keep.mx"]]) {
    # The centered regressor matrix was cached on the fit object
    MX <- object[["MX"]]
  } else {
    # Rebuild the centered regressor matrix: project out the fixed effects
    # using the auxiliary index list over the stored fixed-effect levels
    k.list <- getIndexList(names(object[["lvls.k"]]), dat)
    # First right-hand side of the multi-part formula, intercept dropped
    X <- model.matrix(object[["formula"]], dat, rhs = 1L)[, - 1L, drop = FALSE]
    nms.sp <- attr(X, "dimnames")[[2L]]
    attr(X, "dimnames") <- NULL
    MX <- centerVariables(X, ww, k.list, ctrl[["center.tol"]])
    colnames(MX) <- nms.sp
  }
  # Score = centered regressors scaled row-wise by residual * weight
  MX * (wres * ww)
}
# Second- and third-order derivatives of the mean with respect to the linear
# predictor for the links of 'binomial()': logit, probit, cloglog, with
# cauchit handled by the fall-through branch. `order` selects 2L or 3L.
partialMuEta <- function(eta, family, order) {
  link <- family[["link"]]
  # Safeguard \eta for non-logit links by a round trip through the mean scale
  if (link != "logit") {
    eta <- family[["linkfun"]](family[["linkinv"]](eta))
  }
  # First derivative d\mu/d\eta, a common factor of every formula below
  f <- family[["mu.eta"]](eta)
  if (order == 2L) {
    # Second-order derivative d^2\mu/d\eta^2
    switch(
      link,
      logit = f * (1.0 - 2.0 * family[["linkinv"]](eta)),
      probit = - eta * f,
      cloglog = f * (1.0 - exp(eta)),
      # default: cauchit
      - 2.0 * eta / (1.0 + eta^2) * f
    )
  } else {
    # Third-order derivative d^3\mu/d\eta^3
    switch(
      link,
      logit = f * ((1.0 - 2.0 * family[["linkinv"]](eta))^2 - 2.0 * f),
      probit = (eta^2 - 1.0) * f,
      cloglog = f * (1.0 - exp(eta)) * (2.0 - exp(eta)) - f,
      # default: cauchit
      (6.0 * eta^2 - 2.0) / (1.0 + eta^2)^2 * f
    )
  }
}
# Draw a random five-letter lowercase name that does not clash with any
# existing column of `data`, suitable for use as a temporary variable name.
tempVar <- function(data) {
  taken <- colnames(data)
  repeat {
    candidate <- paste0(sample(letters, 5L, replace = TRUE), collapse = "")
    # Keep sampling until the candidate is collision-free
    if (!(candidate %in% taken)) {
      return(candidate)
    }
  }
}
# Unload
.onUnload <- function(libpath) {
library.dynam.unload("alpaca", libpath)
} |
784119a5378ed520b3a125c59b5c8b38ad1dd015 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.machine.learning/man/frauddetector_get_lists_metadata.Rd | 1f5b73f801ea5ad3f489b3189a265952ccbc3264 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 784 | rd | frauddetector_get_lists_metadata.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/frauddetector_operations.R
\name{frauddetector_get_lists_metadata}
\alias{frauddetector_get_lists_metadata}
\title{Gets the metadata of either all the lists under the account or the
specified list}
\usage{
frauddetector_get_lists_metadata(
name = NULL,
nextToken = NULL,
maxResults = NULL
)
}
\arguments{
\item{name}{The name of the list.}
\item{nextToken}{The next token for the subsequent request.}
\item{maxResults}{The maximum number of objects to return for the request.}
}
\description{
Gets the metadata of either all the lists under the account or the specified list.
See \url{https://www.paws-r-sdk.com/docs/frauddetector_get_lists_metadata/} for full documentation.
}
\keyword{internal}
|
12db4083294b9335c0761cc7614a98e3406aa191 | cce10ff67e665a13bb4d061b979a5a49d58a6b62 | /man/make.placeholder.info.Rd | 6b5ab723d22c3e109f56bbfa19e0bf37f2fc0ed3 | [] | no_license | jimsforks/rmdtools | 680ff42416f23f9c2580e4ba4e71f7e9ab035bfd | 3c4ac9ac53e265ae375259123edd31232e0e5af1 | refs/heads/master | 2023-01-04T17:57:27.953136 | 2020-10-15T07:29:03 | 2020-10-15T07:29:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 290 | rd | make.placeholder.info.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ph.r
\name{make.placeholder.info}
\alias{make.placeholder.info}
\title{Make an info for a placeholder object}
\usage{
make.placeholder.info(txt, type, form)
}
\description{
Make an info for a placeholder object
}
|
8c058ac845a1735eb0292cc9896913289e374e33 | 979c583bb8154b0b12203893b217eb1e9e0770c0 | /man/fb_userId.Rd | 6cd6fad8b0f8e7492cfaeb851de5107f88d2c19f | [] | no_license | lynuhs/fbAdsR | 018906200d4d9fe3a688dc46b2fd1285b1f1f86a | 591317bcc45f050dd49fb8f48fe26bbcab8d9897 | refs/heads/master | 2020-04-23T09:10:16.839050 | 2019-03-18T17:43:25 | 2019-03-18T17:43:25 | 171,059,965 | 5 | 0 | null | null | null | null | UTF-8 | R | false | true | 311 | rd | fb_userId.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fb_userId.R
\name{fb_userId}
\alias{fb_userId}
\title{Get the User ID for the authorized Facebook user}
\usage{
fb_userId()
}
\description{
This function will retrieve the User ID for the authenticated Facebook user.
}
\examples{
fb_userId()
}
|
fb989a4efe7dde5529a85834404cfbf0bb9bb7a3 | 88bcfd990ec7822b76cc0123b06c45f2fd96df05 | /R/sourceAll.R | b5cc2f63423fefa46ac54d2d2ea4fd1e7578db7e | [
"MIT"
] | permissive | neuroccino/flexTeaching | bd9f2e8c7bd2fc994e190744aafb852aac38a124 | 4897d983d1b5add68f9f7df9bcfb3ceb0344f477 | refs/heads/master | 2023-01-05T08:12:16.591157 | 2022-01-04T12:36:17 | 2022-01-04T12:36:17 | 71,563,522 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 423 | r | sourceAll.R |
#' Source all the files indicated in the assignment data
#'
#' @param assignment_data data for the particular assignment
#' @param e environment in which to source
#'
#' @return
#'
#' @examples
sourceAll <- function(assignment_data, e){
if(length(assignment_data$source)){
src = file.path(assignment_data$path, assignment_data$source)
for(s in src)
source(s, local = e, chdir = TRUE)
}
return()
}
|
478f2d8a7a514937dfc85a57e6c7d1c8af88f046 | a190758d4f8607d8f69ada74150c8eeefa0ff85b | /r_scripts/vc4_viz.R | 40b9303cd3dcf6408e11af968bfd033c46771eb5 | [] | no_license | MattSkiff/first_repo | b3d854426f2e257a790e39247693fc378d28dd57 | a1b827c246771157ec8b260b1bb537ee888dee49 | refs/heads/master | 2021-06-19T19:35:02.758906 | 2021-02-16T03:27:40 | 2021-02-16T03:27:40 | 172,861,596 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,084 | r | vc4_viz.R | # author: matthew skiffington
# purpose: plain viz of vc4 (surpassing vc dimension) for linear classifiers to go in dissertation - 16 plots
# highlights when data isn't shattered
# randomly generates 4 points and fits a glm + plots decision boundary
# original source: glm code adapted from:
# glm code adapted from : https://stats.stackexchange.com/questions/6206/how-to-plot-decision-boundary-in-r-for-logistic-regression-model/6207
# plot code adapted from: https://www.r-bloggers.com/beyond-basic-r-plotting-with-ggplot2-and-multiple-plots-in-one-figure/
library(ggplot2) # viz
library(cowplot) # multi-viz
# png(filename="vc_4.png",
# type="cairo",
# units="px",
# width=1800,
# height=1800,
# pointsize=12,
# res=76)
# Randomised plot generator: draws four random points (two per class), fits a
# logistic-regression classifier, and plots its linear decision boundary.
# Arrangements the linear classifier fails to shatter (at least one point
# misclassified) are highlighted with a red panel border.
vc_4.func <- function(x) {
  # x is the iteration index supplied by lapply(); unused, kept for the interface
  rand_points.vec <- runif(n = 8, min = 0, max = 6)
  class_labels.vec <- c(
    "Class 1",
    "Class 1",
    "Class 2",
    "Class 2"
  )
  vc_ex.df <- data.frame(
    x = rand_points.vec[1:4],
    y = rand_points.vec[5:8],
    Class = class_labels.vec
  )
  model <- glm(Class ~ ., family = binomial(link = 'logit'), data = vc_ex.df)
  # Decision boundary b0 + b1*x + b2*y = 0 rewritten as y = intercept + slope * x
  slope.num <- coef(model)[2] / (-coef(model)[3])
  intercept.num <- coef(model)[1] / (-coef(model)[3])
  # The points are shattered iff all four are classified correctly
  shattered.bool <- sum(round(predict(model, type = 'response')) == (as.numeric(vc_ex.df$Class) - 1)) == 4
  if (!shattered.bool) {
    red_box.geom <- theme(panel.border = element_rect(colour = "red", fill = NA, size = 3))
  } else {
    red_box.geom <- NULL
  }
  g <- ggplot(data = vc_ex.df) +
    # BUG FIX: the original geom_point() call ended with a trailing comma,
    # passing an empty argument that errors when the layer is evaluated
    geom_point(mapping = aes(x = x, y = y, fill = Class), colour = 'black', size = 2, shape = 21, stroke = 0.5) +
    # BUG FIX: the original passed a bare `linetype` symbol with no value to
    # geom_abline(); give it an explicit value so the call is well-formed
    geom_abline(intercept = intercept.num, slope = slope.num, linetype = "solid", colour = 'black', size = 1) +
    ylim(0, 6) +
    xlim(0, 6) +
    theme_light() +
    theme(axis.title = element_blank(),
          axis.text = element_blank()) +
    theme(legend.position = 'none') +
    theme(axis.ticks = element_blank()) +
    red_box.geom
  return(g)
}
# Build the 16 randomised panels; glm() warnings (e.g. from perfect
# separation of only four points) are expected and suppressed
vc_4_plots.ls <- suppressWarnings(lapply(FUN = vc_4.func,1:16)) # create plot list
# Title and subtitle rendered as cowplot label grobs
title <- ggdraw() +
draw_label("Surpassing the Vapnik Chervonenkis Dimension \n of a Linear Classifier",
fontface='bold',
size = 10)
sub <- ggdraw() +
draw_label("Binary data randomly generated. Logistic regression classifier \n fitted with decision boundary plotted in black.\nNon-shattered scenarios highlighted in red.",
size = 8)
# Arrange the 16 panels into a 4x4 grid
plots.grid <- plot_grid(plotlist = vc_4_plots.ls,nrow = 4,ncol = 4) # create plot grid
# Stack title / grid / subtitle vertically and write the figure to vc_4.png
plot_grid(title,plots.grid,sub,ncol = 1, rel_heights = c(0.1,0.9,0.1)) +
ggsave2("vc_4.png",
width = 20,
height = 20,
units = 'cm',
dpi = 600,
type = "cairo-png") # final plot
# dev.off()
|
7ce01c04d0a5a20216ea15b7b850612a6cc38ad5 | 2c2941515fa0db309db1634bb18d907b481ea42f | /Bayesian classifier.R | 4d8d2d68f1df8a8d5c144a7c1f8314ce77992674 | [] | no_license | andy400400/Data-Ming-Exercise | fbf661a000a7bd76744fb616c66190fecd0ae4e2 | d6a4b14a66eda4f5a1eca7f99f3596684814f864 | refs/heads/master | 2021-04-26T23:03:16.175915 | 2018-05-23T14:16:59 | 2018-05-23T14:16:59 | 123,923,570 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,338 | r | Bayesian classifier.R | library(MASS)
library(Rcpp)
library(RSNNS)
# Load the Pima Indians diabetes splits shipped with MASS
data("Pima.tr")
data("Pima.te")
set.seed(1111)
Pima <- rbind(Pima.tr, Pima.te)
level_name <- NULL
for (i in seq_len(7L)) {
  # Convert each numeric predictor to a two-level ordered factor and
  # record the resulting level labels
  Pima[, i] <- cut(Pima[, i], breaks = 2, ordered_result = TRUE, include.lowest = TRUE)
  level_name <- rbind(level_name, levels(Pima[, i]))
}
# Transform the collected labels to a data.frame for inspection
level_name <- data.frame(level_name)
row.names(level_name) <- colnames(Pima)[1:7]
colnames(level_name) <- paste("Group", 1:2, sep = "")
# Discretised attribute levels (comment translated from Chinese)
level_name
# Set training data and testing data.
# BUG FIX: the original test split started at row 200, so row 200 appeared
# in both the training and the testing set; start the test set at row 201.
Pima.tr <- Pima[1:200, ]
Pima.te <- Pima[201:nrow(Pima), ]
#---------------------------------------------------------------------------------------------
library(bnlearn)
# Learn a naive Bayes structure with "type" (diabetes status) as the class
bn <- naive.bayes(Pima.tr, "type")
plot(bn)
# NOTE(review): parameters are fitted on the TEST set here (and for the TAN
# model below); fitting on Pima.tr would be the usual protocol — confirm intent.
fitted <- bn.fit(bn, Pima.te)
pred <- predict(fitted, Pima.te)
outcome <- Pima.te[, "type"]
tab <- table(pred, outcome)
# Accuracy = proportion of correct predictions (the confusion-matrix diagonal)
acc <- sum(diag(tab)) / sum(tab)
#---------------------------------------------------------------------------------------------
# Construct a tree-augmented naive Bayes (TAN) network
tan <- tree.bayes(Pima.tr, "type")
plot(tan)
net_fitted <- bn.fit(tan, Pima.te, method = "bayes")
net_pred <- predict(net_fitted, Pima.te)
# BUG FIX: the original tabulated against the undefined variable `train`,
# which crashes; compare predictions with the observed test outcomes instead
net_tab <- table(net_pred, outcome)
net_acc = sum(diag(net_tab)) / sum(net_tab) |
dcf5d2857294a8453803d93913f3ffb0dc169141 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/styler/examples/parse_safely.Rd.R | 3ec8f862d8b9ce826a47a4d9b130144c022343de | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 455 | r | parse_safely.Rd.R | library(styler)
# Auto-extracted example script for styler's internal parse_safely() helper
# (generated from the package's Rd documentation; headers below come from Rd).
### Name: parse_safely
### Title: Save parsing from text
### Aliases: parse_safely
### Keywords: internal
### ** Examples
## Not run:
##D styler:::parse_safely("a + 3 -4 -> x\r\n glück + 1")
##D # This cannot be detected as an EOL style problem because the first
##D # line ends as expected with \n
##D styler:::parse_safely("a + 3 -4 -> x\nx + 2\r\n glück + 1")
## End(Not run)
# Runnable example; parse_safely is unexported, hence the triple colon
styler:::parse_safely("a + 3 -4 -> \n glück + 1")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.