blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d4dfcdf9443cd638520b08a2ddf2056a0858fc23
|
709c16710d7cae612de6c779cafb7199813e0f24
|
/AhasHfBkleAmputation/extras/EvidenceExplorer/global.R
|
10e36d1ac7e347070212f1b43cf9b324fd235059
|
[
"Apache-2.0"
] |
permissive
|
OHDSI/StudyProtocols
|
87a17fc3c00488b350f9416c584a1d0334d8dfcb
|
8de0454c6be4c120ba97d7376907d651327573a4
|
refs/heads/master
| 2023-04-27T18:59:35.785026
| 2020-02-16T00:32:52
| 2020-02-16T00:32:52
| 27,415,586
| 37
| 41
| null | 2023-04-25T19:55:45
| 2014-12-02T04:49:53
|
R
|
UTF-8
|
R
| false
| false
| 2,937
|
r
|
global.R
|
# Whether effect estimates should be hidden in the UI (study blinding flag).
blind <- FALSE
# Load every per-database HOI (health outcomes of interest) results file.
fileNames <- list.files(path = "data", pattern = "resultsHois_.*.rds", full.names = TRUE)
resultsHois <- lapply(fileNames, readRDS)
# Union of column names across all result sets; used to pad missing columns below.
allColumns <- unique(unlist(lapply(resultsHois, colnames)))
addMissingColumns <- function(results) {
  # Pad `results` so it carries every column named in the file-level
  # `allColumns` vector; newly added columns are filled with NA.
  # Returns the padded data frame (input is not modified in place).
  absentCols <- setdiff(allColumns, colnames(results))
  for (col in absentCols) {
    results[, col] <- rep(NA, nrow(results))
  }
  return(results)
}
# Pad each result set to the common column set, then stack into one data frame.
resultsHois <- lapply(resultsHois, addMissingColumns)
resultsHois <- do.call(rbind, resultsHois)
# Negative-control estimates (assumed to share one schema across files).
fileNames <- list.files(path = "data", pattern = "resultsNcs_.*.rds", full.names = TRUE)
resultsNcs <- lapply(fileNames, readRDS)
resultsNcs <- do.call(rbind, resultsNcs)
# Covariate name lookup tables; de-duplicated after stacking.
fileNames <- list.files(path = "data", pattern = "covarNames_.*.rds", full.names = TRUE)
covarNames <- lapply(fileNames, readRDS)
covarNames <- do.call(rbind, covarNames)
covarNames <- unique(covarNames)
formatDrug <- function(x) {
  # Map the verbose drug-class labels used in the result files to the short
  # display names shown in the app. Labels without a mapping (including NA)
  # pass through unchanged.
  shortNames <- c(
    "empagliflozin or dapagliflozin" = "other SGLT2i",
    "any DPP-4 inhibitor, GLP-1 agonist, or other select AHA" = "select non-SGLT2i",
    "any DPP-4 inhibitor, GLP-1 agonist, TZD, SU, insulin, or other select AHA" = "all non-SGLT2i"
  )
  out <- x
  hit <- x %in% names(shortNames)
  out[hit] <- unname(shortNames[x[hit]])
  return(out)
}
# Shorten drug-class labels, then build "target vs. comparator" strings.
resultsHois$targetDrug <- formatDrug(resultsHois$targetDrug)
resultsHois$comparatorDrug <- formatDrug(resultsHois$comparatorDrug)
resultsHois$comparison <- paste(resultsHois$targetDrug, resultsHois$comparatorDrug, sep = " vs. ")
# Choice lists for the UI selection widgets.
comparisons <- unique(resultsHois$comparison)
comparisons <- comparisons[order(comparisons)]
outcomes <- unique(resultsHois$outcomeName)
establishCvds <- unique(resultsHois$establishedCvd)
priorExposures <- unique(resultsHois$priorExposure)
timeAtRisks <- unique(resultsHois$timeAtRisk)
timeAtRisks <- timeAtRisks[order(timeAtRisks)]
# NOTE(review): "evenType(s)" looks like a typo for "eventType(s)", but it must
# match the column name inside the .rds files -- confirm before renaming.
evenTypes <- unique(resultsHois$evenType)
psStrategies <- unique(resultsHois$psStrategy)
dbs <- unique(resultsHois$database)
# Flag meta-analytic estimates with high between-database heterogeneity (I2 > 0.4).
heterogeneous <- resultsHois[resultsHois$database == "Meta-analysis (DL)" & !is.na(resultsHois$i2) & resultsHois$i2 > 0.4, c("targetId", "comparatorId", "outcomeId", "analysisId")]
heterogeneous$heterogeneous <- "<span style=\"color:red\">yes</span>"
# Left join so every row of a flagged target-comparator-outcome-analysis combo
# gets the HTML flag; unflagged rows become NA and are blanked below.
resultsHois <- merge(resultsHois, heterogeneous, all.x = TRUE)
resultsHois$heterogeneous[is.na(resultsHois$heterogeneous)] <- ""
# Static HTML fragments shown in the app's info dialogs.
dbInfoHtml <- readChar("DataSources.html", file.info("DataSources.html")$size)
comparisonsInfoHtml <- readChar("Comparisons.html", file.info("Comparisons.html")$size)
outcomesInfoHtml <- readChar("Outcomes.html", file.info("Outcomes.html")$size)
cvdInfoHtml <- readChar("Cvd.html", file.info("Cvd.html")$size)
priorExposureInfoHtml <- readChar("PriorExposure.html", file.info("PriorExposure.html")$size)
tarInfoHtml <- readChar("Tar.html", file.info("Tar.html")$size)
eventInfoHtml <- readChar("Event.html", file.info("Event.html")$size)
psInfoHtml <- readChar("Ps.html", file.info("Ps.html")$size)
|
3fbcf8f46c48480902b6f2a849fa52863c465451
|
a8130957d7af1c4f3d60c06fa9f7840181135606
|
/class work/2-4-15.R
|
7798a181d3a9b3b7e07adefef067c2dc657c3f76
|
[] |
no_license
|
fehercm/STT-3851
|
fb99fb189ae5eff0a8cdd4923be18aa6658468a0
|
a2db792b53a4a0764c5d96947ab9933e0f8f01ca
|
refs/heads/master
| 2021-01-02T09:19:41.594847
| 2015-04-28T15:51:05
| 2015-04-28T15:51:05
| 29,276,097
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 427
|
r
|
2-4-15.R
|
# Class exercise: cross-tabulate a personality survey (one row per student,
# with Gender plus Myers-Briggs-style preference letters).
personality <- read.csv(file=url("http://www1.appstate.edu/~thomleyje/R-datafiles/PersonalitySTT1810.csv"))
# Counts of Gender by Extravert/Introvert preference.
table(personality$Gender, personality$EorI)
# Same table with row totals appended as a "Sum" column (margin 2).
addmargins(table(personality$Gender, personality$EorI),2)
# Column-wise proportions: each E/I column sums to 1.
prop.table(table(personality$Gender, personality$EorI),2)
# Overall proportions with both row and column totals.
addmargins(prop.table(table(personality$Gender, personality$EorI)))
# Row-wise percentages of Gender by Sensing/Intuition, rounded to 2 dp,
# with totals appended as a "Sum" column.
addmargins(round(100*prop.table(table(personality$Gender, personality$SorN),1),2),2)
|
fb5033c03009268f52d76e78a35cecadc4933f5a
|
60d6cb128057cd61aa17812fa61fdfc96f8cf1d9
|
/plot1.R
|
5749a733186085f139a4f378b11178786435847f
|
[] |
no_license
|
jackgidding/ExData_Plotting1
|
211a7e817fa3a25c4e2dac6ccfb94f8729c58add
|
321fd6a3a6760e66a359fbacbb79c3cac76c1307
|
refs/heads/master
| 2020-12-24T10:10:31.577383
| 2015-02-08T03:32:39
| 2015-02-08T03:32:39
| 30,461,272
| 0
| 0
| null | 2015-02-07T16:17:40
| 2015-02-07T16:17:40
| null |
UTF-8
|
R
| false
| false
| 930
|
r
|
plot1.R
|
## plot1.R
## Author: Jack Gidding
##
## Purpose: Generate the 1st chart for the Exploratory Data Analysis course
## Project 1.
##
## The chart is a histogram of the UCI data, Global Active Power,
## minute-by-minute samples during January and February 2007.
## The x-label and title are set programmatically. The y-axis is
## the default label.
##
## Output: plot1.png
##
## Notes: Please see the README.md for further information
## Load and prepare the UCI data once, since it is a time expensive operation.
## If the UCI data is already in memory, it does not need to be reloaded for
## each chart.
if (!exists("uci")) {
source("loadUCI.R")
}
## Create the histogram on the screen device
hist(uci$Global_active_power,
col="red",
xlab="Global Active Power (kilowatts)",
main="Global Active Power")
## Copy the screen device to a PNG file, then close the PNG device.
## NOTE(review): dev.copy() re-renders at the current screen-device size, so the
## PNG may not be exactly 480x480; opening png() before plotting is more
## predictable -- confirm against the assignment requirements.
dev.copy(png, file="plot1.png")
dev.off()
|
03e3d03147b9e446b0b445eef939c440ec0b958a
|
9b3cff0dd9a6e0402747cb68083f71bd3705ebe1
|
/man/correctGeno.Rd
|
01b1d745386c336eb7edede4638606dbd3587032
|
[] |
no_license
|
cran/MPR.genotyping
|
f3656d7e298f5999b80e62ac15f2ac29c25c65d7
|
9d4112d6ddf825f9701d5631b3123b19ef39b67f
|
refs/heads/master
| 2021-05-05T06:34:14.060691
| 2018-01-24T17:24:42
| 2018-01-24T17:24:42
| 118,804,856
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 161
|
rd
|
correctGeno.Rd
|
\name{correctGeno}
\alias{correctGeno}
\title{
Correct Geno
}
\description{
Correct genotype data (see \code{\link{hmm.vitFUN.rils}} for the underlying HMM-based genotype estimation).
}
\seealso{
\code{\link{hmm.vitFUN.rils}}
}
\keyword{ HMM }
|
52595ab012ef1d367e1d9410d84d9c7c877c861b
|
684aba486bcaef860a928fe6c0e9a53bdcb41cbc
|
/plot3.R
|
59aef363195f3584f3ea08b9015503eee0f51c90
|
[] |
no_license
|
understructure/ExData_Plotting1
|
a6b8e926b789f79a4bd000e2dbfee60236632be8
|
4052545bb481ef1fd4b7060dd10bc6661237e716
|
refs/heads/master
| 2021-01-16T21:40:49.146060
| 2015-04-13T23:35:37
| 2015-04-13T23:35:37
| 33,484,632
| 0
| 0
| null | 2015-04-06T13:56:49
| 2015-04-06T13:56:46
| null |
UTF-8
|
R
| false
| false
| 1,800
|
r
|
plot3.R
|
# plot3.R -- download the UCI household power data set and draw the
# three-series sub-metering line chart (course project plot 3).
#
# NOTE: If you're on Windows or an OS without this directory,
# you must change this to an existing directory in order to
# run this code successfully!
myDir <-"~/Downloads"
setwd(myDir)
# get the file; you may need curl installed for method="curl" to work
www <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
temporaryFile <- tempfile()
download.file(www,destfile=temporaryFile, method="curl")
# BUG FIX: the original used exdir="myDir" (a literal string), which extracted
# into a ./myDir subdirectory while read.csv() below reads from the working
# directory, so the read failed on a fresh machine. Extract into the working
# directory instead.
unzip(temporaryFile, exdir=".")
pwr <- read.csv("household_power_consumption.txt", stringsAsFactors=FALSE, sep=";")
# get date into proper format
pwr$date2 <- as.Date(pwr$Date, "%d/%m/%Y")
# create smaller dataset: only 2007-02-01 and 2007-02-02
pwr2 <- pwr[pwr$date2 == "2007-02-01" | pwr$date2 == "2007-02-02",]
# sub-metering columns arrive as character ("?" marks missing); make numeric
pwr2$Sub_metering_1 <- as.numeric(as.character(pwr2$Sub_metering_1))
pwr2$Sub_metering_2 <- as.numeric(as.character(pwr2$Sub_metering_2))
pwr2$Sub_metering_3 <- as.numeric(as.character(pwr2$Sub_metering_3))
# create and save file
# NOTE : this is also plot 4-1
png(filename = "plot3.png", width = 480, height = 480, units = "px", bg="white")
# empty frame first (type="n"), then overlay one line per sub-meter
plot(pwr2$Sub_metering_1, xlab=NA, type="n", ylab="Energy sub metering", xaxt='n', yaxt='n', ylim=c(0,40))
# two days of minute data: 1440 samples/day, hence ticks at 0, 1440, 2880
axis(1, at=c(0,1440,2880), labels=c("Thu","Fri", "Sat"))
lines(pwr2$Sub_metering_1, col="black")
lines(pwr2$Sub_metering_2, col="red")
lines(pwr2$Sub_metering_3, col="blue")
axis(2, at=c(0, 10, 20, 30), labels=c("0", "10", "20", "30"))
legend("topright", c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"),
lty=c(1,1), # gives the legend appropriate symbols (lines)
lwd=c(1,1),col=c("black", "red","blue") #, bty="n"
#,cex=.7
) # gives the legend lines the correct color and width
dev.off()
|
40854394c6958d17cf56cfa2efdf1b039c8d5cc5
|
088e1a000955bd0725db1739ffa986edc09877c2
|
/4_Exploratory_Data_Analysis/Household_Power_Consumption/getData.R
|
b9aa5d9e45198086dc47a7786101d8f445fed952
|
[] |
no_license
|
maxgaz59/Coursera_Data_Science_Specialisation
|
950e3b4a42e2e7920f0f4d2f0decc275bf8f21c0
|
4f36bb79a2163f1a0b98a5b4355e5898f54b6c10
|
refs/heads/master
| 2021-04-30T16:32:58.775497
| 2017-01-26T03:20:07
| 2017-01-26T03:20:07
| 80,057,440
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,289
|
r
|
getData.R
|
# getData.R -- download and prepare the UCI household power consumption data,
# keeping only the two days (2007-02-01 and 2007-02-02) used by the plots.
library(dplyr)
setwd("~")
setwd("./[coursera]DataSciences/DataScienceSpecialisation/4_Exploratory/ProgAss1_ExploratoryData")
#rm(list=ls())
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
fileName <- "household_power_consumption.zip"
# Download and unpack only once; skipped if the zip is already on disk.
if (!file.exists(fileName)){
download.file(fileURL, destfile = fileName, method="curl")
unzip(fileName)
}
# colClasses could have also been used to modify the type (from factor to stg else).
# Read the full file once per session; "?" and "NA" both mark missing values.
if(!exists("dataset", envir = environment())){
print("does not exist")
dataset <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = c("?", "NA"))
dataset <- na.omit(dataset)
dataset <- tbl_df(dataset)
print(object.size(dataset))
}
# Subset to the first two days of February 2007 (upper bound exclusive).
if(!exists("data2days", envir = environment())){
data2days <- dataset %>%
mutate(Date = as.Date(Date, "%d/%m/%Y"))%>%
filter(Date >="2007-02-01", Date <"2007-02-03")
}
#######
## For the legend with date: get directly the day.
##################################################
#datetime <- strptime(paste(power$Date, power$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
#rm(dataset)
#png(filename='plot1.png', width=480, height=480, units='px')
|
8ffd33576ea66a172fd3b6408d3a1e55e6dbf36a
|
3d52bb75ea458b44c7e2935f818a25117bc4370d
|
/chap1.r
|
724335621948eb0f886bbf67e6f53d348144da1d
|
[] |
no_license
|
omelhoro/r-nlp-baayen
|
1b068853125d9a39872cb400074b839308ed4a98
|
5c71cb96a2a9be715d66e5a14246d717611b4bb0
|
refs/heads/master
| 2020-04-02T12:39:56.776704
| 2016-06-02T18:50:55
| 2016-06-02T18:50:55
| 60,289,180
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 812
|
r
|
chap1.r
|
# Chapter 1 exercises working with the spanishMeta corpus metadata
# shipped with the languageR package.
#
# Author: igor
###############################################################################
install.packages("languageR")
library("languageR")
#task1: inspect the data set and its column names
spanishMeta
colnames(spanishMeta)
#task2: texts per author, and mean publication date per author
# NOTE(review): grouping uses $Author here but $FullName above -- confirm both
# columns exist in spanishMeta.
xtabs(~FullName,data=spanishMeta)
tapply(spanishMeta$PubDate,spanishMeta$Author, mean)
#task3: order rows by year of birth, then by word count
spanishMeta[order(spanishMeta$YearOfBirth),]
spanishMeta[order(spanishMeta$Nwords),]
#task4: publication dates, newest first
sort(spanishMeta$PubDate,decreasing = T)
#task5: texts published before 1980
spanishMeta[spanishMeta$PubDate<1980,]
#task6: mean publication date, computed two equivalent ways
mean(spanishMeta$PubDate)
sum(spanishMeta$PubDate)/length(spanishMeta$PubDate)
#task7: build a small lookup table keyed by author name and merge it in
composer=data.frame(
FullName=levels(spanishMeta$FullName),
Favorite=c("IgorF","FischerI","BlaBla"))
merge(spanishMeta, composer, by="FullName" )
|
b9115f9a3cd4ecbb0482cc733636f12a7d4c26eb
|
60627dc5c9f23a9bafcf942c5a083629da786785
|
/man/power_eeg_bands.Rd
|
516efd2528972b929b6856f4d88382a30c4c2e02
|
[] |
no_license
|
adigherman/EEGSpectralAnalysis
|
6375fc44e8dd7864c0f1aa39c427a1369de1ddde
|
dadc57bcd0fb1ec39db0b0a9ab3ac1667e695184
|
refs/heads/master
| 2022-12-03T13:00:27.162666
| 2020-08-18T19:28:24
| 2020-08-18T19:28:24
| 234,624,034
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,159
|
rd
|
power_eeg_bands.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/power_eeg_bands.R
\name{power_eeg_bands}
\alias{power_eeg_bands}
\title{Get power values for EEG bands}
\usage{
power_eeg_bands(
eeg_signal,
sampling_frequency = 125,
max_frequency = 32,
num_sec_w = 5,
aggreg_level = 6
)
}
\arguments{
\item{eeg_signal}{EEG signal expressed in micro-Volts}
\item{sampling_frequency}{Sampling frequency of the EEG signal. This is
typically equal to 125Hz. Default value is 125.}
\item{max_frequency}{The maximum frequency for which the spectrum is being
calculated. Default value is 32.}
\item{num_sec_w}{number of seconds in a time window used to
obtain the Fourier coefficients. Typically, this number is 5}
\item{aggreg_level}{number of 5 second intervals used to aggregate
power. Typically, this number is 6 to ensure a 30 second
interval window (standard in EEG analysis)}
}
\value{
List containing the aggregated power values for each EEG band
}
\description{
Calculate power values for
each of the EEG bands:
Delta < 4
Theta >=4 and < 8
Alpha >= 8 and < 14
Beta >= 14 and < 32
Gamma >= 32 and < 50
}
|
275bb6713d484de58321afe024875651b1195c6b
|
bc5430fe73aa5e66981e65ae8c843dd5ba632c6d
|
/Code/korea_data_cleaning.R
|
865fd61fec4ac7fb046125e52f73e1c92fe9b8fc
|
[] |
no_license
|
cdbale/Hackathon
|
085c90c00bbe9da7326efa61c8af169e661ecc04
|
a5e1762009a59cb6b094f18062d79569cc7c0348
|
refs/heads/master
| 2023-04-18T04:23:30.390315
| 2023-04-15T12:13:54
| 2023-04-15T12:13:54
| 265,055,467
| 0
| 1
| null | 2020-05-22T05:54:21
| 2020-05-18T20:35:10
|
R
|
UTF-8
|
R
| false
| false
| 2,627
|
r
|
korea_data_cleaning.R
|
#############################################################################
########### Data Cleaning for South Korea COVID-19 Patients Data ############
### Project Contributors: Matthew Schneider, Jordan Fischer, Cameron Bale ###
#############################################################################
# See data description here: https://www.kaggle.com/kimjihoo/ds4c-what-is-this-dataset-detailed-description.
# Total data includes:
# - Case.csv: Data of COVID-19 infection cases in South Korea
# - PatientInfo.csv: Epidemiological data of COVID-19 patients in South Korea
# - PatientRoute.csv: Route data of COVID-19 patients in South Korea
# - Time.csv: Time series data of COVID-19 status in South Korea
# - TimeAge.csv: Time series of COVID-19 status in terms of the age in South Korea
# - TimeGender.csv: Time series data of COVID-19 status in terms of gender in South Korea
# - TimeProvince.csv: Time series data of COVID-19 status in terms of the province in South Korea
# - Region.csv: Location and statistical data of the regions in South Korea
# - Weather.csv: Data of the weather in the regions of South Korea
# - SearchTrend: Trend data of the keywords searched in NAVER which is one of the largest portals in South Korea
# - SeoulFloating: Data of floating population in Seoul, South Korea
# - Policy: Data of the government policy for COVID-19 in South Korea
#################################
### Reading and Cleaning Data ###
#################################
# load libraries
library(tidyverse)
# Read in patient epidemiological information and route information.
# guess_max = 3000 makes readr scan more rows before guessing column types.
p_info <- read_csv(file = 'Data/PatientInfo.csv', guess_max = 3000)
p_route <- read_csv(file = 'Data/PatientRoute.csv', guess_max = 3000)
# merge data sets (join epidemiological variables to route information);
# route rows are kept even when no matching patient info exists (left join)
full <- p_route %>%
left_join(p_info, by = c('patient_id', 'global_num', 'province', 'city')) %>%
mutate_at(c('patient_id', 'infected_by'), as.character) # convert ID variables from numeric to characters
save(full, file = 'Data/korea_data_clean.RData')
################################
### Basic Summary Statistics ###
################################
# number of unique values for each variable, not including NA
n_unique_values <- full %>%
summarise_all(n_distinct, na.rm = TRUE)
# min and max values for relevant variables
# NOTE(review): c(min, max) applies both functions to every listed column,
# yielding paired fn1/fn2 output columns -- confirm the resulting names.
min_max_values <- full %>%
summarize_at(c('date', 'latitude', 'longitude',
'birth_year', 'infection_order', 'contact_number',
'symptom_onset_date', 'confirmed_date', 'released_date',
'deceased_date'), c(min, max), na.rm = TRUE)
|
0db3171b44357c84ec23c2bfdd2abde0e55ed40a
|
8452f6a438a5505e1a2517dbebe85bce148b636f
|
/plot1.R
|
b00a4a88a1351271a9a1490c2dc4d208d1624da6
|
[] |
no_license
|
minyili/ExData_Plotting1
|
66945fb400a48e9ae0d836e4d8cfcd234042145e
|
4bb0b2831cf6197345ed157ba07c6f5b15d94abe
|
refs/heads/master
| 2021-01-18T06:30:32.884453
| 2014-07-09T05:37:08
| 2014-07-09T05:37:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,114
|
r
|
plot1.R
|
# plot1.R -- histogram of Global Active Power for 2007-02-01/02.
#setting working directory
#setwd("/Users/myli/Desktop/Data scientist specialization/Propractise/IndividualElectricityPowerConsumption/ExData_Plotting1")
#specify which column to read, columns with type 'NULL' are disregarded
# (Date and Time as character, Global_active_power as numeric, skip the other 6)
readCol<-c(rep('character',2),'numeric',rep('NULL',6))
#reading the first three columns (Date, Time and Global_active_power) of the whole data set into data
#subsetting data such that it only contains the dates from 1/2/2007 to 2/2/2007
#Notice that I have try the method of reading line by line and rbind into data, but it is very slow....
# na.strings="?" maps the file's missing-value marker to NA.
data<-read.csv("household_power_consumption.txt", sep=";",na.strings="?",header=TRUE,quote="",colClasses=readCol)
data<-data[data$Date %in% c("1/2/2007","2/2/2007"),]
#Open png device; create 'plot1.png' in the working directory
png(file="plot1.png",width = 480, height = 480)
#create plot and send to a file (no plot appears on the screen)
hist(data$Global_active_power,col="red", main="Global active power", xlab ="Global active power (kilowatts)")
#Close the png file device
dev.off()
#Now you can view the file 'plot1.png' on your computer
|
20ef3928e342fcd6a441ead516a0ec247248b6ee
|
1c1ac604314d3c8785a8f3d14f2df1afc7429ad3
|
/tests/testthat/test_pdist.R
|
d24996dc18619c0b6af3d630e570e1114b058815
|
[
"MIT"
] |
permissive
|
jokergoo/cola
|
7abd6dfd0bb487ce601a045f021c0a61359486df
|
8376b71ab216f69fd77b7af9f898048c5dfc6070
|
refs/heads/master
| 2023-06-07T08:34:15.370800
| 2023-06-06T07:45:08
| 2023-06-06T07:45:08
| 91,272,219
| 58
| 14
| null | 2018-03-01T09:50:37
| 2017-05-14T21:21:56
|
R
|
UTF-8
|
R
| false
| false
| 256
|
r
|
test_pdist.R
|
# Regression test: cola's internal pdist() should agree with stats::dist().
# NOTE(review): no set.seed(), so the random inputs differ between runs; the
# comparison is internal to each run, so the outcome is still deterministic.
m1 = matrix(rnorm(10*2), nrow = 10)
m2 = matrix(rnorm(10*2), nrow = 10)
# Distances between the 2 columns of m1 and the 2 columns of m2 (as rows of the transposes).
d1 = cola:::pdist(t(m1), t(m2), 1)
# Reference: full 4x4 distance matrix of all columns; keep only the cross-block entries.
d2 = as.matrix(dist(t(cbind(m1, m2))))[1:2, 3:4]
dimnames(d2) = NULL
test_that("test pdist", {
expect_equal(all(abs(d1 - d2) < 1e-6), TRUE)
})
|
510dc04b27a3a9b05d3de34b997091f108a198a4
|
db0b537705b0671f527a8d406f13fb1de5b49616
|
/data/test_dataset/dataframe/Data Visualization and Statistics/R/plotting_players_by_clusters.R
|
d2aa17df501dfb4ba81db95d9052bd24d089c7af
|
[] |
no_license
|
rakeshamireddy/Automatic-Code-Translation
|
7e43d9232b1af2f9e1c62e76c1720f9469bdd842
|
9c4d0b040ee2eaccdcec8d8321f262d748c5c4b0
|
refs/heads/master
| 2023-04-22T23:29:55.205930
| 2021-05-15T03:06:03
| 2021-05-15T03:06:03
| 299,511,652
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 103
|
r
|
plotting_players_by_clusters.R
|
# Project the selected NBA stat columns onto principal components.
# NOTE(review): assumes `nba`, `goodCols`, and `labels` are defined in the
# calling environment and that clusplot() (cluster package) is loaded -- confirm.
nba2d <- prcomp(nba[,goodCols], center=TRUE)
# Keep only the first two principal components for a 2-D view.
twoColumns <- nba2d$x[,1:2]
clusplot(twoColumns, labels)
|
b00c21cbbc7f28ac409a69c467f2e717691236d6
|
9803cb750744ab39e22ade6864a48246c957d8f9
|
/tests/testthat/test_is_zero_factor.R
|
13c64c69c841a69ea66117526a20003d321144f2
|
[] |
no_license
|
kkdey/flashr
|
19da364375b7bdf23515c9c0fc387383c4701eb4
|
4bc27f39dec59486dfd56c71e610d7032b606b30
|
refs/heads/master
| 2020-04-12T08:47:33.952487
| 2016-11-27T20:42:58
| 2016-11-27T20:42:58
| 65,643,575
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 247
|
r
|
test_is_zero_factor.R
|
# Test that is_zero_factor() flags exactly the all-zero columns of a loadings matrix.
library(flashr)
context("Zero Factors")
test_that("is_zero_factor returns correct result",{
# 100 x 5 random matrix; zero out columns 1 and 3.
l=matrix(rnorm(500),nrow=100,ncol=5)
l[,1]=rep(0,100)
l[,3]=rep(0,100)
# Expect TRUE precisely for the zeroed columns.
expect_equal(is_zero_factor(l),c(TRUE,FALSE,TRUE,FALSE,FALSE))
}
)
|
933324b16877af7968f0d8aa5726eef1571c9e0d
|
f1f38d1f92133aaa0ee5c3df6b0048aaf0dd9054
|
/man/occ_count.Rd
|
3c12dff256a18ffb287f9d40924e8db0412d8aad
|
[
"CC0-1.0"
] |
permissive
|
imclab/rgbif
|
acda8ae9828d4cb281deab6016e1741192e8756b
|
c62edb8ecd0f89796dd18a38cfb8cd327e25584e
|
refs/heads/master
| 2021-01-11T05:02:47.481188
| 2013-11-29T05:40:54
| 2013-11-29T05:40:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,238
|
rd
|
occ_count.Rd
|
\name{occ_count}
\alias{occ_count}
\title{Get number of occurrence records.}
\usage{
occ_count(nubKey = NULL, georeferenced = NULL,
basisOfRecord = NULL, datasetKey = NULL, date = NULL,
catalogNumber = NULL, country = NULL,
hostCountry = NULL, year = NULL, from = 2000,
to = 2012, type = "count", publishingCountry = "US",
callopts = list())
}
\arguments{
\item{nubKey}{Species key}
\item{georeferenced}{Return only occurrence records with
lat/long data (TRUE) or all records (FALSE, default).}
\item{basisOfRecord}{Basis of record}
\item{datasetKey}{Dataset key}
\item{date}{Collection date}
\item{year}{Year data were collected in}
\item{catalogNumber}{Catalog number}
\item{country}{Country data was collected in}
\item{hostCountry}{Country that hosted the data}
\item{publishingCountry}{Publishing country, two letter
ISO country code}
\item{from}{Year to start at}
\item{to}{Year to end at}
\item{type}{One of count (default), schema,
basis_of_record, countries, year}
\item{callopts}{Pass on options to httr::GET for more
refined control of http calls, and error handling}
}
\value{
A single numeric value
}
\description{
Get number of occurrence records.
}
\examples{
\dontrun{
occ_count(basisOfRecord='OBSERVATION')
occ_count(georeferenced=TRUE)
occ_count(country='DENMARK')
occ_count(country='CANADA', georeferenced=TRUE, basisOfRecord='OBSERVATION')
occ_count(hostCountry='FRANCE')
occ_count(datasetKey='9e7ea106-0bf8-4087-bb61-dfe4f29e0f17')
occ_count(year=2012)
occ_count(nubKey=2435099)
occ_count(nubKey=2435099, georeferenced=TRUE)
occ_count(datasetKey='8626bd3a-f762-11e1-a439-00145eb45e9a',
basisOfRecord='PRESERVED_SPECIMEN')
occ_count(datasetKey='8626bd3a-f762-11e1-a439-00145eb45e9a', nubKey=2435099,
basisOfRecord='PRESERVED_SPECIMEN')
# Just schema
occ_count(type='schema')
# Counts by basisOfRecord types
occ_count(type='basis_of_record')
# Counts by countries. publishingCountry must be supplied (default to US)
occ_count(type='countries')
# Counts by year. from and to years have to be supplied, default to 2000 and 2012
occ_count(type='year', from=2000, to=2012)
}
}
\references{
\url{http://www.gbif.org/developer/summary}
}
|
a25758b1cc6cd41e250fe49046d3e946631938e7
|
97fa19dc9569076f5830faf93b1346c8ba43b3d8
|
/geo_submission_files.r
|
a8c448b35d8270963492f03d5b86a8c8f054a8f9
|
[] |
no_license
|
CGSbioinfo/GX-Illumina
|
9c4876547ea74d3a3b4280ae271540aac49df78f
|
883f16dec35dc25620d7886da907a35d14385e70
|
refs/heads/master
| 2016-09-01T03:43:56.458499
| 2015-11-29T16:06:56
| 2015-11-29T16:06:56
| 47,068,559
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,995
|
r
|
geo_submission_files.r
|
# geo_submission_files.r -- build the raw and normalized expression matrices
# (each sample paired with its detection p-value column) and generate the
# GEO platform/sample/submission files via lumi.
library(lumi)
library(limma)
library(beadarray)
### Generate raw and normalized data to submit to geo
setwd('Z:/SERVICES/Microarrays/GX-Nugen/Angela Riedel/Sept2014/AR_Sep14_Results/')
raw_data<-lumiR("raw_data.txt")
norm_data<-lumiN(raw_data)
detection_pvals_raw_data=detection(raw_data)
detection_pvals_norm_data=detection(norm_data)
# Interleave expression and detection p-value columns: two columns per sample.
raw_table=as.data.frame(matrix(0,nrow=nrow(raw_data),ncol=(ncol(raw_data)*2)))
nc=1
for (i in 1:length(colnames(raw_data))){
sample=colnames(raw_data)[i]
raw_data_col=which(colnames(exprs(raw_data))==sample)
raw_table[,nc]=exprs(raw_data)[,raw_data_col]
colnames(raw_table)[nc]=sample
detect_pval_index=which(colnames(detection_pvals_raw_data)==sample)
# NOTE(review): detect_pval_index is computed but raw_data_col is used below;
# this is only correct if the p-value matrix shares the expression matrix's
# column order -- confirm.
raw_table[,nc+1]=detection_pvals_raw_data[,raw_data_col]
colnames(raw_table)[nc+1]='Detection Pval'
nc=2*i+1
print(i)
print(nc)
}
raw_table=cbind(ID_REF=rownames(exprs(raw_data)), raw_table)
# Same interleaving for the normalized data.
norm_table=as.data.frame(matrix(0,nrow=nrow(norm_data),ncol=(ncol(norm_data)*2)))
nc=1
for (i in 1:length(colnames(norm_data))){
sample=colnames(norm_data)[i]
norm_data_col=which(colnames(exprs(norm_data))==sample)
norm_table[,nc]=exprs(norm_data)[,norm_data_col]
colnames(norm_table)[nc]=sample
detect_pval_index=which(colnames(detection_pvals_norm_data)==sample)
norm_table[,nc+1]=detection_pvals_norm_data[,norm_data_col]
colnames(norm_table)[nc+1]='Detection Pval'
nc=2*i+1
print(i)
print(nc)
}
norm_table=cbind(ID_REF=rownames(exprs(norm_data)), norm_table)
# Write the matrices for upload.
dir.create("geo_submission_files", showWarnings=F)
setwd('geo_submission_files/')
write.csv(raw_table, 'matrix_non_normalized.csv', row.names=FALSE)
write.csv(norm_table, 'matrix_normalized.csv', row.names=FALSE)
setwd("..")
## Geo files
dir.create("geo_files", showWarnings=F)
setwd('geo_files/')
produceGEOPlatformFile(x.lumi = raw_data)
produceGEOSampleInfoTemplate(lumiNormalized = norm_data)
produceGEOSubmissionFile(lumiNormalized = norm_data, lumiRaw = raw_data, sampleInfo = 'GEOsampleInfo.txt')
|
a22ff341d087922b1744a0625fc6f6ebc4e821f9
|
d9d4c5f99898d63552201cc30adc873c71042d20
|
/man/create_graph.Rd
|
f098a63a21bf6afad9938e64af4c99347a85cdf0
|
[] |
no_license
|
UweBlock/DiagrammeR
|
f010857b5e95943c0ae5de458afc63a2daf83d8d
|
203fc1d8a64bf518d4cb5b8339df57c7a1f4ece0
|
refs/heads/master
| 2020-05-29T11:45:30.027097
| 2015-11-11T10:22:01
| 2015-11-11T10:22:01
| 46,001,828
| 1
| 0
| null | 2015-11-11T18:38:58
| 2015-11-11T18:38:57
| null |
UTF-8
|
R
| false
| true
| 3,327
|
rd
|
create_graph.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_graph.R
\name{create_graph}
\alias{create_graph}
\title{Create a graph object using data frames representative of nodes and edges}
\usage{
create_graph(nodes_df = NULL, edges_df = NULL, graph_attrs = NULL,
node_attrs = NULL, edge_attrs = NULL, directed = TRUE,
graph_name = NULL, graph_time = NULL, graph_tz = NULL,
generate_dot = TRUE)
}
\arguments{
\item{nodes_df}{an optional data frame containing, at minimum, a column
(called \code{nodes}) which contains node IDs for the graph. Additional
columns (named as Graphviz node attributes) can be included with values for
the named node attribute.}
\item{edges_df}{an optional data frame containing, at minimum, two
columns (called \code{from} and \code{to}) where node IDs are provided.
Additional columns (named as Graphviz edge attributes) can be included with
values for the named edge attribute.}
\item{graph_attrs}{an optional vector of graph attribute statements that
can serve as defaults for the graph.}
\item{node_attrs}{an optional vector of node attribute statements that can
serve as defaults for nodes.}
\item{edge_attrs}{an optional vector of edge attribute statements that can
serve as defaults for edges.}
\item{directed}{with \code{TRUE} (the default) or \code{FALSE}, either
directed or undirected edge operations will be generated, respectively.}
\item{graph_name}{an optional string for labeling the graph object.}
\item{graph_time}{a date or date-time string (required for insertion of
graph into a graph series of the type \code{temporal}).}
\item{graph_tz}{an optional value for the time zone (\code{tz})
corresponding to the date or date-time string supplied as a value to
\code{graph_time}. If no time zone is provided then it will be set to
\code{GMT}.}
\item{generate_dot}{an option to generate Graphviz DOT code and place
into the graph object.}
}
\value{
a graph object of class \code{dgr_graph}.
}
\description{
Generates a graph object using data frames for nodes and/or
edges; the graph object can be manipulated by other functions.
}
\examples{
\dontrun{
# Create an empty graph
graph <- create_graph()
# Create a graph with nodes but no edges
nodes <- create_nodes(nodes = c("a", "b", "c", "d"))
graph <- create_graph(nodes_df = nodes)
# Create a graph with nodes with values, types, labels
nodes <- create_nodes(nodes = c("a", "b", "c", "d"),
label = TRUE,
type = c("type_1", "type_1",
"type_5", "type_2"),
shape = c("circle", "circle",
"rectangle", "rectangle"),
values = c(3.5, 2.6, 9.4, 2.7))
graph <- create_graph(nodes_df = nodes)
# Create a graph from an edge data frame; the nodes will be created from the edge values
edges <- create_edges(from = c("a", "b", "c"),
to = c("d", "c", "a"),
rel = "leading_to")
graph <- create_graph(edges_df = edges)
# Create a graph with both nodes and edges defined, and,
# add some default attributes for nodes and edges
graph <- create_graph(nodes_df = nodes,
edges_df = edges,
node_attrs = "fontname = Helvetica",
edge_attrs = c("color = blue",
"arrowsize = 2"))
}
}
|
5649960d5c08bb940c9b1d748b8ecfef36716e78
|
3fe30961b6b2d54597ff38d5bf02359a4b6f859a
|
/R/REV.R
|
b0e46f0bf3306305d922a81fe561f66fcd0b7114
|
[] |
no_license
|
cran/mcmcabn
|
83d978ac43e0da88b740b7166c1cc53811f4dbdd
|
ffe2262400c2d1771270977720a068a83b3b067e
|
refs/heads/master
| 2022-11-30T01:14:27.043816
| 2022-11-18T22:30:02
| 2022-11-18T22:30:02
| 174,589,227
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,845
|
r
|
REV.R
|
# REV: single edge-reversal proposal for structure MCMC over Bayesian-network DAGs.
# Picks one reversible edge i->j (present in the DAG, not retained, not banned),
# removes both endpoints' parent sets, resamples a new parent set for each
# endpoint from the pre-computed score cache (respecting acyclicity via
# descendant sets), and accepts/rejects the proposal with a Metropolis-style
# ratio, optionally tempered by `heating`.
#
# Arguments:
#   n.var       - number of nodes (not referenced in this body; kept for the caller's interface)
#   dag.tmp     - current DAG adjacency matrix (row = child, columns = its parents,
#                 judging from the `dag.tmp[child, ] <- parent.row` assignments below)
#   retain      - adjacency matrix of edges that must be kept (excluded from reversal)
#   ban         - adjacency matrix of banned edges (transposed in the mask below)
#   max.parents - maximum parent-set size (not referenced in this body)
#   sc          - score-cache rows: parent-set indicator columns plus the score in the last column
#   score.cache - cache object whose `children` vector indexes the rows of `sc`
#   score       - score of the current DAG; returned unchanged if the move is rejected
#   verbose     - verbosity flag (not referenced in this body)
#   heating     - tempering exponent applied to the acceptance probability
#
# Returns: list(dag.tmp = possibly-updated DAG, score = its score,
#               alpha = acceptance probability, rejection = 0 accepted / 1 rejected).
# NOTE(review): relies on helpers `descendents()`, `range01()`, `score.dag()` and
# `exponent()` defined elsewhere in this package — confirm their contracts there.
REV <- function(n.var, dag.tmp, retain, ban, max.parents, sc, score.cache, score, verbose, heating) {
  rejection <- 1
  A <- 0
  # store number of edges in the current DAG
  n.edges <- sum(dag.tmp)
  # proceed only if at least one edge is eligible for reversal
  if (sum(dag.tmp*(1-retain)*(1-t(ban))) != 0){
    # i->j: sample one eligible edge uniformly at random
    selected.edge <- which(x = ((dag.tmp*(1-retain)*(1-t(ban))) == 1), arr.ind = TRUE)[sample(x = 1:sum(dag.tmp*(1-retain)*(1-t(ban))), size = 1), ,drop=FALSE]
    # store current parent set of node j (row 1 of selected.edge)
    parent.set <- dag.tmp[selected.edge[1], ,drop=FALSE]
    # M.dot: the DAG with both endpoints' parent sets removed
    dag.M.dot <- dag.tmp
    dag.M.dot[selected.edge[1],] <- 0
    dag.M.dot[selected.edge[2],] <- 0
    # descendants of each endpoint in M.dot (used to keep the proposal acyclic)
    descendents.M.dot.i <- descendents(nodes = unname(selected.edge[2]), dag = dag.M.dot)
    descendents.M.dot.j <- descendents(nodes = unname(selected.edge[1]), dag = dag.M.dot)
    # candidate parent sets for i: those that DO include j...
    sc.tmp <- sc[score.cache$children == selected.edge[2], ,drop=FALSE]
    sc.tmp <- sc.tmp[sc.tmp[, selected.edge[1]] == 1, ,drop=FALSE]
    #if (!is.null(descendents.M.dot.i)) {
    # ...and exclude any descendant of i in M.dot (acyclicity constraint)
    sc.tmp <- sc.tmp[rowSums(sc.tmp[, descendents.M.dot.i,drop=FALSE] == 0) == length(descendents.M.dot.i), ,drop=FALSE]
    #}
    # sample a new parent set for i, weighted by (shifted) cached scores via range01()
    new.parent.i <- sc.tmp[sample(x = 1:nrow(sc.tmp), size = 1, prob = range01(sc.tmp[, ncol(sc.tmp)] -
        sum(sc.tmp[, ncol(sc.tmp)]))), ,drop=FALSE]
    new.parent.i <- new.parent.i[-length(new.parent.i)]  # drop the trailing score column
    # store partition function
    #z.star.x.i.M.dot <- (logSumExp(sc.tmp[, ncol(sc.tmp)]))
    #z.star.x.i.M.dot <- (sum(sc.tmp[, ncol(sc.tmp)]))
    # M.cross: M.dot with i's new parent set installed
    dag.M.cross <- dag.M.dot
    dag.M.cross[selected.edge[2], ] <- new.parent.i
    # descendants in M.cross, constraining j's new parent set
    descendents.M.cross.i <- descendents(nodes = unname(selected.edge[2]), dag = dag.M.cross)
    descendents.M.cross.j <- descendents(nodes = unname(selected.edge[1]), dag = dag.M.cross)
    #descendents.M.j <- descendents(nodes = unname(selected.edge[1]), dag = dag.tmp)
    # candidate parent sets for j: exclude descendants of j in M.cross
    sc.tmp <- sc[score.cache$children == selected.edge[1], ,drop=FALSE]
    #if (!is.null(descendents.M.cross.j)) {
    #sc.tmp <- sc.tmp[rowSums(sc.tmp[, descendents.M.j,drop=FALSE] == 0) == length(descendents.M.j),, drop=FALSE]
    sc.tmp <- sc.tmp[rowSums(sc.tmp[, descendents.M.cross.j,drop=FALSE] == 0) == length(descendents.M.cross.j),, drop=FALSE]
    #}
    # sample a new parent set for j with the same score-weighted scheme
    new.parent.j <- sc.tmp[sample(x = 1:nrow(sc.tmp), size = 1, prob = range01(sc.tmp[, ncol(sc.tmp)] -
        sum(sc.tmp[, ncol(sc.tmp)]))), ,drop=FALSE]
    new.parent.j <- new.parent.j[-length(new.parent.j)]  # drop the trailing score column
    # store partition function
    #z.x.j.M.cross <- (logSumExp(sc.tmp[, ncol(sc.tmp)]))
    #z.x.j.M.cross <- (sum(sc.tmp[, ncol(sc.tmp)]))
    # M.tilde: the full proposal with both new parent sets in place
    dag.M.tilde <- dag.M.cross
    dag.M.tilde[selected.edge[1],] <- new.parent.j
    n.edges.tilde <- sum(dag.M.tilde)
    ############################## computing acceptance probability ##############################################
    # Dead code below (guarded by FALSE): an earlier partition-function-based
    # acceptance computation, kept for reference.
    if(FALSE){
      # score node j that do not include i
      sc.tmp <- sc[score.cache$children == selected.edge[1], ,drop=FALSE]
      sc.tmp <- sc.tmp[sc.tmp[, selected.edge[2]] == 1, ,drop=FALSE]
      sc.tmp <- sc.tmp[rowSums(sc.tmp[, descendents.M.dot.j,drop=FALSE] == 0) == length(descendents.M.dot.j), ,drop=FALSE]
      #z.star.x.j.M.dot <- (logSumExp(sc.tmp[, ncol(sc.tmp)]))
      dag.M.tilde.cross <- dag.M.dot
      dag.M.tilde.cross[selected.edge[1],] <- parent.set
      # descendant
      descendents.M.tilde.cross.i <- descendents(nodes = unname(selected.edge[2]), dag = dag.M.tilde.cross)
      sc.tmp <- sc[score.cache$children == selected.edge[2], ,drop=FALSE]
      if (!is.null(descendents.M.tilde.cross.i)) {
        sc.tmp <- sc.tmp[rowSums(sc.tmp[, descendents.M.tilde.cross.i,drop=FALSE] == 0) == length(descendents.M.tilde.cross.i),
            ,drop=FALSE]
      }
      z.x.i.M.tilde.cross <- (logSumExp(sc.tmp[, ncol(sc.tmp)]))
    }
    ############################## Acceptance probability
    #score.A <- min(exp((z.star.x.i.M.dot / (z.star.x.j.M.dot) * (z.x.j.M.cross) / (z.x.i.M.tilde.cross))*n.edges/n.edges.tilde),1)
    # Metropolis ratio on total DAG scores, weighted by the edge-count ratio
    s.proposed <- score.dag(dag.M.tilde,score.cache,sc)
    s.current <- score.dag(dag.tmp,score.cache,sc)
    A <- min(exp(( s.proposed - s.current) * (n.edges/n.edges.tilde) ), 1)
    #if(is.nan(score.A)){score.A <- 0}
    #if((score.A)<0){score.A <- 0}
    #A <- min(1, score.A)
    #if (rbinom(n = 1, size = 1, prob = A) == 1) {
    # accept with tempered probability A^heating (via exponent() helper)
    if (runif(1)<(exponent(A,heating))) {
      rejection <- 0
      dag.tmp <- dag.M.tilde
      score <- score.dag(dag.M.tilde,score.cache,sc)
    }
  }#eoif
  ############################## Return
  return(list(dag.tmp = dag.tmp, score = score, alpha = A, rejection = rejection))
} #EOF
|
2522146c6db04a569de0050d051de0f91573468d
|
dab398fd11e87204187cebad9771b5d09f87faac
|
/man/specificity.Rd
|
4be728f587fbb023e86ba718617ab622e64198e3
|
[] |
no_license
|
mdlincoln/modeltests
|
ca4c8fd4a9af82efad32fe28dfee27f2f910fd17
|
d6268f425fb4a2d646f708475227c5a4c3b60f6f
|
refs/heads/master
| 2021-01-21T17:46:22.194647
| 2015-01-25T22:55:50
| 2015-01-25T22:55:50
| 29,830,643
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 592
|
rd
|
specificity.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/modeltests.R
\name{specificity}
\alias{specificity}
\title{Calculate specificity}
\usage{
specificity(truth, pred)
}
\arguments{
\item{truth}{A logical vector of true values}
\item{pred}{A logical vector of values predicted by the model}
}
\value{
numeric value
}
\description{
This function calculates the fraction of the items \emph{not} in the class that the
classifier correctly identifies
}
\examples{
t <- c(TRUE, TRUE, FALSE, TRUE, FALSE)
p <- c(TRUE, FALSE, TRUE, TRUE, FALSE)
specificity(t, p)
}
|
532b4eeb42aaa3db77d949c091c92103191da6b9
|
b005168d2b1cb99440d9ca936a1d284e4a1a84ae
|
/human.R
|
840e64275f447f482f46d83920bb0c1214b1426c
|
[] |
no_license
|
miljavon/IODS-project
|
56940feeebe4e52c5dcf3d398ebf3ace86a84b04
|
8bc66d640e8bf1ccb8cdab2c7eef7b2a1f5f00af
|
refs/heads/master
| 2021-05-01T17:17:56.028794
| 2017-02-24T21:18:14
| 2017-02-24T21:18:14
| 79,423,939
| 0
| 0
| null | 2017-01-19T06:43:39
| 2017-01-19T06:43:39
| null |
UTF-8
|
R
| false
| false
| 2,316
|
r
|
human.R
|
# Data wrangling for the UN Human Development / Gender Inequality datasets
# (IODS course): read the two raw tables, rename the variables, join them by
# country, clean the GNI column, and produce the `human_` data frame with
# countries as row names.

# Load the packages up front. BUG FIX: the original called dplyr::select()
# before library(dplyr), which fails on a fresh R session.
library(dplyr)
library(tidyr)
library(stringr)

hd <- read.csv("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/human_development.csv", stringsAsFactors = F)
gii <- read.csv("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/gender_inequality.csv", stringsAsFactors = F, na.strings = "..")
str(hd)
# 195 obs of 8 variables
str(gii)
# 195 obs of 10 variables
# printing out the summaries of the variables
summary(hd)
summary(gii)

# Shorter names for the hd variables
hd$dev_rank <- hd$HDI.Rank
hd$country <- hd$Country
hd$i <- hd$Human.Development.Index..HDI.
hd$life <- hd$Life.Expectancy.at.Birth
hd$eduexp <- hd$Expected.Years.of.Education
hd$edumean <- hd$Mean.Years.of.Education
hd$gni <- hd$Gross.National.Income..GNI..per.Capita
hd$gni_minus_hdi <- hd$GNI.per.Capita.Rank.Minus.HDI.Rank

# Shorter names for the gii variables
gii$rank <- gii$GII.Rank
gii$country <- gii$Country
gii$gii <- gii$Gender.Inequality.Index..GII.
gii$mother_dead <- gii$Maternal.Mortality.Ratio
gii$preggo <- gii$Adolescent.Birth.Rate
gii$repres <- gii$Percent.Representation.in.Parliament
gii$seceduf <- gii$Population.with.Secondary.Education..Female.
gii$secedum <- gii$Population.with.Secondary.Education..Male.
gii$laborf <- gii$Labour.Force.Participation.Rate..Female.
gii$laborm <- gii$Labour.Force.Participation.Rate..Male.

# Keep only the renamed columns: drop the 8 original hd columns, and the 10
# original gii columns plus the duplicate "rank".
hd1 <- select(hd, -(1:8))
str(hd1)
gii1 <- select(gii, -(1:11))
# BUG FIX: the original additionally dropped column 1 of gii1, which is the
# `country` join key, so the inner_join() below failed. Keep it.
gii2 <- gii1
str(gii2)

# Mutating the data: derived ratios (added to `gii` only, as in the original)
gii <- mutate(gii, ratioedu = seceduf/secedum)
gii <- mutate(gii, ratiolabour = laborf/laborm)
str(gii)

# Joining the data by country
join_by <- c("country")
human <- inner_join(gii2, hd1, by = join_by, suffix = c(".gii", ".hd"))

# The GNI as numeric.
# BUG FIX: the original computed the comma-stripped value but never assigned
# it back to human$gni.
human$gni <- str_replace(human$gni, pattern = ",", replace = "") %>% as.numeric()
human$gni

# Keeping only certain columns
keep <- c("country", "seceduf", "laborf", "life", "eduexp", "gni", "mother_dead", "preggo", "repres")
human <- select(human, one_of(keep))
str(human)

# Filtering out the NAs
human_ <- filter(human, complete.cases(human))

# Excluding the last 7 rows, which are regional aggregates rather than
# countries. BUG FIX: the original indexed `human` here, silently discarding
# the complete-cases filtering just above.
last <- nrow(human_) - 7
human_ <- human_[1:last, ]

# Adding countries as rownames, then dropping the country column.
# BUG FIX: the original selected from `human` instead of `human_`, throwing
# away both the filtering and the row names.
rownames(human_) <- human_$country
human_ <- select(human_, -country)
str(human_)

# One-off setup for the next analysis step; consider commenting out after
# the first run to avoid reinstalling on every execution.
install.packages("FactoMineR")
|
641ae5d949187407884bc4eac41728f5aae14581
|
59f00bf769f88c9c0bfd91f3091d4a7e49517b73
|
/R/z_test.R
|
787860966d28c6becdf571e30736c66d516d93b1
|
[
"MIT"
] |
permissive
|
RachelQueen1/YorkPackage
|
be2c335d40bd22c25c4e9c06ec640903d7afebee
|
754b74f8df56b65b797000af96afac0404d4dd8b
|
refs/heads/master
| 2020-12-06T03:19:28.544440
| 2020-01-07T13:33:44
| 2020-01-07T13:33:44
| 232,316,884
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26
|
r
|
z_test.R
|
# Placeholder function: accepts a single argument `v` (ignored) and
# returns NULL, exactly like an empty function body would.
z_test <- function(v) {
  NULL
}
|
34a7e442668c81428612213f4db7a1da52cdf9b1
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/B_analysts_sources_github/bcaffo/brisk/display.R
|
ed6a565a41eb278d5193e6a742c307074bfa4679
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,842
|
r
|
display.R
|
# display: render a statistical map inside an ROI brain volume as 3-D contours.
#
# Reads the statistic image from `file` (ANALYZE or NIfTI format, or an
# already-loaded array when type == "img"), embeds it into the coordinate
# frame of a reference "ROIimage" ANALYZE volume, downsamples both, and draws
# the ROI shell plus positive/negative iso-surfaces with contour3d().
#
# Arguments:
#   file        - path to the image (or an array when type == "img")
#   type        - "analyze", "nifti", or "img"
#   imageCenter - voxel offset of the statistic image's origin relative to the
#                 hard-coded ROI center c(46, 64, 37)
#   roi         - draw the translucent ROI shell?
#   positive    - draw iso-surfaces of positive values?
#   negative    - draw iso-surfaces of (sign-flipped) negative values?
#   plevels     - contour level(s) for positive values
#   nlevels     - contour level(s) for negative values
#
# Returns NULL; called for its plotting side effect.
# NOTE(review): depends on read.ANALYZE/read.NIFTI/extract.data (fMRI package,
# presumably), contour3d (misc3d) and a `downsample()` helper defined
# elsewhere — confirm which packages supply them. Also assumes a file named
# "ROIimage" exists in the working directory.
display <- function(file, type = "analyze", imageCenter = c(0, 0, 0),
                    roi = TRUE,
                    positive = TRUE,
                    negative = TRUE,
                    plevels = 1,
                    nlevels = 1
                    ){
  # Load the statistic volume (first time point only: [,,,1])
  if (type == "analyze") img2 <- extract.data(read.ANALYZE(file, setmask = FALSE))[,,,1]
  else if (type == "nifti") img2 <- extract.data(read.NIFTI(file, setmask = FALSE))[,,,1]
  else if (type == "img") img2 <- file
  else stop("unsupported file type")
  # Reference ROI volume, read from the fixed path "ROIimage"
  img <- extract.data(read.ANALYZE( "ROIimage", setmask = FALSE))[,,,1]
  roiDim <- dim(img)
  imageDim <- dim(img2)
  # Hard-coded voxel center of the ROI volume
  roiCenter <- c(46, 64, 37)
  # Embed the statistic image into the ROI coordinate frame, shifted so the
  # two centers line up
  resize <- array(0, roiDim);
  resize[(1 : imageDim[1]) + roiCenter[1] - imageCenter[1],
         (1 : imageDim[2]) + roiCenter[2] - imageCenter[2],
         (1 : imageDim[3]) + roiCenter[3] - imageCenter[3]] = img2;
  rm(img2)
  resize <- downsample(resize)
  img <- downsample(img)
  # Binary mask of the ROI interior (strictly between 0 and the max value)
  timg <- array(0, dim(img))
  timg[img > 0 & img < max(img)] <- 1
  # Translucent ROI shell
  if (roi) contour3d(timg, level= 1, smooth = 20, fill = TRUE, mask = array(TRUE, dim(img)), alpha = .2, add = FALSE)
  timg <- array(0, dim(resize))
  # Negative values: sign-flipped so they can share the contour machinery
  if (negative) {
    timg[resize < 0] <- -1 * resize[resize < 0]
    contour3d(timg,
              level= nlevels,
              color = heat.colors(length(nlevels)),
              smooth = 20,
              fill = TRUE,
              mask = array(TRUE, dim(timg)),
              alpha = .8,
              add = TRUE)
  }
  # Positive values. NOTE(review): `timg` is not reset here, so when both
  # negative and positive are TRUE the flipped negatives remain in timg —
  # confirm this overlay is intentional.
  if (positive) {
    timg[resize > 0] <- resize[resize > 0]
    contour3d(timg,
              level= plevels,
              color = topo.colors(length(plevels)),
              smooth = 20,
              fill = TRUE,
              mask = array(TRUE, dim(timg)),
              alpha = .8,
              add = TRUE)
  }
  return(NULL)
}
|
e6901f22aeb0c56a7b8d59400619adb1ac8b0d93
|
c1bf288727b9ff4a63b3ad82b86153848bc194aa
|
/app/ui.R
|
d51b3fc89d4def1fdc40ae99bf360f63803100eb
|
[
"BSD-3-Clause"
] |
permissive
|
HimesGroup/pargasite
|
fa0df418ca70551e46e970197b761d7d4a301daa
|
b5e7f8adee320372bcb404a0dd6570184dd27336
|
refs/heads/master
| 2023-08-23T05:24:16.080244
| 2023-08-08T16:02:49
| 2023-08-08T16:02:49
| 139,889,958
| 3
| 4
|
BSD-3-Clause
| 2023-08-08T16:02:51
| 2018-07-05T19:05:32
|
HTML
|
UTF-8
|
R
| false
| false
| 8,908
|
r
|
ui.R
|
# Shiny UI for PARGASITE (Pollution-Associated Risk Geospatial Analysis SITE).
# Four tabs: a USA-wide pollution map, an MMSA-level map, a county-level map,
# and an About page. Each map tab pairs a sidebar (year/pollutant pickers plus
# a data-download section) with a leaflet map. Output ids referenced here
# (e.g. "map", "latlong", "finalDownload") are rendered in the companion
# server.R. NOTE(review): library(shiny) is not loaded here — presumably the
# app runner attaches it; confirm.
#.libPaths("/home/rebecca/R/x86_64-pc-linux-gnu-library/3.4/")
#.libPaths("/home/maya/R/x86_64-pc-linux-gnu-library/3.4/")
library(leaflet)
library(shinyWidgets)
shinyUI(fluidPage(theme = "bootstrap.css",
  setBackgroundColor("ghostwhite"),
  tags$style(HTML("body {line-height: 1.75}")),
  title = "Pollution-Associated Risk Geospatial Analysis SITE (PARGASITE)",
  titlePanel(h1(HTML(paste(h1(style = "color:black", "Pollution-Associated Risk Geospatial Analysis SITE (PARGASITE)"))), align = "left")),
  tabsetPanel(
    # ---- Tab 1: USA-wide map + upload-your-own-coordinates download ----
    tabPanel(title = HTML(paste(h3(style = "color:black","USA"))),
      sidebarPanel(
        h4("Map View"),
        selectizeInput(inputId = "year",
                       label = "Year",
                       choices = c("1997":"2021"),
                       selected = "2021",
                       multiple = FALSE),
        # NOTE(review): `select =` relies on R's partial argument matching to
        # hit `selected =` (here and in the other pollutant pickers below).
        selectizeInput(inputId = "pollutant",
                       label = "Pollutant",
                       choices = c("PM2.5", "Ozone", "NO2", "SO2", "CO"),
                       select = "PM2.5",
                       multiple = FALSE),
        p(textOutput("notes")),
        h5(textOutput("latlong")),
        h5(textOutput("pollutant_val")),
        hr(),
        h4("Upload dataset to get corresponding pollution estimates"),
        p("The file returned will have a column for each pollutant; the value will be an average of monthly estimates over the specified time period."),
        # Averaging window: start month/year ...
        fluidRow(
          column(6, selectizeInput('start_month', label = "Start month",
                                   choices = c("Jan", "Feb", "March", "April", "May",
                                               "June", "July", "Aug", "Sept", "Oct",
                                               "Nov", "Dec"),
                                   selected = "Jan",
                                   multiple = FALSE)),
          column(6, selectizeInput(inputId = "start_year",
                                   label = "Start Year",
                                   choices = c("1997":"2021"),
                                   selected = "2015",
                                   multiple = FALSE))),
        # ... and end month/year
        fluidRow(
          column(6, selectizeInput('end_month', label = "End month",
                                   choices = c("Jan", "Feb", "March", "April", "May",
                                               "June", "July", "Aug", "Sept", "Oct",
                                               "Nov", "Dec"),
                                   selected = "March",
                                   multiple = FALSE)),
          column(6, selectizeInput(inputId = "end_year",
                                   label = "End Year",
                                   choices = c("1997":"2021"),
                                   selected = "2015",
                                   multiple = FALSE))),
        h5(p("Choose .csv file with Latitude and Longitude columns. A sample input file can be downloaded",downloadLink("downloadData", "here."))),
        fileInput("user_file", " ",
                  multiple = FALSE,
                  accept = c("text/csv",
                             "text/comma-separated-values,
                             text/plain",
                             ".csv")),
        downloadButton("finalDownload", "Download"),hr(),
        # Citation and source-code links
        h6(p("Greenblatt RE, Himes BE. Facilitating Inclusion of Geocoded Pollution Data into Health Studies. AMIA Jt Summits Transl Sci Proc. 2019;2019:553–561.(PMID:",
             a("31259010",href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6568125/",target="_blank"),").", a("GITHUB",href="https://github.com/HimesGroup/pargasite",target="_blank")," repository."))),
      leafletOutput("map", width = "60%",height= 600)
    ),
    # ---- Tab 2: MMSA (metropolitan/micropolitan statistical area) map ----
    tabPanel(title = HTML(paste(h3(style = "color:black","MMSA"))),
      sidebarPanel(
        h4("Map View"),
        selectizeInput(inputId = "year_mmsa",
                       label = "Year",
                       choices = c("1997":"2021"),
                       selected = "2021",
                       multiple = FALSE),
        selectizeInput(inputId = "pollutant_mmsa",
                       label = "Pollutant",
                       choices = c("PM2.5", "Ozone", "NO2", "SO2", "CO"),
                       select = "PM2.5",
                       multiple = FALSE),
        p(textOutput("notes_mmsa")),
        h5(textOutput("latlong_mmsa")),
        h5(textOutput("pollutant_val_mmsa")),
        hr(),
        h4("Use the download button to get pollution estimates corresponding to each MMSA and pollutant"),
        p("The file returned will have three columns. The first and second column lists the GEOID and names of all the available MMSA and the third column returns the pollutant value corresponding to the MMSA. Change the input parameters (year and pollutant) to download the data for different years and pollutants."),
        downloadButton("finalDownload_MMSA", "Download"),hr()
      ),
      leafletOutput("map_mmsa", width = "60%",height= 600)
    ),
    # ---- Tab 3: county-level map ----
    tabPanel(title = HTML(paste(h3(style = "color:black", "County"))),
      sidebarPanel(
        h4("Map View"),
        selectizeInput(inputId = "year_county",
                       label = "Year",
                       choices = c("1997":"2021"),
                       selected = "2021",
                       multiple = FALSE),
        selectizeInput(inputId = "pollutant_county",
                       label = "Pollutant",
                       choices = c("PM2.5", "Ozone", "NO2", "SO2", "CO"),
                       select = "PM2.5",
                       multiple = FALSE),
        p(textOutput("notes_county")),
        h5(textOutput("latlong_county")),
        h5(textOutput("pollutant_val_county")),
        hr(),
        h4("Use the download button to get pollution estimates corresponding to each county and pollutant"),
        p("The file returned will have three columns. The first two columns lists the GOEID and names of all the available counties and the third column returns the pollutant value corresponding to the MMSA. Change the input parameters (year and pollutant) to download the data for different years and pollutants."),
        downloadButton("finalDownload_county", "Download"),hr()
      ),
      leafletOutput("map_county", width = "60%",height= 600)
    ),
    # ---- Tab 4: About page rendered from markdown ----
    tabPanel(title = HTML(paste(h3(style = "color:black", "About"))),
      includeMarkdown("data/home.md")
    ))
  )
)
|
78464aef203b9b93476bf33eb524c586a0f75165
|
15d195cb63018eae33fb6cb95e5ce51c6c155e6a
|
/scripts/04_analyse3.R
|
a4831c74952e58e70dcd171fa742f7dbc807b9d5
|
[] |
no_license
|
tsimonso/Coursera_reprodResearch_courseProject1
|
70149ca8b0694fa3ba0a23a8697f3bcf339908c3
|
e303e4d2fe5e476cf126aafd9fbe0367d9173ebd
|
refs/heads/master
| 2022-11-06T06:35:16.367088
| 2020-06-20T00:56:46
| 2020-06-20T00:56:46
| 272,913,367
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,196
|
r
|
04_analyse3.R
|
## IMPUTING MISSING VALUES
## =======================
## Script 4 of the activity-monitoring analysis. NOTE(review): depends on
## objects created by earlier scripts in this pipeline — `activity` (the raw
## data frame with `steps`, `date`, `interval_t`), `intervalActivity`
## (per-interval means, with a `steps_mean` column) and `wd` (the original
## working directory) — and on dplyr plus the package providing
## missing_plot() (presumably finalfit); confirm against the other scripts.
## Calculate and report the total number of missing values in the dataset (i.e. the total number of rows with NAs)
## ===============================================================================================================
sum(!complete.cases(activity))
## Calculate the proportion of missing data for 'steps'
## ----------------------------------------------------
sum(is.na(activity$steps))/length(activity$steps)
## Devise a strategy for filling in all of the missing values in the dataset. The strategy does not need to be sophisticated.
## For example, you could use the mean/median for that day, or the mean for that 5-minute interval, etc.
## Create a new dataset that is equal to the original dataset but with the missing data filled in.
## ==========================================================================================================================
## Look for patterns in missing data
## ---------------------------------
activity%>%
  missing_plot()
activity%>%
  group_by(date)%>%
  summarise(steps=sum(steps),stepsNArm=sum(steps, na.rm = TRUE))%>% #The value in the first column will be NA if there are NAs that day, The value in the second column will be the total number of steps that day, ignoring the NAs
  filter(is.na(steps))
## interpretation: days have either no data or complete data
#prepare for imputation: attach the per-interval mean steps to every row
activity_imputation<-full_join(activity,intervalActivity,by="interval_t")
activity_imputation%>%
  mutate(imputation=is.na(steps))->activity_imputation #create a logical vector that identifies the rows with missing data
#do the imputation: replace each missing value with the (rounded) mean for its interval
activity_imputation[activity_imputation$imputation==TRUE,]$steps<-round(activity_imputation[activity_imputation$imputation==TRUE,]$steps_mean)
head(activity_imputation)
## Make a histogram of the total number of steps taken each day and Calculate and report the mean and median total number of steps taken per day.
## Do these values differ from the estimates from the first part of the assignment?
## What is the impact of imputing missing data on the estimates of the total daily number of steps?
## ==============================================================================================================================================
## Calculate the total number of steps taken per day
activity_imputation%>%
  group_by(date)%>%
  summarise(steps=sum(steps))->dailyActivity_wImputation
## Make a histogram of the total number of steps taken each day
nrNwi<-sum(complete.cases(dailyActivity_wImputation))
# NOTE(review): setwd() inside a script is fragile — this assumes ./graphs
# exists and that `wd` still points at the project root.
setwd("./graphs")
png("dailyActivity_wImput.png")
hist(dailyActivity_wImputation$steps,ylim=c(0,40),
     main="Distribution of daily activity \n (with imputation)",
     xlab="Number of steps",ylab="Frequency")
par(adj = 0)   # left-align the subtitle (sample size annotation)
title(sub=paste("N=",nrNwi))
par(adj = 0.5) # restore default centering
dev.off()
setwd(wd)
## Calculate and report the mean and median of the total number of steps taken per day
dailyActivity_wImputation%>%
  summarise(steps_mean=mean(steps, na.rm = TRUE), steps_median=median(steps, na.rm = TRUE))->dailyActivity_wImputation_summary
print(dailyActivity_wImputation_summary)
|
e2bff590b5ff4c6c9ecaceb3f605ae5980033b20
|
687807152165cc49493a40b8c02ebbcc3d98aec2
|
/code/highLevelSearchAlgos.R
|
f440b1ea7cdbabe2068148bb162adfab71d0cb1d
|
[
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-4.0",
"CC-BY-2.0"
] |
permissive
|
chirimacha/simple_bandit
|
48cfed8ddd9b4097fd362ec39a49ccb90f1aba6f
|
5fb095914d4cfc1bfa79f6e72d84e8e39b6a5f4b
|
refs/heads/master
| 2021-03-24T13:16:31.121768
| 2017-06-30T22:29:36
| 2017-06-30T22:29:36
| 95,824,071
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,098
|
r
|
highLevelSearchAlgos.R
|
'
################################
Bandit Simulations to use to compare global and bandit searching
This code:
-Calls Bandit Code
-[Optionally] Simulates infestations
-Simulates Searches: Provides mechanism for both a random search and a ring search
-Simulates a search using a bandit based on the parameters of the bandit
To call this code you need to:
Set prevalence/arm parameters
######################################
'
library(reshape2)
library(ggplot2)
library(pROC)
library(sm)
setwd(Sys.getenv("SIMPLE_BANDIT"))
#CALL BANDIT CODE - This uses the bandit code created by S. Gutfraind and S. Nutman
source("code/bandit.R")
source("code/helperFunctions.R")
##' Ring Search ("Battleship")
##' 1. Randomly selects sites until it makes a hit.
##' 2. Explores the ring around each known hit. If all known rings are
##'    exhausted, reverts to random search.
##' On the first call pass st=NULL, which initializes via RingSearchInitialize.
##' @param infestation a rectangular grid of sites (matrix; >0 means infested)
##' @param st the algorithm state (visited matrix, running stats, ring queue, ...)
##' @param max_actions number of searches to conduct in this call before exiting
##' @param params list of parameters: grid size, `ring` radius, `max_cost`,
##'   `verbose`, and optionally a `ContinueInspection(infestation, latest, st)`
##'   predicate deciding whether to keep searching
##' @param random_search TRUE = purely random visiting order, i.e. never follow
##'   rings of known infestations (used for benchmarking)
##' @return the updated state `st`, with `running_stats` grown by one row per visit
RingSearch <- function(infestation, st=NULL, max_actions=Inf, params=NULL, random_search=FALSE) {
  # First call: build a fresh state
  if(is.null(st)) {
    st <- RingSearchInitialize(infestation=infestation, params=params)
  }
  # Cost already spent before this call (max_actions is relative to it)
  initial_cost <- tail(st$running_stats[["total_cost"]], 1)
  ContinueInspection <- params[["ContinueInspection"]]
  if(is.null(ContinueInspection)) {
    # Default stopping rule: budget not exceeded and grid not exhausted
    ContinueInspection <- function(infestation, latest, st) {
      return(latest$total_cost < params$max_cost & dim(latest)[1] < st$total_squares);
    }
  }
  next_stat <- tail(st$running_stats, 1)
  # Main search loop: one site visit per iteration
  while(next_stat$total_visited < st$total_squares & next_stat$total_cost < initial_cost + max_actions &
        ContinueInspection(infestation, next_stat, st)) {
    #next_stat$step <- next_stat$step + 1
    next_site <- NULL
    #if(next_suspected_nb < dim(suspected_nbs)[1]){
    #  browser()
    #}
    # Prefer the queue of ring neighbors of known hits (unless random_search)
    while (st$next_suspected_nb <= dim(st$suspected_nbs)[1] & (! random_search)) {
      next_nb <- st$suspected_nbs[st$next_suspected_nb,]
      st$next_suspected_nb <- st$next_suspected_nb + 1
      if(st$visited[next_nb$lat, next_nb$lon] == 0) {
        next_site <- next_nb
        st$visited_coordinates<-rbind(st$visited_coordinates,next_site)
        break
      } else {
      }
    }
    # Otherwise fall back to the pre-shuffled random visiting order
    while (is.null(next_site) & st$next_random_idx <= st$total_squares) {
      next_site <- st$randomized_sites[st$next_random_idx,]
      if (st$visited[next_site$lat, next_site$lon] == 0) {
        st$next_random_idx <- st$next_random_idx + 1
        st$visited_coordinates<-rbind(st$visited_coordinates,next_site)
        break
      } else {
        next_site <-NULL
      }
      st$next_random_idx <- st$next_random_idx + 1
    }
    # No unvisited site left anywhere: stop
    if(is.null(next_site)) {
      break
    }
    next_stat$total_cost <- next_stat$total_cost + 1
    next_stat$total_visited <- next_stat$total_visited + 1
    st$visited[next_site$lat, next_site$lon] <- 1.0 #visitted == 1 (visited), == 2 (found infestation)
    if(infestation[next_site$lat, next_site$lon] > 0) {
      # Hit: record it, and enqueue its unvisited ring neighbors
      st$visited[next_site$lat, next_site$lon] <- 2.0 #dim(st$running_stat)[1]/10.0
      st$known_infested <- rbind(st$known_infested, next_site)
      next_stat$total_found <- next_stat$total_found + 1
      next_stat$total_bugs <- next_stat$total_bugs + (infestation[next_site$lat, next_site$lon])
      neighbors <- RingSearchGetUnvisitedNbs(next_site, st$visited, ring=params$ring)
      st$suspected_nbs <- rbind(st$suspected_nbs, neighbors)
      # keep row names sequential so the queue index stays valid
      if (dim(st$suspected_nbs)[1]>0) {
        rownames(st$suspected_nbs) <- seq(dim(st$suspected_nbs)[1])
      }
      # print(st$suspected_nbs)
      #print(sprintf("1, lon=%d, lat=%d", next_site$lon, next_site$lat))
    } else {
      #print(0)
    }
    # Append one row of running statistics per visit
    st$running_stats <- rbind(st$running_stats, next_stat)
    next_site <- NULL
  }
  # Derived prevalence estimates over the whole history
  st$running_stats$infestation_lower_bound <- st$running_stats$total_found/st$total_squares
  st$running_stats$infestation_estimate <- st$running_stats$total_found/st$running_stats$total_visited
  row.names(st$running_stats) <- seq(dim(st$running_stats)[1])
  st$running_stats$unfoundprevalence <- (st$true.prevalence-st$running_stats$total_found)/(st$total_squares-st$running_stats$total_visited)
  if(params[["verbose"]]) {
    print(tail(st$running_stats,1))
  }
  return(st)
}
# Find the unvisited houses in the square ring of radius `ring` around an
# infested house.
#
# Arguments:
#   house   - one-row data frame with `lon` and `lat` (1-based grid indices)
#   visited - matrix of visit marks (0 = unvisited); dims give the grid bounds
#             (rows = lat, cols = lon)
#   ring    - half-width of the square neighborhood to scan
#
# Returns a data frame with columns lon, lat: every in-bounds, unvisited cell
# in the (2*ring+1)^2 square, excluding the house itself. Rows are ordered by
# lon, then lat (the nested-loop order below).
# NOTE(review): `all(nb == house)` compares a list against a data-frame row;
# it relies on positional/element-wise coercion — confirm house always carries
# exactly the lon/lat fields in compatible order.
RingSearchGetUnvisitedNbs <- function(house, visited,ring) {
  max_lat <- dim(visited)[1]
  max_lon <- dim(visited)[2]
  # empty data frame with the right column names
  nbs <- read.csv(text="lon,lat")
  for(x in seq(house$lon-ring, house$lon+ring)) {
    if (x < 1 | x > max_lon) {
      next  # out of bounds in longitude
    }
    for(y in seq(house$lat-ring, house$lat+ring)) {
      if (y < 1 | y > max_lat) {
        next  # out of bounds in latitude
      }
      nb = list(lon=x,lat=y) #wishlist: prioritize by range
      if (all(nb == house)) {
        next  # skip the house itself
      }
      if (visited[nb$lat,nb$lon] > 0) {
        next  # already visited
      }
      nbs <- rbind(nbs, nb)
    }
  }
  return(nbs)
}
# Initialize the state object for RingSearch/RandomSearch.
#
# Arguments:
#   infestation - rectangular grid matrix (>0 marks an infested cell)
#   params      - parameter list (accepted for interface symmetry; not used here)
#
# Returns a list `st` holding:
#   total_squares       - number of grid cells
#   true.prevalence     - number of truly infested cells
#   visited             - 0/1/2 visit-mark matrix (0 = unvisited)
#   randomized_sites    - all cells (lat, lon) in a random visiting order
#   running_stats       - one-row data frame of cumulative counters, grown per visit
#   known_infested      - accumulator for confirmed hits (empty data frame)
#   suspected_nbs       - queue of ring neighbors to visit next (empty data frame)
#   next_suspected_nb   - index into suspected_nbs
#   next_random_idx     - index into randomized_sites
#   visited_coordinates - log of visited (lon, lat) in order
# Requires reshape2 (melt), loaded at the top of this file.
RingSearchInitialize <- function(infestation, params=NULL) {
  st <- list()
  st$total_squares <- dim(infestation)[1] * dim(infestation)[2]
  st$true.prevalence <- sum(colSums(infestation != 0))
  st$visited <- matrix(0, nrow = dim(infestation)[1], ncol = dim(infestation)[2])
  # Flatten the grid to (lat, lon, value) rows and shuffle them.
  # (The original called melt() twice in a row; the redundant duplicate call
  # has been removed.)
  st$randomized_sites <- melt(st$visited)
  st$randomized_sites <- st$randomized_sites[sample(st$total_squares), ]
  names(st$randomized_sites) <- c("lat", "lon", "val")
  st$randomized_sites$val <- NULL
  st$running_stats <- data.frame(total_cost = c(0), total_visited = c(0), total_found = c(0), total_bugs = c(0))
  # Empty accumulators (read.csv(text=...) builds an empty data frame)
  st$known_infested <- read.csv(text = "UNICODE")
  st$suspected_nbs <- read.csv(text = "UNICODE")
  st$next_suspected_nb <- 1
  st$next_random_idx <- 1
  st$visited_coordinates <- read.csv(text = "lon,lat")
  return(st)
}
##' Random Search
##' Purely random site visiting, implemented by delegating to RingSearch with
##' ring-following disabled (random_search forced to TRUE). Same arguments and
##' return value as RingSearch; the random_search argument here is accepted
##' only for interface compatibility and is ignored.
RandomSearch <- function(infestation, st=NULL, max_actions=Inf, params=NULL, random_search=FALSE) {
  RingSearch(
    infestation = infestation,
    st = st,
    max_actions = max_actions,
    params = params,
    random_search = TRUE
  )
}
#HELPER FUNCTIONS FOR THE BANDIT
# Reward for one block of searches: log10(1 + bugs found) per individual search.
#
# Arguments:
#   new_st     - search state whose new_st$running_stats$total_bugs column holds
#                the cumulative bug count after every individual search
#   block.size - number of houses searched in this pull of the arm (assumed to
#                be <= the number of recorded searches, as in the original)
#
# Returns a numeric vector of length block.size: the per-search rewards.
BugReward <- function(new_st, block.size) {
  # Last block.size+1 cumulative counts bracket the block's block.size searches
  last <- tail(new_st$running_stats$total_bugs, (block.size + 1))
  # Vectorized replacement of the original element-wise loop:
  # reward_k = log10(1 + bugs found on search k)
  log10(1 + diff(last))
}
# Alternative reward: number of infested houses found per individual search.
#
# Arguments:
#   new_st     - search state whose new_st$running_stats$total_found column holds
#                the cumulative count of infested houses after every search
#   block.size - number of houses searched in this pull of the arm (assumed to
#                be <= the number of recorded searches, as in the original)
#
# Returns a numeric vector of length block.size: houses found on each search.
HouseReward <- function(new_st, block.size) {
  # Last block.size+1 cumulative counts -> block.size per-search increments
  last <- tail(new_st$running_stats$total_found, (block.size + 1))
  # Vectorized replacement of the original element-wise loop
  diff(last)
}
# Find total bugs: totals for one block of searches — the number of bugs and
# the number of infested houses found over the last block.size searches.
#
# Arguments:
#   new_st     - search state with cumulative total_bugs / total_found columns
#   block.size - number of houses searched in this pull of the arm
#
# Returns a 1x2 matrix with columns last.bug (bugs found in the block) and
# last.house (infested houses found in the block).
BugsFound <- function(new_st, block.size) {
  stats <- new_st$running_stats
  bug_window <- tail(stats$total_bugs, block.size + 1)
  house_window <- tail(stats$total_found, block.size + 1)
  # Difference between the end and the start of the block's window
  last.bug <- bug_window[block.size + 1] - bug_window[1]
  last.house <- house_window[block.size + 1] - house_window[1]
  # cbind deparses the variable names into the column names
  cbind(last.bug, last.house)
}
#Function to find remaining prevalence (can be used for testing)
# UnfoundPrevalence <- function(new_st) {
#   unfound <-tail(new_st$running_stats$unfoundprevalence,1)
#   return(unfound)
# }
##' Simulate a Bandit search on grid with blocks
##' Each arm of the bandit is one infestation grid; pulling an arm runs
##' block.size searches on that grid with SearchStrategy and feeds the
##' per-search rewards (in randomized order) back into the bandit.
##' @params test.time: number of times to run the bandit (arm pulls)
##' @params params: infestation grid parameters
##' @params params.arm: arms and prevalence parameters
##' @params infestations: generate new infestations (if NULL), or start with old ones [for benchmarking]
##'         each infestation is a rectangular grid
##' @params block.size: number of searches conducted each time an arm is pulled
##' @params params.bandit: list with `name` ("rc", "ucb1" or "egreedy") plus the
##'         matching hyperparameters, used to initialize the bandit
##' @params SearchStrategy: strategy used to search (RingSearch/RandomSearch)
##' @params RewardFunction: function used to calculate rewards for the bandit (takes new_st, block.size)
##' NOTE: this algorithm is designed to run with a UCB1, RC or e-greedy bandit. If additional
##' bandit algorithms are added, additional code will be needed here and in "bandit.R".
##' @return the per-arm search states, plus `results` (cumulative counts per pull)
##'         and `sites.visited` (visit log with a stacked latitude for plotting)
BanditSearchGridBlocks <- function(test.time=NULL, params=NULL,
                                   block.size=NULL, params.bandit=NULL,
                                   params.arm=NULL, infestations=NULL,
                                   SearchStrategy=NULL, RewardFunction=NULL) {
  # Optionally set up infestations
  if(is.null(infestations)) {
    # GENERATE INFESTATIONS: one fresh grid per arm of the bandit
    infestations <- lapply(params.arm, GenerateInfestation, params=params)
  }
  n_arms <- length(infestations)
  search_stats <- list()
  # Information about the infestations generated (e.g. houses with bugs, total number of bugs on grid)
  infestation.stats <- InfestationStats(infestations=infestations) # See helper functions for further documentation
  # SET UP BANDIT ARMS: initialize a zero-action search state per infestation
  search_stats <- lapply(infestations, SearchStrategy, st=NULL, max_actions=0, params=params)
  # Internal function to pull an arm: run block.size searches on that arm's grid
  pull_arm <- function(chosen_arm, search_stats, block.size, SearchStrategy) {
    st <- search_stats[[chosen_arm]]
    new_st <- SearchStrategy(data.frame(infestations[[chosen_arm]]), st=st, max_actions=block.size, params=params)
    search_stats[[chosen_arm]] <- new_st
    return(search_stats)
  }
  print("Stochastic search with bandit");
  # Running tables to keep track of searches
  times <- seq(1, test.time)
  arms <- rep(0, length(times))
  rewards <- rep(0, length(times))
  mR <- rep(0, length(times))
  # unfoundprev <- rep(0,length(times))
  blocking <- rep(0, length(times))
  # ps_holding <- matrix(nrow=length(times),ncol=length(params.arm))
  bugs.houses <- matrix(nrow=length(times), ncol=2) # per-pull bug/house totals
  static.houses.infested <- rep(infestation.stats[,"total.houses.infested"], length(times))
  static.bugs.available <- rep(infestation.stats[,"total.bugs.available"], length(times))
  static.rewards.available <- rep(infestation.stats[,"total.rewards.available"], length(times))
  # Initialize the bandit according to params.bandit$name
  if (params.bandit$name == "rc") {
    bandit <- initialize_rc(n_arms=n_arms, learning_rate=params.bandit$learning_rate, discount_factor=params.bandit$discount_factor)
  }
  else if (params.bandit$name == "ucb1") {
    bandit <- initialize_ucb1(n_arms=n_arms, reward_func=params.bandit$reward_func)
  } else if (params.bandit$name == "egreedy") {
    bandit <- initialize_greedy(n_arms=n_arms, epsilon=params.bandit$epsilon, reward_func=params.bandit$reward_func)
    #### Wishlist: add bandit algorithms besides UCB/RC/egreedy [must also update in "bandit.R"]
  } else {
    sprintf("Error: cannot call bandit (Bandit type unknown, not specified) %s", params.bandit$name)
  }
  # print(bandit)
  # Now run the search
  for (trial in times) {
    ps <- NULL
    for (arm_idx in seq(n_arms)) {
      ps <- c(ps, probability_arm(bandit, arm_idx))
    }
    # cat("trial: ", trial, " ", "ps: ", ps, "\n")
    chosen_arm <- next_arm(bandit)
    #print(chosen_arm)
    search_stats <- pull_arm(chosen_arm, search_stats, block.size, SearchStrategy)
    reward <- RewardFunction(search_stats[[chosen_arm]], block.size)
    # reward function of the form RewardFunction(new_st, block.size)
    # unfound <- UnfoundPrevalence(search_stats[[chosen_arm]])
    bug <- BugsFound(search_stats[[chosen_arm]], block.size)
    # cat(" arm: ", chosen_arm, " reward", reward, " unfound", unfound, "total bugs&houses", bug, "\n")
    # IMPORTANT: The order in which chiris are presented to the bandit from a block search is randomized
    randomizer <- runif(block.size)
    random.chiris <- data.frame(cbind(reward, randomizer))
    random.chiris <- random.chiris[order(randomizer),]
    # print(random.chiris)
    # Update the bandit once per search in the block.
    # BUG FIX: the original looped `for (house.counter in block.size)`, which
    # iterates exactly once (over the scalar), so the bandit received only the
    # last search's reward instead of all block.size rewards.
    for (house.counter in seq_len(block.size)) {
      bandit <- update_bandit(bandit, chosen_arm, random.chiris$reward[house.counter]) # change bandit preferences
    }
    # cat(" preferences:", paste(bandit$preferences), "\n")
    # Update tables for benchmarking
    # Rewards reported are the average reward in the set of pulls
    rewards[trial] <- sum(reward)/block.size
    arms[trial] <- chosen_arm
    # ps_holding[trial,] <-ps
    # mR[trial] <- bandit$mean_reward
    # unfoundprev[trial] <- unfound
    blocking[trial] <- block.size
    bugs.houses[trial,] <- bug # track total bugs/houses this pull
  }
  # colnames(ps_holding)=params.arm
  colnames(bugs.houses) = c("total.bugs.found", "total.infested.houses.found")
  # Benchmarking functions (older, richer result table kept for reference)
  # results <- data.frame(ps_holding,bugs.houses,
  #                       T=times, ChosenArm=arms, BlockAvgReward=rewards, CumulativeAvgReward=cumsum(rewards*blocking),
  #                       MeanReward=mR, UnfoundPrev.ChosenArm=unfoundprev, BlockSize=blocking,
  #                       static.rewards.available,static.bugs.available,static.houses.infested)
  # results$CumulativeHousesFound<-cumsum(results$total.infested.houses.found)
  # results$CumulativeBugsFound<-cumsum(results$total.bugs.found)
  # results<-sum(rewards*blocking)
  results <- data.frame(total.bugs.found=cumsum(bugs.houses[,"total.bugs.found"]),
                        total.houses.found=cumsum(bugs.houses[,"total.infested.houses.found"]),
                        zeit=times, ChosenArm=arms, houses.searched=block.size*times,
                        static.bugs.available, static.houses.infested)
  # results<-tail(results,1)
  rownames(results) <- NULL
  search_stats$results <- results
  # Create database of houses searched in order and update latitude. This allows for plotting of searches over time.
  # NOTE(review): rep(order, 10) hard-codes 10 visited coordinates per pull —
  # this appears to assume block.size == 10; confirm before using other sizes.
  for (arm.used in 1:n_arms) {
    order <- results[which(results$ChosenArm == arm.used), "houses.searched"]
    order <- rep(order, 10)
    order <- sort(order)
    order <- cbind(order, search_stats[[arm.used]]$visited_coordinates)
    order$chosen.arm <- arm.used
    names(order) <- c("order", "lat", "lon", "chosen.arm")
    if (arm.used == 1) {
      sites.visited <- order
    }
    else {
      sites.visited <- rbind(sites.visited, order)
    }
  }
  # Stack each arm's grid vertically so all arms can share one latitude axis
  sites.visited$combined.lat <- (sites.visited$chosen.arm - 1)*params$nrow + sites.visited$lat
  # (base::order() is still found here despite the local `order` data frame,
  # because R skips non-function bindings in a call position)
  sites.visited <- sites.visited[order(sites.visited$order),]
  rownames(sites.visited) <- NULL
  search_stats$sites.visited <- sites.visited
  return(search_stats)
}
|
88dc2fe9de31c28a7ef0fd2f83ced819800f9b23
|
8c13beebb7ca53ef301ebc93076a4dbf9b9b9ca9
|
/R Codes/BIM_Rcode_ORDER.R
|
c4025f2fa4cca71e5d2083f1335d3f189f00b62b
|
[] |
no_license
|
petertea96/Genetic-Association-Methodology-Research
|
933aeb00583f352c159969758d67f94930a42335
|
03943856723a534455a1fcdacb56faa2705f290a
|
refs/heads/master
| 2020-04-08T22:39:45.135568
| 2019-04-10T19:48:48
| 2019-04-10T19:48:48
| 159,796,100
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 522
|
r
|
BIM_Rcode_ORDER.R
|
#Today is July 25th, 2018
#The order() function just takes the (200x200) haplotype distance matrix and reorders the elements
#such that the columns and rows are ordered from haplotype 1 to haplotype 200.
#Example: Individual 1 has haplotype 1 and haplotype 2. Individual 4 has haplotype 8 and haplotype 9.
#' Reorder a haplotype distance matrix by numeric row/column names.
#'
#' The input matrix is indexed by haplotype labels ("1", "2", ...) whose
#' rows/columns may appear in arbitrary order; this returns the same values
#' with rows and columns arranged as haplotype 1..n.
#' NOTE: this shadows base::order() when sourced -- consider renaming.
#'
#' @param my.matrix An n x n numeric matrix whose dimnames are the
#'   character forms of 1..n.
#' @param n Number of haplotypes (matrix dimension); defaults to 200 to
#'   match the original hard-coded study size.
#' @return An unnamed n x n double matrix ordered 1..n on both axes.
order <- function(my.matrix, n = 200) {
  idx <- as.character(seq_len(n))
  # Character subsetting pulls every row/column into numeric order in one
  # vectorized step (the original filled the result element by element).
  out <- unname(my.matrix[idx, idx, drop = FALSE])
  # The original prefilled a double matrix, so coerce for identical output
  # even when the input happens to be stored as integer.
  storage.mode(out) <- "double"
  out
}
|
747577dc8c34fcefdb8d754542d18eab6fb66548
|
40bfd1969152253992498197f8b2b36e136e3a7b
|
/run_Analysis.R
|
aad7582e84b742328bf7cb2c78fed4e9b5e02be5
|
[] |
no_license
|
lemurey/cleaning-data-project
|
8fa7303a193855c582012a4b4d8edb176f696cb2
|
aca043ca903d5f444d2f70e4ae1424a4d9a7d04e
|
refs/heads/master
| 2020-06-03T14:32:59.966380
| 2015-03-22T16:33:04
| 2015-03-22T16:33:04
| 32,684,190
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,336
|
r
|
run_Analysis.R
|
## run_Analysis.R
## Builds a tidy summary of the UCI HAR smartphone-activity dataset:
## reads the train/test files, merges them, labels the columns, keeps the
## mean/std features, names the activities, averages each kept feature per
## subject x activity, and writes the result to tidydata.txt.
## NOTE(review): relies on the standard UCI HAR directory layout
## (train/, test/, features.txt, activity_labels.txt) -- confirm the data
## folder matches before running.

## Change this to the working directory that contains the UCI HAR dataset folder
## NOTE(review): setwd() with this placeholder path will error as written;
## the user is expected to edit it before running the script.
setwd('valid path to UCI HAR dataset')
## This gets the list of files (with path) that need to be imported.
## list.files() returns names alphabetically, so train and test are ordered
## the same way and the cbind below lines their columns up identically.
train_files <- paste0('./train/',
                      list.files(path = './train',
                                 pattern='.*\\.txt'))
test_files <- paste0('./test/',
                     list.files(path = './test',
                                pattern='.*\\.txt'))
## this reads in the files grabbed above.
## Each .txt becomes a data.frame; gluing them side by side yields
## subject | feature columns | activity code for each set.
trainread <- do.call('cbind',lapply(train_files,read.table))
testread <- do.call('cbind',lapply(test_files,read.table))
## bind the training and test sets together
merged <- rbind(trainread,testread)
## add the names into the dataset, grabbing the names from the features list
## in the HAR data folder use make.names and gsub to make them syntactically
## valid and easier to type (make.names turns punctuation into dots; the
## gsub then strips the doubled dots left behind).
features <- read.table('./features.txt')
featnames <- gsub(pattern = '\\.{2}',
                  replacement = '',
                  make.names(features$V2))
namescol <- c('subject',featnames,'activity')
colnames(merged) <- namescol
## this grabs the indices of any of the features which have mean or std in their
## name then subset the data based on these results
## (+1 shifts past the leading 'subject' column; 563 is the trailing
## 'activity' column in the merged frame)
meanfeatures <- grep(pattern = '.*mean.*',features$V2)
stdfeatures <- grep(pattern = '.*std.*',features$V2)
merged <- merged[,c(1,meanfeatures+1,stdfeatures+1,563)]
## this transforms activity from numbers to the names of the activites
## (activity_labels.txt maps the integer codes 1..6 to label strings)
activities <- read.table('./activity_labels.txt')
merged$activity <- as.factor(merged$activity)
levels(merged$activity) <- activities$V2
## here we create the tidy data set
## For each of the 30 subjects, average every kept feature within each
## activity. Column 81 is 'activity' in the subsetted frame, so it is
## dropped from the numeric aggregation and reintroduced as the grouping
## variable; the per-subject summaries are stacked row-wise.
tidydata <- NULL
for (i in 1:30) {
  tempdata <- merged[merged$subject==i,]
  tempvals <- aggregate(tempdata[,-81],
                        list('activity' = tempdata$activity),
                        mean)
  tidydata <- rbind(tidydata,tempvals)
}
## add on a marker of which data came from the test or training set
## (subject ids are disjoint between train and test in this dataset)
trainsub <- unique(read.table('./train/subject_train.txt'))
testsub <- unique(read.table('./test/subject_test.txt'))
tidydata$dataset[tidydata$subject %in% trainsub$V1] <- 'train'
tidydata$dataset[tidydata$subject %in% testsub$V1] <- 'test'
##write the tidy data set to a file
write.table(tidydata,'tidydata.txt',row.names=FALSE)
|
1f5f7c59077595b97b70efea5c0c84d4660e4075
|
032bfab14ff7d77349273c52ac18a181a9996192
|
/cachematrix.R
|
7c341c4cdd210fa8527be08f46da53c4fca5f335
|
[] |
no_license
|
davidcarnahan/ProgrammingAssignment2
|
56de8ce9d614fb82df33f09a2dd52602c5e1390e
|
c99e662555332fb9813a5baa8d09d14879290cc2
|
refs/heads/master
| 2021-01-15T13:00:37.712046
| 2016-08-25T05:25:21
| 2016-08-25T05:25:21
| 66,516,805
| 0
| 0
| null | 2016-08-25T02:25:38
| 2016-08-25T02:25:37
| null |
UTF-8
|
R
| false
| false
| 1,076
|
r
|
cachematrix.R
|
## Matrix inversion can be a costly compution -- so caching may be of signficant benefit.
## The two functions below will 1) create a special matrix object that can cache its inverse, and
## 2) either compute/retrieve the inversion depending on whether it has been calculated already.
## This function creates a special 'matrix' object that can cache its inverse.
## Build a cache-aware "matrix" object: a list of four closures sharing the
## matrix `x` and a cached inverse. The inverse slot starts empty and is
## wiped whenever a new matrix is stored, so stale inverses can never be
## served.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # replacing the matrix invalidates whatever inverse was cached
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) cached_inverse <<- inverse
  getinverse <- function() cached_inverse
  # Expose the four accessors under the names cacheSolve() expects.
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## This function computes the inverse of the special matrix above; unless it has already been created.
## In the latter case, it will be retrieved from cache.
## Return the inverse of the special "matrix" made by makeCacheMatrix(),
## computing it on the first call and serving the cached copy afterwards.
##
## Args:
##   x:   a cache-matrix object exposing get/getinverse/setinverse closures
##   ...: further arguments forwarded to solve() (e.g. tol)
## Returns: the inverse of x$get().
cacheSolve <- function(x, ...) {
  invMat <- x$getinverse()
  if(!is.null(invMat)) {
    message("Getting cached data!")
    return(invMat)
  }
  d <- x$get()
  # Bug fix: forward ... to solve(); the original declared ... but
  # silently dropped it, so caller-supplied solve() options were ignored.
  invMat <- solve(d, ...)
  x$setinverse(invMat)
  invMat
}
|
22b6423b17af34884eaad388bf62436773c15c40
|
007a5459cc41c25d6450fdb2d6ac331587b6b439
|
/R/data.R
|
537b9ac509ccee0a0645c18fa90ebc3e0f4c88f7
|
[] |
no_license
|
Edouard-Legoupil/APLA_Dataset
|
7637fa1f6b29def80ee3b9e9a8c28478a7106575
|
338c79e20ceb9be38b77de685ba76bfbe90658f4
|
refs/heads/main
| 2022-12-31T12:11:27.220906
| 2020-10-12T20:29:28
| 2020-10-12T20:29:28
| 303,468,494
| 0
| 0
| null | 2020-10-12T17:44:37
| 2020-10-12T17:44:36
| null |
UTF-8
|
R
| false
| false
| 34,664
|
r
|
data.R
|
#' @title APLA_Database
#' @description The original, unprocessed APLA dataset.
#' @format A data frame with 551 rows and 262 variables:
#' \describe{
#' \item{\code{Q1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q2}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q5#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q5#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q5#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q5#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q7#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q7#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q7#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q7#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q9#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q9#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q9#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q9#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q11#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q11#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q11#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q11#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q13#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q13#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q13#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q13#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q15#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q15#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q15#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q15#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q17#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q17#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q17#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q17#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q19#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q19#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q19#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q19#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q200#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q200#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q200#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q200#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q202#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q202#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q202#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q202#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q204#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q204#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q204#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q204#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q278#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q278#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q278#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q278#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q280#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q280#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q280#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q280#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q133#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q133#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q133#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q133#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q274#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q274#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q274#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q274#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q188#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q188#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q188#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q188#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q170#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q170#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q170#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q170#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q172#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q172#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q172#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q172#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q174#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q174#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q174#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q174#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q180#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q180#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q180#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q180#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q184#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q184#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q184#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q184#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q101#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q101#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q101#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q101#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q103#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q103#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q103#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q103#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q107#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q107#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q107#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q107#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q113#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q113#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q113#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q113#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q109#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q109#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q109#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q109#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q253#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q253#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q253#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q253#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q208#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q208#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q208#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q208#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q145#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q145#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q145#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q145#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q190#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q190#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q190#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q190#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q194#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q194#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q194#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q194#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q320#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q320#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q320#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q320#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q54#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q54#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q54#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q54#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q58#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q58#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q58#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q58#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA1#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA1#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA1#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA1#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA3#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA3#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA3#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA3#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA5#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA5#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA5#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA5#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA7#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA7#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA7#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA7#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA9#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA9#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA9#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA9#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA11#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA11#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA11#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA11#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA13#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA13#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA13#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA13#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA15#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA15#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA15#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA15#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA17#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA17#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA17#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA17#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA19#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA19#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA19#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA19#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA23#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA23#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA23#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA23#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA27#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA27#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA27#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA27#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA29#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA29#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA29#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA29#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA31#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA31#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA31#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA31#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA33#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA33#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA33#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA33#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA35#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA35#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA35#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA35#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA37#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA37#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA37#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA37#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA39#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA39#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA39#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA39#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA41#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA41#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA41#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA41#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA43#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA43#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA43#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA43#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA45#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA45#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA45#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA45#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA47#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA47#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA47#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA47#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA49#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA49#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA49#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA49#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA51#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA51#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA51#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA51#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA53#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA53#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA53#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA53#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA55#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA55#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA55#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA55#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA57#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA57#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA57#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA57#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA59#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA59#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA59#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA59#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA61#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA61#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA61#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA61#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA63#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA63#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA63#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA63#4_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA65#1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA65#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA65#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA65#4_1_1}}{character COLUMN_DESCRIPTION}
#'}
#' @source \url{http://somewhere.important.com/}
"APLA_Database"
#' @title APLA_Regulatory_Complexity
#' @description The APLA regulatory-complexity data: the file in which the
#' regulatory-complexity dependent variable was calculated.
#' @format A data frame with 552 rows and 135 variables:
#' \describe{
#' \item{\code{Q1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q2}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q5#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q5#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q7#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q7#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q9#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q9#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q11#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q11#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q13#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q13#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q15#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q15#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q17#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q17#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q19#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q19#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q200#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q200#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q202#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q202#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q204#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q204#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q278#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q278#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q280#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q280#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q133#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q133#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q274#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q274#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q188#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q188#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q170#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q170#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q172#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q172#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q174#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q174#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q180#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q180#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q184#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q184#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q101#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q101#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q103#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q103#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q107#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q107#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q113#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q113#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q109#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q109#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q253#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q253#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q208#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q208#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q145#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q145#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q190#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q190#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q194#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q194#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q320#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q320#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q54#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q54#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q58#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q58#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA1#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA1#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA3#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA3#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA5#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA5#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA7#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA7#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA9#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA9#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA11#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA11#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA13#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA13#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA15#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA15#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA17#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA17#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA19#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA19#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA23#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA23#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA27#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA27#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA29#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA29#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA31#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA31#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA33#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA33#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA35#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA35#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA37#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA37#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA39#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA39#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA41#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA41#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA43#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA43#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA45#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA45#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA47#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA47#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA49#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA49#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA51#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA51#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA53#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA53#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA55#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA55#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA57#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA57#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA59#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA59#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA61#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA61#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA63#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA63#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA65#2_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{LA65#3_1_1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Art}}{double COLUMN_DESCRIPTION}
#' \item{\code{Total}}{double COLUMN_DESCRIPTION}
#' \item{\code{Percentage_Art}}{double COLUMN_DESCRIPTION}
#'}
#' @source \url{http://somewhere.important.com/}
"APLA_Regulatory_Complexity"
#' @title APLA_Liberalisation
#' @description The APLA liberalisation data: the file in which the two
#' dependent variables (liberalisation and restrictiveness) were calculated.
#' @format A data frame with 552 rows and 71 variables:
#' \describe{
#' \item{\code{Q1}}{character COLUMN_DESCRIPTION}
#' \item{\code{Q2}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q200#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q202#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q204#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q278#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q280#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q133#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q274#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q188#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q170#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q172#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q174#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q180#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q184#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q101#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q103#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q107#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q113#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q109#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q253#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q208#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q145#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q190#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q194#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q320#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q54#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{Q58#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA1#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA3#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA5#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA7#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA9#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA11#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA13#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA15#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA17#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA19#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA23#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA27#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA29#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA31#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA33#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA35#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA37#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA39#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA41#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA43#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA45#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA47#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA49#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA51#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA53#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA55#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA57#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA59#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA61#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA63#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{LA65#2_1}}{double COLUMN_DESCRIPTION}
#' \item{\code{...60}}{logical COLUMN_DESCRIPTION}
#' \item{\code{TOTAL1}}{double COLUMN_DESCRIPTION}
#' \item{\code{TOTAL0}}{double COLUMN_DESCRIPTION}
#' \item{\code{...63}}{logical COLUMN_DESCRIPTION}
#' \item{\code{Total_10}}{double COLUMN_DESCRIPTION}
#' \item{\code{...65}}{logical COLUMN_DESCRIPTION}
#' \item{\code{Ratio1toTotal}}{double COLUMN_DESCRIPTION}
#' \item{\code{...67}}{logical COLUMN_DESCRIPTION}
#' \item{\code{Liberalisation}}{double COLUMN_DESCRIPTION}
#' \item{\code{Formula0stoTotalinYear}}{logical COLUMN_DESCRIPTION}
#' \item{\code{Restrictiveness}}{double COLUMN_DESCRIPTION}
#' \item{\code{Formula:1stoTotalinaYear}}{character COLUMN_DESCRIPTION}
#'}
#' @source \url{http://somewhere.important.com/}
"APLA_Liberalisation"
#' @title Data_APLA
#' @description Data_APLA contains APLA's regulatory complexity and
#' liberalisation scores, along with a number of relevant variables that
#' are used in further research.
#' @format A data frame with 551 rows and 33 variables:
#' \describe{
#' \item{\code{Country}}{character COLUMN_DESCRIPTION}
#' \item{\code{Year}}{double COLUMN_DESCRIPTION}
#' \item{\code{Art}}{double COLUMN_DESCRIPTION}
#' \item{\code{Total}}{double COLUMN_DESCRIPTION}
#' \item{\code{Regulatory_Complexity}}{double COLUMN_DESCRIPTION}
#' \item{\code{Polity2}}{double COLUMN_DESCRIPTION}
#' \item{\code{IntMigStock}}{double COLUMN_DESCRIPTION}
#' \item{\code{Left1_Other0}}{double COLUMN_DESCRIPTION}
#' \item{\code{Liberalisation}}{double COLUMN_DESCRIPTION}
#' \item{\code{MigSpain}}{double COLUMN_DESCRIPTION}
#' \item{\code{MigUS}}{double COLUMN_DESCRIPTION}
#' \item{\code{MigSpainUS}}{double COLUMN_DESCRIPTION}
#' \item{\code{TotalPopinY}}{double COLUMN_DESCRIPTION}
#' \item{\code{MigSpainUSPerc}}{double COLUMN_DESCRIPTION}
#' \item{\code{GrowthGDPperCap}}{double COLUMN_DESCRIPTION}
#' \item{\code{Trade_Perc_GDP}}{double COLUMN_DESCRIPTION}
#' \item{\code{RefugeeAndLikeSit}}{double COLUMN_DESCRIPTION}
#' \item{\code{TotalPop}}{double COLUMN_DESCRIPTION}
#' \item{\code{RefAsPerc}}{double COLUMN_DESCRIPTION}
#' \item{\code{GDPperCapPPP}}{double COLUMN_DESCRIPTION}
#' \item{\code{Lib100}}{double COLUMN_DESCRIPTION}
#' \item{\code{x}}{double COLUMN_DESCRIPTION}
#' \item{\code{y}}{double COLUMN_DESCRIPTION}
#' \item{\code{CountryID}}{character COLUMN_DESCRIPTION}
#' \item{\code{South_America}}{double COLUMN_DESCRIPTION}
#' \item{\code{MigSpainUSLog}}{double COLUMN_DESCRIPTION}
#' \item{\code{lag1}}{double COLUMN_DESCRIPTION}
#' \item{\code{lag2}}{double COLUMN_DESCRIPTION}
#' \item{\code{_est_est1}}{double COLUMN_DESCRIPTION}
#' \item{\code{_est_est2}}{double COLUMN_DESCRIPTION}
#' \item{\code{_est_est3}}{double COLUMN_DESCRIPTION}
#' \item{\code{_est_est4}}{double COLUMN_DESCRIPTION}
#' \item{\code{VDEM_Polyarchy}}{double COLUMN_DESCRIPTION}
#'}
#' @source \url{http://somewhere.important.com/}
"Data_APLA"
#' @title Data_APLA_1
#' @description Data_APLA_1 is a slightly modified version of the dataset Data_APLA,
#' which makes it easier to plot or chart certain variables.
#' @format A data frame with 580 rows and 33 variables:
#' \describe{
#' \item{\code{Country}}{character COLUMN_DESCRIPTION}
#' \item{\code{Year}}{double COLUMN_DESCRIPTION}
#' \item{\code{Art}}{double COLUMN_DESCRIPTION}
#' \item{\code{Total}}{double COLUMN_DESCRIPTION}
#' \item{\code{Regulatory_Complexity}}{double COLUMN_DESCRIPTION}
#' \item{\code{Polity2}}{double COLUMN_DESCRIPTION}
#' \item{\code{IntMigStock}}{double COLUMN_DESCRIPTION}
#' \item{\code{Left1_Other0}}{double COLUMN_DESCRIPTION}
#' \item{\code{Liberalisation}}{double COLUMN_DESCRIPTION}
#' \item{\code{MigSpain}}{double COLUMN_DESCRIPTION}
#' \item{\code{MigUS}}{double COLUMN_DESCRIPTION}
#' \item{\code{MigSpainUS}}{double COLUMN_DESCRIPTION}
#' \item{\code{TotalPopinY}}{double COLUMN_DESCRIPTION}
#' \item{\code{MigSpainUSPerc}}{double COLUMN_DESCRIPTION}
#' \item{\code{GrowthGDPperCap}}{double COLUMN_DESCRIPTION}
#' \item{\code{Trade_Perc_GDP}}{double COLUMN_DESCRIPTION}
#' \item{\code{RefugeeAndLikeSit}}{double COLUMN_DESCRIPTION}
#' \item{\code{TotalPop}}{double COLUMN_DESCRIPTION}
#' \item{\code{RefAsPerc}}{double COLUMN_DESCRIPTION}
#' \item{\code{GDPperCapPPP}}{double COLUMN_DESCRIPTION}
#' \item{\code{Lib100}}{double COLUMN_DESCRIPTION}
#' \item{\code{x}}{double COLUMN_DESCRIPTION}
#' \item{\code{y}}{double COLUMN_DESCRIPTION}
#' \item{\code{CountryID}}{character COLUMN_DESCRIPTION}
#' \item{\code{South_America}}{double COLUMN_DESCRIPTION}
#' \item{\code{MigSpainUSLog}}{double COLUMN_DESCRIPTION}
#' \item{\code{lag1}}{double COLUMN_DESCRIPTION}
#' \item{\code{lag2}}{double COLUMN_DESCRIPTION}
#' \item{\code{_est_est1}}{double COLUMN_DESCRIPTION}
#' \item{\code{_est_est2}}{double COLUMN_DESCRIPTION}
#' \item{\code{_est_est3}}{double COLUMN_DESCRIPTION}
#' \item{\code{_est_est4}}{double COLUMN_DESCRIPTION}
#' \item{\code{VDEM_Polyarchy}}{double COLUMN_DESCRIPTION}
#'}
#' @source \url{http://somewhere.important.com/}
"Data_APLA_1"
#' @title APLA_Map
#' @description APLA_Map is a slightly modified version of the dataset Data_APLA,
#' which makes it easier to map certain variables.
#' @format A data frame with 667 rows and 34 variables:
#' \describe{
#' \item{\code{Country}}{character COLUMN_DESCRIPTION}
#' \item{\code{Year}}{integer COLUMN_DESCRIPTION}
#' \item{\code{Art}}{integer COLUMN_DESCRIPTION}
#' \item{\code{Total}}{integer COLUMN_DESCRIPTION}
#' \item{\code{Regulatory_Complexity}}{integer COLUMN_DESCRIPTION}
#' \item{\code{Polity2}}{integer COLUMN_DESCRIPTION}
#' \item{\code{IntMigStock}}{double COLUMN_DESCRIPTION}
#' \item{\code{Left1_Other0}}{integer COLUMN_DESCRIPTION}
#' \item{\code{Liberalisation}}{double COLUMN_DESCRIPTION}
#' \item{\code{MigSpain}}{integer COLUMN_DESCRIPTION}
#' \item{\code{MigUS}}{integer COLUMN_DESCRIPTION}
#' \item{\code{MigSpainUS}}{integer COLUMN_DESCRIPTION}
#' \item{\code{TotalPopinY}}{integer COLUMN_DESCRIPTION}
#' \item{\code{MigSpainUSPerc}}{double COLUMN_DESCRIPTION}
#' \item{\code{GrowthGDPperCap}}{double COLUMN_DESCRIPTION}
#' \item{\code{Trade_Perc_GDP}}{integer COLUMN_DESCRIPTION}
#' \item{\code{RefugeeAndLikeSit}}{integer COLUMN_DESCRIPTION}
#' \item{\code{T}}{integer COLUMN_DESCRIPTION}
#' \item{\code{RefAsPerc}}{double COLUMN_DESCRIPTION}
#' \item{\code{GDPperCapPPP}}{double COLUMN_DESCRIPTION}
#' \item{\code{Lib100}}{integer COLUMN_DESCRIPTION}
#' \item{\code{x}}{double COLUMN_DESCRIPTION}
#' \item{\code{y}}{double COLUMN_DESCRIPTION}
#' \item{\code{CountryID}}{character COLUMN_DESCRIPTION}
#' \item{\code{South_America}}{integer COLUMN_DESCRIPTION}
#' \item{\code{MigSpainUSLog}}{double COLUMN_DESCRIPTION}
#' \item{\code{lag1}}{integer COLUMN_DESCRIPTION}
#' \item{\code{lag2}}{integer COLUMN_DESCRIPTION}
#' \item{\code{X_est_est1}}{integer COLUMN_DESCRIPTION}
#' \item{\code{X_est_est2}}{integer COLUMN_DESCRIPTION}
#' \item{\code{X_est_est3}}{integer COLUMN_DESCRIPTION}
#' \item{\code{X_est_est4}}{integer COLUMN_DESCRIPTION}
#' \item{\code{VDEM_Polyarchy}}{double COLUMN_DESCRIPTION}
#' \item{\code{Codified}}{integer COLUMN_DESCRIPTION}
#'}
#' @source \url{http://somewhere.important.com/}
"APLA_Map"
|
cd5776c80df4128d96cc3635f0c1da5b5fa73293
|
ec7dd158dd44c0be69dc2f7d1466b21fa70a563b
|
/R/zzz.R
|
f7770aed6c999a5a976f94997bd0b7b5adbab422
|
[
"Apache-2.0"
] |
permissive
|
snp/LFQ
|
1089f9bf1c72fb6e404a4a084be7a81d7fb0f91f
|
404f714c5e065253e9051341aced6be56e6d1245
|
refs/heads/master
| 2020-05-27T22:04:24.391185
| 2018-06-29T08:17:57
| 2018-06-29T08:17:57
| 82,574,979
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 104
|
r
|
zzz.R
|
# Package attach hook: print a greeting when the package is attached.
#
# @param libname Library path where the package is installed (unused).
# @param pkgname Name of the package being attached (unused).
# @return `NULL`, invisibly (the return value of `packageStartupMessage()`).
.onAttach <- function(libname, pkgname) {
  greeting <- "Welcome to LFQ Proteomics package"
  packageStartupMessage(greeting)
}
|
cbf12d035652a15180b37bab711ba6d898174f58
|
0059bd90bf026c9ea589e0a227f1db415d453864
|
/baseline/aknn.R
|
dfaf2bdb3f3560d860b954421fa4dddec94056cf
|
[] |
no_license
|
stavnycha/master_thesis
|
cd1f221170c89fa5e0d896ae0db7b604e853da70
|
eabf9360c86eaed098cc962b452fbbacef947281
|
refs/heads/master
| 2020-03-25T12:14:42.759870
| 2015-08-03T18:47:11
| 2015-08-03T18:47:11
| 40,141,418
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,280
|
r
|
aknn.R
|
# --- Setup: libraries and data preparation for the AkNN baseline ------------
library(ggplot2)
library(tm)
library(SnowballC)
library("clue")
library("parallel")
library(gsubfn)
library("XLConnect")
library(kohonen)
library(e1071)
library(kknn)
library(clv)
library(stringr)
library(lsa)
folder.root <- "."
# NOTE(review): hard-coded absolute Windows paths + setwd() make this script
# non-portable; consider parameterizing the working directory.
setwd("D:/tartuUniversity/for_ms/baseline/scripts")
source('./aknn_cosine.r')
setwd("D:/tartuUniversity/for_ms")
# Issue-tracker export: one row per closed issue, with free-text title and
# description columns plus a resolution-time field (`timespent`, in seconds).
data <- read.csv(file="fortumo_data_with_time_eng_closed_processed.csv",head=TRUE,sep=",")
data$eventualID <- 1:nrow(data)
# Keep only non-Estonian issues and order them chronologically so that
# "train on the past, predict the next issue" evaluation is meaningful.
data <- data[data$is_estonian == FALSE, ]
data <- data[order(data$created), ]
rownames(data) <- as.character(1:nrow(data))
# Separate text corpora for descriptions and titles (term matrices are built
# from slices of these inside calculate()).
corpus_descriptions = Corpus(VectorSource(as.character(data$description)))
corpus_titles = Corpus(VectorSource(as.character(data$title)))
# Convert resolution time from seconds to hours, then bucket into ordinal
# classes; anything at or below 30 minutes collapses into the 0.5-hour class.
data$timespent_hours <- data$timespent / 3600.0
data$timespent_seconds <- data$timespent
data[data$timespent_seconds <= 1800, ]$timespent_hours <- 0.5
data[data$timespent_hours == 0.5,]$timespent <- "0.5"
data[data$timespent_hours == 1,]$timespent <- "1"
data[data$timespent_hours >= 2 & data$timespent_hours < 4,]$timespent = "2-3"
data[data$timespent_hours >= 4 & data$timespent_hours < 7,]$timespent = "4-6"
data[data$timespent_hours >= 7 & data$timespent_hours < 12,]$timespent = "7-11"
data[data$timespent_hours >= 12 & data$timespent_hours < 21,]$timespent = "12-20"
data[data$timespent_hours >= 21 & data$timespent_hours < 41,]$timespent = "21-40"
data[data$timespent_hours > 40,]$timespent = ">40"
# Ordered factor levels for the resolution-time classes.
levels = c("0.5", "1", "2-3", "4-6", "7-11", "12-20", "21-40", ">40")
data$timespent = factor(data$timespent, levels = levels)
# Creation date as numeric (days since epoch) for the time axis in plots.
data$created <- as.numeric(as.Date(as.character(data$created)))
setwd("D:/tartuUniversity/for_ms/baseline")
# Relative / absolute error thresholds used to score predictions below.
errranges_relative <- c(0.1, 0.25, 0.3, 0.33, 0.5)
errranges_absolute <- c(1,3,5)
# Evaluate the AkNN-cosine predictor on the first `i` issues: train on the
# first i-1 issues, predict the resolution time of issue i, and score that
# prediction against the relative/absolute error thresholds.
#
# NOTE(review): this function relies on globals that are defined LATER in
# this script (`error_ranges_graph`, `a_vector`, `corpus_descriptions`,
# `corpus_titles`, `data`, `errranges_relative`, `errranges_absolute`) and on
# `aknn_cosines()` sourced from aknn_cosine.r. R resolves these at call
# time, so it works, but the function is not usable standalone.
#
# @param i Number of issues from the chronological head of `data` to use;
#   the last one is the prediction target.
# @return A data frame with one row per (a, error-threshold) combination.
calculate <- function(i){
  print(i)
  res_path = paste(error_ranges_graph, "classification_description",i,".csv",sep="")
  #if (file.exists(res_path) == TRUE){
  # NOTE(review): the result-cache read branch is disabled (if (FALSE));
  # results are always recomputed and written fresh below.
  if (FALSE) {
    print("reading existing full set of data...")
    rM <- read.csv(res_path, header = TRUE, sep = ";", row.names = 1)
  } else {
    firstrow <- 1
    rows <- i
    lastrow <- firstrow + rows - 1
    # Empty result table; one row is appended per scored configuration.
    rM <- as.data.frame(matrix(nrow=0,ncol=18, dimnames=list(NULL,c("time.passed",
      "model", "inrange", "terms", "train", "test", "a", "minsparse",
      "maxsparse", "mintfidf", "maxtfidf", "kendall", "spearman", "pearson", "abs",
      "actual", "predicted", "rel" ))))
    involved_data <- data[firstrow:lastrow, ]
    #CHECK weighting!!
    # Tf-idf document-term matrices: descriptions (with sparse terms pruned)
    # and titles (pruning commented out).
    bug.tdm_desc0 <- DocumentTermMatrix(corpus_descriptions[firstrow:lastrow], control = list(minWordLength = 2, maxWordLength=100, weighting=weightTfIdf))
    bug.tdm_desc <- removeSparseTerms(bug.tdm_desc0, sparse=0.995)
    bug.terms_desc <- Terms(bug.tdm_desc)
    bug.tdm_title <- DocumentTermMatrix(corpus_titles[firstrow:lastrow], control = list(minWordLength = 2, maxWordLength=100, weighting=weightTfIdf))
    #bug.tdm_title <- removeSparseTerms(bug.tdm_title, sparse=0.995)
    bug.terms_title <- Terms(bug.tdm_title)
    # Per-term document-frequency and tf-idf summary statistics (logged into
    # the result rows).
    bug.terms.minsparse <- min(apply(as.matrix(bug.tdm_desc), 2, function(x){sum(as.numeric(x > 0))}))/nrow(bug.tdm_desc)
    bug.terms.maxsparse <- max(apply(as.matrix(bug.tdm_desc), 2, function(x){sum(as.numeric(x > 0))}))/nrow(bug.tdm_desc)
    bug.terms.mintfidf <- min(apply(as.matrix(bug.tdm_desc), 2, sum))
    # NOTE(review): maxtfidf is computed with min() as well -- this looks
    # like a copy-paste bug (max() was presumably intended); confirm before
    # relying on the logged maxtfidf column.
    bug.terms.maxtfidf <- min(apply(as.matrix(bug.tdm_desc), 2, sum))
    rt_data <- data[c("id", "timespent", "timespent_hours")][firstrow:lastrow, ]
    colnames(rt_data) <- c("ID", "ResolutionTime", "ResolutionTimeHours")
    # Only the single most recent issue is held out and predicted.
    predicted_size <- 1
    break_point <- (nrow(bug.tdm_desc) - predicted_size)
    predict_path <- paste(error_ranges_graph, "bug.predict",i,".csv",sep="")
    if (file.exists(predict_path) == TRUE){
      # Per-issue prediction cache (written with write.csv2, hence the
      # semicolon separator and comma decimal mark).
      print("reading single predicting...")
      bug.predict.classonly_full <- read.csv(predict_path, header = TRUE, sep = ";", row.names = 1, dec = ",")
    } else {
      # Predict from descriptions and titles separately, then average the
      # two predictions where both are available.
      bug.predict.desc <- aknn_cosines(bug.tdm_desc, a_vector, rt_data)
      bug.predict.title <- aknn_cosines(bug.tdm_title, a_vector, rt_data)
      bug.predict.classonly_full <- bug.predict.title
      for (j in 1:length(a_vector)){
        if (!is.na(bug.predict.desc[j, ]$predicted)){
          if (!is.na(bug.predict.classonly_full[j, ]$predicted)){
            bug.predict.classonly_full[j, ]$predicted = (
              (bug.predict.classonly_full[j, ]$predicted + bug.predict.desc[j, ]$predicted)/2
            )
          }
        }
      }
      print('algorythm done')
      write.csv2(as.matrix(bug.predict.classonly_full),paste(error_ranges_graph, "bug.predict",i,".csv",sep=""))
    }
    # Score each similarity-threshold `a`: absolute and relative error of the
    # averaged prediction against the held-out issue's actual hours.
    for (a in a_vector){
      bug.predict.classonly <- bug.predict.classonly_full[bug.predict.classonly_full$a == a, ]$predicted
      bug.predict.classonly.aerr <- abs(bug.predict.classonly - rt_data[(1 + break_point):nrow(rt_data),3])
      temp <- data.frame(bug.predict.classonly.aerr, rt_data[(1 + break_point):nrow(rt_data),3])
      colnames(temp) <- c("error", "actual")
      #temp <- temp[complete.cases(temp),]
      bug.predict.classonly.rerr <- temp$error / temp$actual
      # One result row per relative-error threshold ("RE < x" models).
      for(errrange in errranges_relative) {
        inrange <- sum(as.numeric(bug.predict.classonly.rerr <= errrange))/length(bug.predict.classonly.rerr)
        rM[nrow(rM)+1, c("time.passed", "model", "inrange",
          "terms", "train", "test", "a", "minsparse",
          "maxsparse", "mintfidf", "maxtfidf", "kendall", "spearman", "pearson", "abs", "actual",
          "predicted", "rel")] <-
          c(i, paste("RE <", errrange), inrange, length(bug.terms_desc), nrow(bug.tdm_desc) - predicted_size, predicted_size, a,
            bug.terms.minsparse,
            bug.terms.maxsparse, bug.terms.mintfidf, bug.terms.maxtfidf, NA, NA, NA, bug.predict.classonly.aerr,
            rt_data[nrow(rt_data),3], as.character(bug.predict.classonly),
            bug.predict.classonly.rerr)
      }
      # One result row per absolute-error threshold ("AE < x" models).
      for(errrange in errranges_absolute) {
        inrange <- sum(as.numeric(bug.predict.classonly.aerr <= errrange))/length(bug.predict.classonly.aerr)
        rM[nrow(rM)+1, c("time.passed", "model", "inrange",
          "terms", "train", "test", "a", "minsparse",
          "maxsparse", "mintfidf", "maxtfidf", "kendall", "spearman", "pearson", "abs", "actual",
          "predicted", "rel")] <-
          c(i, paste("AE <", errrange), inrange, length(bug.terms_desc), nrow(bug.tdm_desc) - predicted_size, predicted_size, a,
            bug.terms.minsparse,
            bug.terms.maxsparse, bug.terms.mintfidf, bug.terms.maxtfidf, NA, NA, NA, bug.predict.classonly.aerr,
            rt_data[nrow(rt_data),3], as.character(bug.predict.classonly),
            bug.predict.classonly.rerr)
      }
    }
    # Persist this iteration's results and release memory before the next
    # (potentially parallel) call.
    path = paste(error_ranges_graph, "classification_description",i,".csv",sep="")
    print(path)
    write.csv2(rM, path)
    gc()
  }
  print(rM)
  return(rM)
}
# Strip leading and trailing whitespace from each element of a character
# vector (behaviorally identical to the original gsub one-liner: anchored
# patterns match at most once per side, so sub() suffices).
trim <- function(x) {
  no_lead <- sub("^\\s+", "", x)
  sub("\\s+$", "", no_lead)
}
# --- Driver: run calculate() over an expanding window and collect results ---
# NOTE(review): `bunches` is defined but never used in the visible script;
# it may belong to a loop that was removed or flattened.
bunches = c(50, 100, 200)
# Similarity thresholds for the AkNN algorithm (passed through globals into
# calculate()).
a_vector <- c(0.05, 0.1, 0.2, 0.3, 0.5, 0.7)
fun = 'aknn'
folder_name = paste(fun)
error_ranges_graph <- paste(folder.root, folder_name, "error ranges ", sep="/")
error_ranges_graph <- paste(error_ranges_graph)
base_path <- paste(folder.root, folder_name, sep="/")
print(error_ranges_graph)
print(base_path)
validities_path <- paste(error_ranges_graph, " all_validities.csv", sep="")
unlink(validities_path)
print(validities_path)
all_data_path <- paste(error_ranges_graph, "all_data.csv")
print(all_data_path)
if (file.exists(all_data_path) == TRUE){
  # Cached combined results exist: load instead of recomputing.
  print("reading existing full set of data...")
  rM_full <- read.csv(all_data_path, header = TRUE, sep = ",", row.names = 1)
} else {
  unlink(validities_path)
  # Run calculate() for every expanding window size from 50 up to the full
  # dataset, in parallel, then row-bind the per-iteration result frames.
  # Elements that errored under mclapply are not data frames and are printed
  # instead of merged.
  rL <- mclapply(50:nrow(data), calculate)
  rM_full <- rL[[1]]
  for(i in 2:length(rL)) {
    if(is.data.frame(rL[[i]]))
      rM_full <- rbind(rM_full, rL[[i]])
    else
      print(rL[[i]])
  }
  write.csv(file=all_data_path, x=rM_full)
}
# Coerce the numeric columns (everything except the "model"/"predicted"
# string columns and the unused correlation columns) back to numeric after
# the CSV round trip.
for(i in c(1,3:11, 15:18)) {
  rM_full[,i] <- as.numeric(rM_full[,i])
}
# --- Aggregate prediction quality per similarity threshold `a` --------------
# For each `a`, compute the share of predictions within each relative /
# absolute error range (APQRE) and the prediction rate (share of non-NA
# predictions). `hours_to_class()` and `distances_range_hour()` are assumed
# to come from the sourced aknn_cosine.r -- TODO confirm.
res <- as.data.frame(matrix(nrow=0,ncol=4, dimnames=list(NULL,c("model", "average value", "a", "prediction rate"))))
for (a in a_vector) {
  rM <- rM_full[rM_full$a == a, ]
  # Map the numeric prediction back to a resolution-time class and compute
  # class-based absolute / relative distances to the actual value.
  rM$predicted_class <- hours_to_class(rM$predicted)
  rM$abs_distance <- distances_range_hour(rM$predicted_class, rM$actual)
  rM$rel_distance <- rM$abs_distance/rM$actual
  for (errrange in errranges_relative){
    rM_temp <- rM[rM$model == paste("RE <", errrange), ]
    rM_temp_not_na <- rM_temp[!is.na(rM_temp$predicted), ]
    apqre <- nrow(rM_temp_not_na[rM_temp_not_na$rel_distance <= errrange, ])/nrow(rM_temp_not_na)
    prediction_rate <- nrow(rM_temp_not_na)/nrow(rM_temp)
    res[nrow(res) + 1, c("model", "average value", "a", "prediction rate")] <- c(paste("RE <", errrange),
      apqre, a, prediction_rate)
  }
  for (errrange in errranges_absolute){
    rM_temp <- rM[rM$model == paste("AE <", errrange), ]
    rM_temp_not_na <- rM_temp[!is.na(rM_temp$predicted), ]
    apqre <- nrow(rM_temp_not_na[rM_temp_not_na$abs_distance <= errrange, ])/nrow(rM_temp_not_na)
    prediction_rate <- nrow(rM_temp_not_na)/nrow(rM_temp)
    res[nrow(res) + 1, c("model", "average value", "a", "prediction rate")] <- c(paste("AE <", errrange),
      apqre, a, prediction_rate)
  }
}
# --- Plotting: smoothed predictive-quality curves over time -----------------
# NOTE(review): `k_vector` is never defined in the visible script, and
# `smooth_data()` plus the *_factors / clusters / validity columns are not
# produced by the code above -- this loop appears to be carried over from a
# sibling (clustering-based) script; confirm it is runnable here. Also note
# `rM` at this point holds the LAST `a` slice from the aggregation loop.
for (k in k_vector) {
  smoothings = c(10, 50)
  for (smoothing in smoothings){
    # One smoothed percentage-in-range curve per error-threshold model.
    for (err in c("RE < 0.1", "RE < 0.25", "RE < 0.33", "AE < 1", "AE < 3", "AE < 5")) {
      table <- rM[rM$model == err,]
      smoothing_inrange <- smooth_data(table$inrange, smoothing)
      smoothing_inrange_factors <- smooth_data(table$inrange_factors, smoothing)
      table$smoothing_inrange <- smoothing_inrange
      table$smoothing_factors <- smoothing_inrange_factors
      my.p <- ggplot(table)
      my.p <- my.p + geom_point(mapping=aes(x=time.passed, y=inrange), size=1.1) +
        scale_x_continuous(limits=c(1,nrow(data)), breaks=(1:23)*100) + ggtitle(paste("Percentage of issues with", err, ", smoothing by", smoothing))
      my.p <- my.p + geom_line(mapping=aes(x=time.passed, y=smoothing_inrange), color="#9999CC", size=1.1)
      path <- paste(base_path, "/factor-number/", "error ranges predictive quality smoothing by "
        ,smoothing, ", ", gsub("<","l",err), ".png", sep="")
      ggsave(filename = path, plot = my.p, width = 50, height = 5, units = "in", limitsize=FALSE)
      my.p <- ggplot(table)
      my.p <- my.p + geom_point(mapping=aes(x=time.passed, y=inrange), size=1.1) +
        scale_x_continuous(limits=c(1,nrow(data)), breaks=(1:23)*100) + ggtitle(paste("Percentage of issues with", err, ", smoothing by", smoothing))
      my.p <- my.p + geom_line(mapping=aes(x=time.passed, y=smoothing_inrange_factors), color="#9999CC", size=1.1)
      path <- paste(base_path, "/factors/", "error ranges predictive quality smoothing by "
        ,smoothing, ", ", gsub("<","l",err), ".png", sep="")
      ggsave(filename = path, plot = my.p, width = 50, height = 5, units = "in", limitsize=FALSE)
    }
    # Error curves are identical across models, so one slice suffices.
    shortened_table <- rM[rM$model == "RE < 0.1",] # whatever model, just 1 line per result is needed
    shortened_table$smoothing_abs <- smooth_data(shortened_table$abs, smoothing)
    shortened_table$smoothing_abs_factors <- smooth_data(shortened_table$abs_factors, smoothing)
    shortened_table$smoothing_rel <- smooth_data(shortened_table$rel, smoothing)
    shortened_table$smoothing_rel_factors <- smooth_data(shortened_table$rel_factors, smoothing)
    # Absolute-error curve (raw in grey, smoothed in purple).
    my.p <- ggplot(shortened_table)
    my.p <- my.p + geom_line(mapping=aes(x=time.passed, y=abs), color="#AAAAAA") +
      scale_x_continuous(limits=c(1,nrow(data)), breaks=(1:23)*100) + ggtitle(paste("Absolute error, smoothing by", smoothing))
    my.p <- my.p + geom_line(mapping=aes(x=time.passed, y=smoothing_abs), color="#6600CC", size=1.1)
    path <- paste(base_path, "/factor-number/", "error ranges AE smoothing by "
      ,smoothing, ".png", sep="")
    ggsave(filename = path, plot = my.p, width = 50, height = 5, units = "in", limitsize=FALSE)
    my.p <- ggplot(shortened_table)
    my.p <- my.p + geom_line(mapping=aes(x=time.passed, y=abs_factors), color="#AAAAAA") +
      scale_x_continuous(limits=c(1,nrow(data)), breaks=(1:23)*100) + ggtitle(paste("Absolute error, smoothing by", smoothing))
    my.p <- my.p + geom_line(mapping=aes(x=time.passed, y=smoothing_abs_factors), color="#6600CC", size=1.1)
    path <- paste(base_path, "/factors/", "error ranges AE smoothing by "
      ,smoothing, ".png", sep="")
    ggsave(filename = path, plot = my.p, width = 50, height = 5, units = "in", limitsize=FALSE)
    # Relative-error curve.
    my.p <- ggplot(shortened_table)
    my.p <- my.p + geom_line(mapping=aes(x=time.passed, y=rel), color="#AAAAAA") +
      scale_x_continuous(limits=c(1,nrow(data)), breaks=(1:23)*100) + ggtitle(paste("Relative error, smoothing by", smoothing))
    my.p <- my.p + geom_line(mapping=aes(x=time.passed, y=smoothing_rel), color="#6600CC", size=1.1)
    path <- paste(base_path, "/factor-number/", "error ranges RE smoothing by "
      ,smoothing, ".png", sep="")
    ggsave(filename = path, plot = my.p, width = 50, height = 5, units = "in", limitsize=FALSE)
    my.p <- ggplot(shortened_table)
    my.p <- my.p + geom_line(mapping=aes(x=time.passed, y=rel_factors), color="#AAAAAA") +
      scale_x_continuous(limits=c(1,nrow(data)), breaks=(1:23)*100) + ggtitle(paste("Relative error, smoothing by", smoothing))
    my.p <- my.p + geom_line(mapping=aes(x=time.passed, y=smoothing_rel_factors), color="#6600CC", size=1.1)
    path <- paste(base_path, "/factors/", "error ranges RE smoothing by "
      ,smoothing, ".png", sep="")
    ggsave(filename = path, plot = my.p, width = 50, height = 5, units = "in", limitsize=FALSE)
    # Cluster count and validity over time (only meaningful for the
    # clustering variant of this pipeline -- see NOTE above).
    shortened_table$smoothing_clusters <- smooth_data(shortened_table$clusters, smoothing)
    shortened_table$smoothing_validity <- smooth_data(shortened_table$validity, smoothing)
    my.p <- ggplot(shortened_table)
    my.p <- my.p + geom_point(mapping=aes(x=time.passed, y=clusters), color="#AAAAAA") +
      scale_x_continuous(limits=c(1,nrow(data)), breaks=(1:23)*100) + ggtitle(paste("Clusters number, smoothing by", smoothing))
    my.p <- my.p + geom_line(mapping=aes(x=time.passed, y=smoothing_clusters), color="#6600CC", size=1.1)
    path <- paste(base_path, "/factor-number/", "clusters numbers, smoothing by "
      ,smoothing, ".png", sep="")
    ggsave(filename = path, plot = my.p, width = 50, height = 5, units = "in", limitsize=FALSE)
    my.p <- ggplot(shortened_table)
    my.p <- my.p + geom_line(mapping=aes(x=time.passed, y=validity), color="#AAAAAA") +
      scale_x_continuous(limits=c(1,nrow(data)), breaks=(1:23)*100) + ggtitle(paste("Clusters validity, smoothing by", smoothing))
    my.p <- my.p + geom_line(mapping=aes(x=time.passed, y=smoothing_validity), color="#6600CC", size=1.1)
    path <- paste(base_path, "/factor-number/", "clusters validity, smoothing by "
      ,smoothing, ".png", sep="")
    ggsave(filename = path, plot = my.p, width = 50, height = 5, units = "in", limitsize=FALSE)
  }
  # Exact-class hit rate: share of predictions with zero class distance.
  rM$true_positives <- (rM$abs == 0)
  shortened_table <- rM[rM$model == "RE < 0.1",] # whatever model, just 1 line per result is needed
  true_positives_percentage <- nrow(shortened_table[shortened_table$true_positives == TRUE,])/nrow(shortened_table)
  tpp_path <- paste(error_ranges_graph, "true_positives_percentage.csv")
  write.csv(file=tpp_path, x=data.frame("True Positive Percentage", true_positives_percentage))
}
# NOTE(review): the two closing braces below have no matching opening brace
# in the visible script (likely loops over `bunches`/`fun` were flattened
# out of this extract); preserved as-is -- confirm against the original file.
}
}
|
e40d978faf2891904e82f6e0af19e80e3f917bd1
|
995182d67072061d54b02947055d1fb66e319e7a
|
/compare_tef2.R
|
788193e3d1ec52cdb8d01a6553686485ce39d79e
|
[] |
no_license
|
healyke/FestR_replacement_tests
|
4e75caa6e916217806e74f44198a31c1cd74cf66
|
70dc0cab15464a9ca38cbd175d5680d942618fc9
|
refs/heads/master
| 2016-08-11T00:33:52.867069
| 2016-04-01T13:36:01
| 2016-04-01T13:36:01
| 53,622,159
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 728
|
r
|
compare_tef2.R
|
#' Compare observed discrimination (TEF) values against model output.
#'
#' For each observed delta value, computes the difference between the modal
#' posterior TEF estimate and the observation, and the width of the 95%
#' highest-density region of the posterior.
#'
#' @param Tef.data Data passed through to `plot_list()` (project function)
#'   to extract the observed delta values.
#' @param isotope Which isotope to compare: "carbon" or "nitrogen"
#'   (forwarded to `plot_list()`).
#' @param Tef.output A list of posterior samples, one element per observed
#'   delta value, suitable for `hdr()`.
#' @return A list with elements `diff_mean` (posterior mode minus observed
#'   delta) and `diff_int` (width of the 95% HDR), each a list parallel to
#'   the observed deltas.
compare_tef <- function(Tef.data = c(),
                        # BUG FIX: the original default was c(carbon, nitrogen)
                        # -- bare symbols that error with "object 'carbon' not
                        # found" whenever the default is evaluated. The
                        # commented-out code below compares against the quoted
                        # strings, so character values were clearly intended.
                        isotope = c("carbon", "nitrogen"),
                        Tef.output = c()){
  obv_delta <- unlist(plot_list(Tef.data, isotope = isotope)$delta.plot.list)
  ######Carbon = 1, Nitrogen = 3.5 this doesnt work
  #standard_diff <- vector()
  #if(isotope == "carbon"){
  #  standard_diff <- obv_delta - 1
  #} else{
  #  standard_diff <- diff(obv_delta, 3.5)}
  #######Use Caut
  ###need to fill in
  #######
  #####
  # Preallocate result lists instead of growing them inside the loop.
  n <- length(obv_delta)
  diff_mean <- vector("list", n)
  diff_int <- vector("list", n)
  for (i in seq_len(n)) {
    # hdr() is called once per element and reused for both summaries
    # (the original called it twice per iteration).
    post <- hdr(Tef.output[[i]])
    diff_mean[[i]] <- post$mode - obv_delta[i]
    diff_int[[i]] <- diff(post$hdr["95%", ])
  }
  list(diff_mean = diff_mean, diff_int = diff_int)
}
|
018915cbd817b04a93a0a892b970c7f0244215b4
|
f1dd2f4825547b6880b2f3efa2727137ba532a8d
|
/transferAndylab/automaticRules/experimentQualitativeRules.R
|
269e115948397f9e703609bd38dc39dd44609abf
|
[] |
no_license
|
mkirchhof/rslAppl
|
38b21f17456140012f890c30a14ccceba81caa2e
|
c6b9e66c790a3d879dfdfa28e2decc8dfb5e6f33
|
refs/heads/main
| 2023-06-18T15:30:26.809778
| 2021-07-23T07:32:46
| 2021-07-23T07:32:46
| 371,723,181
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,393
|
r
|
experimentQualitativeRules.R
|
# --- Array-job experiment: evaluate one measurement rule per job ------------
# The array index comes from the PBS_ARRAYID environment variable (Torque/
# PBS; the original comment said SLURM, but the variable read is PBS --
# confirm which scheduler is actually in use). Falls back to 1 when run
# interactively.
folds <- as.integer(Sys.getenv("PBS_ARRAYID"))
if(is.na(folds)){
  folds <- 1
}
cat("ID = ", folds, "\n")
# Total number of rules in the RSL model (90); 8 of them are sampled below,
# so `folds` is expected to lie in 1..8.
nRules <- 90
# Dependencies:
library("parallel")
source("rsl_qualitative.R")
library(microbenchmark)
# Initialize a local worker cluster with a reproducible RNG stream; the
# same seed is set on master and workers.
ntasks <- 10
cl <- makeCluster(ntasks)
clusterSetRNGStream(cl, iseed=22122020 - 1)
set.seed(22122020 - 1)
clusterEvalQ(cl, source("rsl_qualitative.R"))
# Load the train/val/test data and the fitted RSL model (`rsl`).
load("../../lara/data/data.RData")
load("9_rsl.RData")
# Rename the first 7 label columns of the actual-label frames to match the
# label-node names used by the RSL model.
colnames(data$trainActual) <- .getAllLabelNodes(rsl)[1:7]
colnames(data$valActual) <- .getAllLabelNodes(rsl)[1:7]
colnames(data$testActual) <- .getAllLabelNodes(rsl)[1:7]
# Class alphabets for the three label groups (presumably abbreviations of
# domain categories -- TODO confirm against rsl_qualitative.R).
classes1 <- c("st", "wa", "kn", "cr", "si", "ly")
classes2 <- c("u", "bf", "bs", "os", "oh")
classes3 <- c("re", "pi", "pl", "rl", "ca", "fm", "sc", "id")
# Labels L8-L10 have no ground truth in the training data.
data$trainActual$L8 <- NA_character_
data$trainActual$L9 <- NA_character_
data$trainActual$L10 <- NA_character_
# Sample 8 of the 90 rules with a fixed seed and pick the one assigned to
# this array job.
set.seed(123)
rule <- .getAllRules(rsl)[sample(90, 8)][folds]
# Re-seed (master and workers) so predictions are reproducible regardless of
# the rule-sampling draws above, then predict on the test set using the
# selected measurement rule and persist the result per job.
clusterSetRNGStream(cl, iseed=22122020 - 1)
set.seed(22122020 - 1)
pred <- predict(rsl, data$test, cluster = cl, method = "approximate",
                measureRule = rule)
save(pred, file = paste0(folds, "_pred.RData"))
# Shut the worker cluster down.
stopCluster(cl)
|
97df6e8e3baf91e800dc920dda0e2539cf78a330
|
1bacdd09c333e1fd459c35c405fa83e46e04349e
|
/revisions/model predictions.R
|
9babdfd505f4fd7dbb21e98c54a858a25601b831
|
[] |
no_license
|
robertlynch66/Migrations-revisions-NHB
|
ec135458e70d5b90701372cbda29752e795eddb1
|
7256ecde5f2c93b960289baac3ec4c5fa37a07f5
|
refs/heads/master
| 2020-04-01T03:55:41.552160
| 2019-01-30T11:40:53
| 2019-01-30T11:40:53
| 152,841,960
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,993
|
r
|
model predictions.R
|
#### Final models for OSR project
# NOTE(review): the second readRDS() immediately overwrites the first with a
# user-specific absolute path; only one of the two can succeed on a given
# machine -- keep whichever matches the runtime environment.
data <- readRDS("full_pop_data.rds")
data <- readRDS("C:/Users/rofrly/Dropbox/Github/Populism_ms/full_pop_data.rds")
# scale social capital index
# Min-max rescale the 2014 social capital index to [0, 1].
data$soc_cap_indx_14 <- data$soc_cap_indx_14 -min(data$soc_cap_indx_14,na.rm=T)
data$soc_cap_indx_14 <- data$soc_cap_indx_14/(max(data$soc_cap_indx_14,na.rm=T))
# Same rescaling for the 2005 index, then the 2005->2014 change.
data$sk05 <- data$sk05 - min(data$sk05, na.rm=T)
data$sk05 <- data$sk05/max(data$sk05, na.rm=T)
data$sk2014_over_2005 <- data$soc_cap_indx_14-data$sk05
# log population and median hh income differences
data$pop_change_16_to_8 <- log(data$pop_2014) -log(data$pop_2010)
# NOTE(review): median_hh_income_16_to_8 is read on the next line (to
# reconstruct the 2008 level) and then redefined as a log-difference -- the
# statement order here is load-bearing.
data$median_hh_income_08 <- data$median_hh_income_16_to_8 + data$median_hh_income
data$median_hh_income_16_to_8 <- log(data$median_hh_income) -log(data$median_hh_income_08)
# add weights
# Regression weights: combined two-candidate general-election vote counts.
data$total_votes_16_gen <- data$trump_16+data$clinton_16
data$total_votes_16_08_gen <- data$trump_16+data$mccain_08
# for the primaries scale by population not votes
#data$pop_2014
library(dplyr)
library(emmeans)
library(lme4)
library(betareg)
# Model 1 Trump vs Clinton: 2016 Trump general-election vote share regressed
# on social capital, mortality, and demographic covariates, weighted by
# total general-election votes.
#check correls amongst predictors
mod1 <- data %>% select(trump_perc_16, pop_density_2014,drug_16_scaled,alcohol_16_scaled,
                        suicides_16_scaled,perc_bachelors,
                        diversity_idx_2016,soc_cap_indx_14,perc_white,
                        perc_hisp,pop_density_2014,median_hh_income,
                        total_votes_16_gen,total_votes_16_08_gen,pop_2014)
summary(mod1)
cor(mod1,use = "complete.obs")
# Keep complete cases only, then persist the model frame for reuse.
mod1 <- mod1[complete.cases(mod1),]
saveRDS(mod1,"data for models/model_1_df.rds")
# NOTE(review): write.xlsx() is not provided by any library() call visible in
# this script (dplyr/emmeans/lme4/betareg) -- presumably the xlsx or openxlsx
# package is attached interactively; confirm before running non-interactively.
write.xlsx(mod1,"data for models/model_1_df.xlsx", sheetName="Sheet1")
### beta regression with weights using betareg - 4 models each
# model 1 trump percent 16
m1 = betareg(trump_perc_16 ~ soc_cap_indx_14 +
               suicides_16_scaled +drug_16_scaled+
               log(pop_density_2014)+log(median_hh_income)+perc_white+perc_hisp+diversity_idx_2016,
             weights=total_votes_16_gen,
             data = mod1)
options(digits = 6)
options(scipen=999)
summary(m1)
### make predictions from model 1
# Prediction grid: covariates held at their sample means, except
# median_hh_income fixed at 60000 (presumably a chosen scenario value --
# TODO confirm).
# NOTE(review): the row count 2570 is hard-coded and must equal nrow(mod1)
# for the data.frame() construction to succeed; it will silently break if
# the input data change.
z1 <- data.frame("suicides_16_scaled" = rep(mean(mod1$suicides_16_scaled),2570),
                 #"suicides_16_over_08" = seq(from =-36, to =47, length.out =1381),
                 "trump_perc_16" = mod1$trump_perc_16,
                 "drug_16_scaled" = rep(mean(mod1$drug_16_scaled),2570),
                 "soc_cap_indx_14" = rep(mean(mod1$soc_cap_indx_14),2570),
                 "perc_white" = rep(mean(mod1$perc_white),2570),
                 "perc_hisp" = rep(mean(mod1$perc_hisp),2570),
                 "median_hh_income" = rep(60000,2570),
                 "pop_density_2014" = rep(mean(mod1$pop_density_2014),2570),
                 "diversity_idx_2016" = rep(mean(mod1$diversity_idx_2016),2570),
                 "perc_bachelors" = rep(mean(mod1$perc_bachelors),2570))
# get the model predictions and variance from your new df (like link in rethinking)
predicted.suicides1 <- cbind(z1[,1],
                             predict(m1,newdata=z1,type="response"),
                             predict(m1, newdata=z1, type = "variance")
) %>% as.data.frame()
head(predicted.suicides1)
# model 2 sanders percent 16: 2016 Sanders primary vote share, same
# predictors as model 1 but weighted by county population (primaries are
# scaled by population, not votes -- see the note in the setup section).
mod2 <- data %>% select(sanders_perc_16, pop_density_2014,drug_16_scaled,alcohol_16_scaled,
                        suicides_16_scaled,perc_bachelors,pop_2014,
                        diversity_idx_2016,soc_cap_indx_14,perc_white,
                        perc_hisp,pop_density_2014,median_hh_income)
summary(mod2)
cor(mod2,use = "complete.obs")
mod2 <- mod2[complete.cases(mod2),]
# NOTE(review): write.xlsx() requires a package (xlsx/openxlsx) that is not
# attached anywhere in this script -- confirm before running.
write.xlsx(mod2,"data for models/model_2_df.xlsx", sheetName="Sheet1")
saveRDS(mod2,"data for models/model_2_df.rds")
m2 = betareg(sanders_perc_16 ~ soc_cap_indx_14 +
               suicides_16_scaled +drug_16_scaled+
               log(pop_density_2014)+log(median_hh_income)+perc_white+perc_hisp+diversity_idx_2016,
             weights=pop_2014,
             data = mod2)
summary(m2)
# Prediction grid: covariates at their sample means, median_hh_income fixed
# at 30000 (presumably a scenario value -- TODO confirm).
# NOTE(review): the row count 2767 is hard-coded and must equal nrow(mod2).
z2 <- data.frame("suicides_16_scaled" = rep(mean(mod2$suicides_16_scaled),2767),
                 #"suicides_16_over_08" = seq(from =-36, to =47, length.out =1381),
                 "sanders_perc_16" = mod2$sanders_perc_16,
                 "drug_16_scaled" = rep(mean(mod2$drug_16_scaled),2767),
                 "soc_cap_indx_14" = rep(mean(mod2$soc_cap_indx_14),2767),
                 "perc_white" = rep(mean(mod2$perc_white),2767),
                 "perc_hisp" = rep(mean(mod2$perc_hisp),2767),
                 "median_hh_income" = rep(30000,2767),
                 "pop_density_2014" = rep(mean(mod2$pop_density_2014),2767),
                 "diversity_idx_2016" = rep(mean(mod2$diversity_idx_2016),2767),
                 "perc_bachelors" = rep(mean(mod2$perc_bachelors),2767))
# get the model predictions and variance from your new df (like link in rethinking)
predicted.suicides2 <- cbind(z2[,1],
                             predict(m2,newdata=z2,type="response"),
                             predict(m2, newdata=z2, type = "variance")
) %>% as.data.frame()
head(predicted.suicides2)
#model 3 mccain vs trump
# Longitudinal model: change in Republican vote share (Trump 2016 vs McCain
# 2008) regressed on 2008->2016 changes in deaths of despair, social capital,
# and demographics.
mod3 <- data %>% select(trump_perc_16_vs_08,drug_16_over_08,alcohol_16_over_08,pop_density_2014,
suicides_16_over_08,diversity_idx_2016,perc_bachelors,
sk2014_over_2005,percent_white_16_to_8, pop_change_16_to_8,
percent_hispanic_16_to_8,median_hh_income_16_to_8,total_votes_16_08_gen)
summary(mod3)
cor(mod3,use = "complete.obs")
# Keep complete cases only and persist the modelling frame.
mod3 <- mod3[complete.cases(mod3),]
saveRDS(mod3,"data for models/model_3_df.rds")
write.xlsx(mod3,"data for models/model_3_df.xlsx", sheetName="Sheet1")
# Beta regression weighted by total general-election votes across both years.
m3 = betareg(trump_perc_16_vs_08 ~ drug_16_over_08+alcohol_16_over_08+
suicides_16_over_08+
sk2014_over_2005+percent_white_16_to_8+ pop_change_16_to_8+
percent_hispanic_16_to_8+median_hh_income_16_to_8+
log(pop_density_2014)+diversity_idx_2016+perc_bachelors,
weights=total_votes_16_08_gen,
data = mod3)
summary(m3)
# Counterfactual grid for m3: predictors held at their means, the observed
# response kept, and sk2014_over_2005 fixed at -0.15 (a social-capital
# decline scenario). 1379 = nrow(mod3) after complete.cases.
z3 <- data.frame("suicides_16_over_08" = rep(mean(mod3$suicides_16_over_08),1379),
"alcohol_16_over_08" = rep(mean(mod3$alcohol_16_over_08),1379),
"trump_perc_16_vs_08" = mod3$trump_perc_16_vs_08,
"drug_16_over_08" = rep(mean(mod3$drug_16_over_08),1379),
"sk2014_over_2005" = rep(-0.15,1379),
"percent_white_16_to_8" = rep(mean(mod3$percent_white_16_to_8),1379),
"percent_hispanic_16_to_8" = rep(mean(mod3$percent_hispanic_16_to_8),1379),
"median_hh_income_16_to_8" = rep(mean(mod3$median_hh_income_16_to_8),1379),
"pop_density_2014" = rep(mean(mod3$pop_density_2014),1379),
"diversity_idx_2016" = rep(mean(mod3$diversity_idx_2016),1379),
"perc_bachelors" = rep(mean(mod3$perc_bachelors),1379),
# FIX: the original wrote rep(mean(mod3$pop_change_16_to_8,1379)), which
# passes 1379 as mean()'s `trim` argument (trim >= 0.5 makes mean() return
# the median) and gives rep() no `times`, relying on data.frame recycling.
# The intended call repeats the plain mean 1379 times:
"pop_change_16_to_8" = rep(mean(mod3$pop_change_16_to_8), 1379))
# get the model predictions and variance from your new df (like link in rethinking)
predicted.suicides3 <- cbind(z3[,1],
predict(m3,newdata=z3,type="response"),
predict(m3, newdata=z3, type = "variance")
) %>% as.data.frame()
head(predicted.suicides3)
# model 4 sanders vs obama
# Longitudinal model: change in primary vote share (Sanders 2016 vs Obama
# 2008) on the same set of 2008->2016 change predictors, weighted by
# 2014 population.
mod4 <- data %>% select(sanders_perc_16_vs_08,drug_16_over_08,alcohol_16_over_08,pop_density_2014,
suicides_16_over_08,diversity_idx_2016,perc_bachelors,
sk2014_over_2005,percent_white_16_to_8, pop_change_16_to_8,
percent_hispanic_16_to_8,median_hh_income_16_to_8,pop_2014)
summary(mod4)
cor(mod4,use = "complete.obs")
# NOTE(review): unlike the other models, mod4 is not filtered with
# complete.cases() before fitting — betareg's NA handling applies.
model_sanders_vs_obama = betareg(sanders_perc_16_vs_08 ~ drug_16_over_08+alcohol_16_over_08+
suicides_16_over_08+
sk2014_over_2005+percent_white_16_to_8+ pop_change_16_to_8+
percent_hispanic_16_to_8+median_hh_income_16_to_8+
log(pop_density_2014)+diversity_idx_2016+perc_bachelors,
weights=pop_2014,
data = mod4)
summary(model_sanders_vs_obama)
# model longitudnal changes but sanders vs hillary
# Sanders' 2016 primary share regressed on 2008->2016 longitudinal change
# predictors, weighted by 2014 population.
mod5 <- data %>% select(sanders_perc_16,drug_16_over_08,alcohol_16_over_08,pop_density_2014,
suicides_16_over_08,diversity_idx_2016,perc_bachelors,
sk2014_over_2005,percent_white_16_to_8, pop_change_16_to_8,
percent_hispanic_16_to_8,median_hh_income_16_to_8,pop_2014)
summary(mod5)
cor(mod5,use = "complete.obs")
# Keep complete cases (1298 rows remain) and persist the modelling frame.
mod5 <- mod5[complete.cases(mod5),]
saveRDS(mod5,"data for models/model_5_df.rds")
write.xlsx(mod5,"data for models/model_5_df.xlsx", sheetName="Sheet1")
m5 = betareg(sanders_perc_16 ~ drug_16_over_08+alcohol_16_over_08+
suicides_16_over_08+
sk2014_over_2005+percent_white_16_to_8+ pop_change_16_to_8+
percent_hispanic_16_to_8+median_hh_income_16_to_8+
log(pop_density_2014)+diversity_idx_2016+perc_bachelors,
weights=pop_2014,
data = mod5)
summary(m5)
# Counterfactual grid for m5: predictors at their means, the observed
# response kept, sk2014_over_2005 fixed at -0.15. 1298 = nrow(mod5).
z5 <- data.frame("suicides_16_over_08" = rep(mean(mod5$suicides_16_over_08),1298),
"alcohol_16_over_08" = rep(mean(mod5$alcohol_16_over_08),1298),
"sanders_perc_16" = mod5$sanders_perc_16,
"drug_16_over_08" = rep(mean(mod5$drug_16_over_08),1298),
"sk2014_over_2005" = rep(-0.15,1298),
"percent_white_16_to_8" = rep(mean(mod5$percent_white_16_to_8),1298),
"percent_hispanic_16_to_8" = rep(mean(mod5$percent_hispanic_16_to_8),1298),
"median_hh_income_16_to_8" = rep(mean(mod5$median_hh_income_16_to_8),1298),
"pop_density_2014" = rep(mean(mod5$pop_density_2014),1298),
"diversity_idx_2016" = rep(mean(mod5$diversity_idx_2016),1298),
"perc_bachelors" = rep(mean(mod5$perc_bachelors),1298),
# FIX: original rep(mean(mod5$pop_change_16_to_8,1298)) passed 1298 as
# mean()'s `trim` argument (trim >= 0.5 yields the median) and left rep()
# without `times`. Intended: repeat the plain mean 1298 times.
"pop_change_16_to_8" = rep(mean(mod5$pop_change_16_to_8), 1298))
# get the model predictions and variance from your new df (like link in rethinking)
predicted.suicides5 <- cbind(z5[,1],
predict(m5,newdata=z5,type="response"),
predict(m5, newdata=z5, type = "variance")
) %>% as.data.frame()
head(predicted.suicides5)
# model longitudinal changes but trump vs clinton
# Trump's 2016 general-election share regressed on 2008->2016 longitudinal
# change predictors, weighted by 2014 population.
mod6 <- data %>% select(trump_perc_16,drug_16_over_08,alcohol_16_over_08,pop_density_2014,
suicides_16_over_08,diversity_idx_2016,perc_bachelors,
sk2014_over_2005,percent_white_16_to_8, pop_change_16_to_8,
percent_hispanic_16_to_8,median_hh_income_16_to_8,pop_2014)
summary(mod6)
# Keep complete cases (1381 rows remain) and persist the modelling frame.
mod6 <- mod6[complete.cases(mod6),]
saveRDS(mod6,"data for models/model_6_df.rds")
write.xlsx(mod6,"data for models/model_6_df.xlsx", sheetName="Sheet1")
cor(mod6,use = "complete.obs")
m6 = betareg(trump_perc_16 ~ drug_16_over_08+alcohol_16_over_08+
suicides_16_over_08+
sk2014_over_2005+percent_white_16_to_8+ pop_change_16_to_8+
percent_hispanic_16_to_8+median_hh_income_16_to_8+
log(pop_density_2014)+diversity_idx_2016+perc_bachelors,
weights=pop_2014,
data = mod6)
summary(m6)
# Counterfactual grid for m6: predictors at their means, the observed
# response kept, sk2014_over_2005 fixed at 0 (no change in social capital).
# 1381 = nrow(mod6).
z6 <- data.frame("suicides_16_over_08" = rep(mean(mod6$suicides_16_over_08),1381),
"alcohol_16_over_08" = rep(mean(mod6$alcohol_16_over_08),1381),
"trump_perc_16" = mod6$trump_perc_16,
"drug_16_over_08" = rep(mean(mod6$drug_16_over_08),1381),
"sk2014_over_2005" = rep(0,1381),
"percent_white_16_to_8" = rep(mean(mod6$percent_white_16_to_8),1381),
"percent_hispanic_16_to_8" = rep(mean(mod6$percent_hispanic_16_to_8),1381),
"median_hh_income_16_to_8" = rep(mean(mod6$median_hh_income_16_to_8),1381),
"pop_density_2014" = rep(mean(mod6$pop_density_2014),1381),
"diversity_idx_2016" = rep(mean(mod6$diversity_idx_2016),1381),
"perc_bachelors" = rep(mean(mod6$perc_bachelors),1381),
# FIX: original rep(mean(mod6$pop_change_16_to_8,1381)) passed 1381 as
# mean()'s `trim` argument (trim >= 0.5 yields the median) and left rep()
# without `times`. Intended: repeat the plain mean 1381 times.
"pop_change_16_to_8" = rep(mean(mod6$pop_change_16_to_8), 1381))
# get the model predictions and variance from your new df (like link in rethinking)
predicted.suicides6 <- cbind(z6[,1],
predict(m6,newdata=z6,type="response"),
predict(m6, newdata=z6, type = "variance")
) %>% as.data.frame()
head(predicted.suicides6)
#### make figure for longitudnal predcitors and clinton vs sanders
# Builds the data behind Figure 3: refits the Trump-vs-Clinton model on the
# `trump_clinton` frame, then creates two counterfactual prediction sets —
# one varying observed suicides, one varying observed social-capital change —
# with all other predictors at their means.
# NOTE(review): assumes `trump_clinton` (1381 complete rows) was built
# earlier in the script — confirm.
library(ggplot2)
# plot the points (actual observations), regression line, and confidence interval
# run model
m1 <- betareg(trump_perc_16 ~ drug_16_over_08+alcohol_16_over_08+
suicides_16_over_08+
sk2014_over_2005+percent_white_16_to_8+ pop_change_16_to_8+
percent_hispanic_16_to_8+median_hh_income_16_to_8+
log(pop_density_2014)+diversity_idx_2016+perc_bachelors,
weights=pop_2014,
data = trump_clinton)
summary(m1)
# make a df with the same number fo rows as the model used
# make a new df with the actual values of suicides or a sequence and the mean values of all the other variables
x1 <- data.frame("suicides_16_over_08" = trump_clinton$suicides_16_over_08,
#"suicides_16_over_08" = seq(from =-36, to =47, length.out =1381),
"trump_perc_16" = trump_clinton$trump_perc_16,
"drug_16_over_08" = rep(mean(trump_clinton$drug_16_over_08),1381),
"alcohol_16_over_08" = rep(mean(trump_clinton$alcohol_16_over_08),1381),
"sk2014_over_2005" = rep(mean(trump_clinton$sk2014_over_2005),1381),
"percent_white_16_to_8" = rep(mean(trump_clinton$percent_white_16_to_8),1381),
"pop_change_16_to_8" = rep(mean(trump_clinton$pop_change_16_to_8),1381),
"percent_hispanic_16_to_8" = rep(mean(trump_clinton$percent_hispanic_16_to_8),1381),
"median_hh_income_16_to_8" = rep(mean(trump_clinton$median_hh_income_16_to_8),1381),
"pop_2014" = trump_clinton$pop_2014,
"pop_density_2014" = rep(mean(trump_clinton$pop_density_2014),1381),
"diversity_idx_2016" = rep(mean(trump_clinton$diversity_idx_2016),1381),
"perc_bachelors" = rep(mean(trump_clinton$perc_bachelors),1381))
# get the model predictions and variance from your new df (like link in rethinking)
# Keep the varied predictor (col 1), observed Trump share (col 2) and
# population (col 10) alongside the predictions.
predicted.suicides1 <- cbind(x1[,1],x1[,2],x1[,10],
predict(m1,newdata=x1,type="response"),
predict(m1, newdata=x1, type = "variance")
) %>% as.data.frame()
predicted.suicides1$cat <- "suicides"
# name variables
colnames(predicted.suicides1) <- c("pred_obs","trump_obs" ,"pop", "trump_predicted","variance","cat")
# scale suicides 'pred_obs' to a value between 0 and 1
predicted.suicides1$pred_obs = (predicted.suicides1$pred_obs-min(predicted.suicides1$pred_obs))/
(max(predicted.suicides1$pred_obs)-min(predicted.suicides1$pred_obs))
# make a dataframe predicting social capital change
x2 <- data.frame("suicides_16_over_08" = rep(mean(trump_clinton$suicides_16_over_08),1381),
#"suicides_16_over_08" = seq(from =-36, to =47, length.out =1381),
"trump_perc_16" = trump_clinton$trump_perc_16,
"drug_16_over_08" = rep(mean(trump_clinton$drug_16_over_08),1381),
"alcohol_16_over_08" = rep(mean(trump_clinton$alcohol_16_over_08),1381),
"sk2014_over_2005" = trump_clinton$sk2014_over_2005,
"percent_white_16_to_8" = rep(mean(trump_clinton$percent_white_16_to_8),1381),
"pop_change_16_to_8" = rep(mean(trump_clinton$pop_change_16_to_8),1381),
"percent_hispanic_16_to_8" = rep(mean(trump_clinton$percent_hispanic_16_to_8),1381),
"median_hh_income_16_to_8" = rep(mean(trump_clinton$median_hh_income_16_to_8),1381),
"pop_2014" = trump_clinton$pop_2014,
"pop_density_2014" = rep(mean(trump_clinton$pop_density_2014),1381),
"diversity_idx_2016" = rep(mean(trump_clinton$diversity_idx_2016),1381),
"perc_bachelors" = rep(mean(trump_clinton$perc_bachelors),1381))
# get the model predictions and variance from your new df (like link in rethinking)
# Here column 5 (sk2014_over_2005) is the varied predictor.
predicted.suicides2 <- cbind(x2[,5],x2[,2],x2[,10],
predict(m1,newdata=x2,type="response"),
predict(m1, newdata=x2, type = "variance")
) %>% as.data.frame()
predicted.suicides2$cat <- "social capital"
# name variables
colnames(predicted.suicides2) <- c("pred_obs","trump_obs" ,"pop", "trump_predicted","variance","cat")
#predicted.suicides2$pred_obs <-scalar1(predicted.suicides2$pred_obs)
# Min-max scale the varied predictor onto [0, 1] so both panels share an axis.
predicted.suicides2$pred_obs = (predicted.suicides2$pred_obs-min(predicted.suicides2$pred_obs))/
(max(predicted.suicides2$pred_obs)-min(predicted.suicides2$pred_obs))
# row bind the 2 df's
d <- rbind(predicted.suicides1,predicted.suicides2)
#### make figure 3 for longitudnal predictors and trump vs clinton
# Scatter of observed Trump share vs the (scaled) varied predictor, sized by
# county population, with the model's predicted line and a +/- variance ribbon,
# one colour per predictor category ("suicides" vs "social capital").
cols <-c("#0066CC", "#FF9900")
p <- ggplot(d, aes(pred_obs,trump_obs,colour = cat))#, size=pop
p <- p + geom_point(aes(size = pop))+
scale_size(name = "County population",
breaks = c(10000, 100000, 250000, 500000,1000000,2000000),
labels = c("10k", "100k", "250k", "500k", "1 mil","2 mil"))
p <- p + geom_line(aes(pred_obs, trump_predicted))
p <- p + geom_ribbon(aes(ymin=trump_predicted-variance,ymax=trump_predicted+variance), alpha=0.3)+
scale_color_manual(values=cols) +
#theme_Publication()+
scale_y_continuous(name="Percentage voting for\nTrump vs Clinton in 2016",breaks=c(0.25,0.5,0.75),
labels=c('25%','50%','75%'))+
scale_colour_manual(
name="Key",
values = cols,
breaks = c("suicides", "social capital","pop"),
labels = c("Suicides", "Social Capital","Population")
)+
scale_x_continuous(name="Change in social capital and per capita suicides\nbetween 2008 and 2016 (standardized)",
breaks=c(0.00,1.00),labels=c("Maximum decrease","Maximum increase"))+
ggtitle("A decline in social capital and an increase in per capita suicides between\n2008 and 2016 predicts support for Trump")+
# Label appearance
theme_bw()+
theme(legend.text = element_text(size = 8, face = "bold"))+
theme(plot.title = element_text(size=10,face = "bold", hjust = 0.5))+
theme(axis.title.x =element_text (size=8,face="bold"))+
theme(axis.title.y = element_text (size=8,face="bold"))+
theme(legend.title = element_text(size=9,face = "bold"))+
theme(panel.border = element_blank(), panel.grid.major = element_blank(),
panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"))
p
ggsave(p, filename = "Figure 3.png", width = 6, height = 4, device = "png", dpi = 600,units = "in")
### make figure 4 change in suicides and social capita vs support for sanders in 2016
# Same construction as Figure 3, but for Sanders' 2016 primary share:
# refit the model on complete cases of mod5 (1298 rows), then build two
# counterfactual prediction sets (suicides varied / social capital varied).
library(ggplot2)
# plot the points (actual observations), regression line, and confidence interval
# run model
mod5 <- data %>% select(sanders_perc_16,drug_16_over_08,alcohol_16_over_08,pop_density_2014,
suicides_16_over_08,diversity_idx_2016,perc_bachelors,
sk2014_over_2005,percent_white_16_to_8, pop_change_16_to_8,
percent_hispanic_16_to_8,median_hh_income_16_to_8,pop_2014)
summary(mod5)
cor(mod5,use = "complete.obs")
mod5 <- mod5[complete.cases(mod5),]
m2 <- betareg(sanders_perc_16 ~ drug_16_over_08+alcohol_16_over_08+
suicides_16_over_08+
sk2014_over_2005+percent_white_16_to_8+ pop_change_16_to_8+
percent_hispanic_16_to_8+median_hh_income_16_to_8+
log(pop_density_2014)+diversity_idx_2016+perc_bachelors,
weights=pop_2014,
data = mod5)
summary(m2)
# mod5 has 1298 cases
# make a df with the same number fo rows as the model used
# make a new df with the actual values of suicides or a sequence and the mean values of all the other variables
x2 <- data.frame("suicides_16_over_08" = mod5$suicides_16_over_08,
#"suicides_16_over_08" = seq(from =-36, to =47, length.out =1381),
"sanders_perc_16" = mod5$sanders_perc_16,
"drug_16_over_08" = rep(mean(mod5$drug_16_over_08),1298),
"alcohol_16_over_08" = rep(mean(mod5$alcohol_16_over_08),1298),
"sk2014_over_2005" = rep(mean(mod5$sk2014_over_2005),1298),
"percent_white_16_to_8" = rep(mean(mod5$percent_white_16_to_8),1298),
"pop_change_16_to_8" = rep(mean(mod5$pop_change_16_to_8),1298),
"percent_hispanic_16_to_8" = rep(mean(mod5$percent_hispanic_16_to_8),1298),
"median_hh_income_16_to_8" = rep(mean(mod5$median_hh_income_16_to_8),1298),
"pop_2014" = mod5$pop_2014,
"pop_density_2014" = rep(mean(mod5$pop_density_2014),1298),
"diversity_idx_2016" = rep(mean(mod5$diversity_idx_2016),1298),
"perc_bachelors" = rep(mean(mod5$perc_bachelors),1298))
# get the model predictions and variance from your new df (like link in rethinking)
# Columns kept: varied predictor (1), observed Sanders share (2), population (10).
predicted.suicides1 <- cbind(x2[,1],x2[,2],x2[,10],
predict(m2,newdata=x2,type="response"),
predict(m2, newdata=x2, type = "variance")
) %>% as.data.frame()
predicted.suicides1$cat <- "suicides"
# name variables
colnames(predicted.suicides1) <- c("pred_obs","sanders_obs" ,"pop", "sanders_predicted","variance","cat")
# scale suicides 'pred_obs' to a value between 0 and 1
predicted.suicides1$pred_obs = (predicted.suicides1$pred_obs-min(predicted.suicides1$pred_obs))/
(max(predicted.suicides1$pred_obs)-min(predicted.suicides1$pred_obs))
# make a dataframe predicting social capital change
x3 <- data.frame("suicides_16_over_08" = rep(mean(mod5$suicides_16_over_08),1298),
#"suicides_16_over_08" = seq(from =-36, to =47, length.out =1381),
"sanders_perc_16" = mod5$sanders_perc_16,
"drug_16_over_08" = rep(mean(mod5$drug_16_over_08),1298),
"alcohol_16_over_08" = rep(mean(mod5$alcohol_16_over_08),1298),
"sk2014_over_2005" = mod5$sk2014_over_2005,
"percent_white_16_to_8" = rep(mean(mod5$percent_white_16_to_8),1298),
"pop_change_16_to_8" = rep(mean(mod5$pop_change_16_to_8),1298),
"percent_hispanic_16_to_8" = rep(mean(mod5$percent_hispanic_16_to_8),1298),
"median_hh_income_16_to_8" = rep(mean(mod5$median_hh_income_16_to_8),1298),
"pop_2014" = mod5$pop_2014,
"pop_density_2014" = rep(mean(mod5$pop_density_2014),1298),
"diversity_idx_2016" = rep(mean(mod5$diversity_idx_2016),1298),
"perc_bachelors" = rep(mean(mod5$perc_bachelors),1298))
# get the model predictions and variance from your new df (like link in rethinking)
# Here column 5 (sk2014_over_2005) is the varied predictor.
predicted.suicides2 <- cbind(x3[,5],x3[,2],x3[,10],
predict(m2,newdata=x3,type="response"),
predict(m2, newdata=x3, type = "variance")
) %>% as.data.frame()
predicted.suicides2$cat <- "social capital"
# name variables
colnames(predicted.suicides2) <- c("pred_obs","sanders_obs" ,"pop", "sanders_predicted","variance","cat")
#predicted.suicides2$pred_obs <-scalar1(predicted.suicides2$pred_obs)
predicted.suicides2$pred_obs = (predicted.suicides2$pred_obs-min(predicted.suicides2$pred_obs))/
(max(predicted.suicides2$pred_obs)-min(predicted.suicides2$pred_obs))
# row bind the 2 df's
d2 <- rbind(predicted.suicides1,predicted.suicides2)
#### make figure 3 for longitudnal predictors and trump vs clinton
|
9d534b25cec598cfbca52771cba02bed7b441580
|
2a3f6b843354fb60f5ca97d3580e512aa5771a0e
|
/R scripts/Statistical_tests.R
|
e26f41eaf1d899422d83b082881dc95107e90d67
|
[] |
no_license
|
anmako/Rstats
|
b954415e4988b0d8b5adfd8ba4a495173b9fa3a4
|
6145570551f14695b9eae83e9841b244a84350e2
|
refs/heads/master
| 2020-03-30T14:20:25.031099
| 2020-01-03T12:15:05
| 2020-01-03T12:15:05
| 151,313,176
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,070
|
r
|
Statistical_tests.R
|
## Basic Statistical Tests Explained with R
## A tour of common hypothesis tests with small simulated examples.
## (1) Shapiro Test: Testing for normality
normaly_disb <- rnorm(100, mean = 5, sd = 1)
shapiro.test(normaly_disb)
not_normally_dist <- runif(100)
shapiro.test(not_normally_dist)
## If p-Value is less than the significance level (0.05), the null-hypothesis that it is normally distributed can be rejected.
## (2) One Sample t-test
set.seed(100)
x <- rnorm(50, mean = 10, sd = 0.5)
t.test(x, mu = 10)
## In above case, the p-Value is not less than significance level of 0.05, therefore the null hypothesis that the mean=10 cannot be rejected. Also note that the 95% confidence interval range includes the value 10 within its range.
## (3) Wilcoxon Signed Rank Test
# To test the mean of a sample when normal distribution is not assumed. Wilcoxon signed rank test can be an alternative to t-Test, especially when the data sample is not assumed to follow a normal distribution. It is a non-parametric method used to test if an estimate is different from its true value.
numeric_vector <- c(20, 29, 24, 19, 20, 22, 28, 23, 19, 19)
wilcox.test(numeric_vector, mu=20, conf.int = TRUE)
## (4) Two Sample t-Test and Wilcoxon Rank Sum Test
# Both t.Test and Wilcoxon rank test can be used to compare the mean of 2 samples. The difference is t-Test assumes the samples being tests is drawn from a normal distribution, while, Wilcoxon's rank sum test does not.
x <- c(0.80, 0.83, 1.89, 1.04, 1.45, 1.38, 1.91, 1.64, 0.73, 1.46)
y <- c(1.15, 0.88, 0.90, 0.74, 1.21)
wilcox.test(x, y, alternative = "g")
t.test(1:10, y = c(7:20))
#What if we want to do a 1-to-1 comparison of means for values of x and y?
t.test(x, y, paired = TRUE)
wilcox.test(x, y, paired = TRUE)
## (5) Shapiro Test
# To test if a sample follows a normal distribution
set.seed(100)
normaly_disb <- rnorm(100, mean=5, sd=1)
shapiro.test(normaly_disb)
set.seed(100)
not_normaly_disb <- runif(100) #uniform distribution
shapiro.test(not_normaly_disb)
## (6) Kolmogorov And Smirnov Test
#Kolmogorov-Smirnov test is used to check whether 2 samples follow the same distribution.
x <- rnorm(50)
y <- runif(50)
ks.test(x, y)
x <- rnorm(50)
y <- rnorm(50)
ks.test(x, y)
## (7) Fisher's F-Test
#Fisher's F test can be used to check if two samples have same variance.
x <- rnorm(50)
y <- runif(50)
var.test(x, y)
x <- rnorm(50)
y <- rnorm(50)
var.test(x, y)
## (8) Chi Squared Test
#Chi-squared test in R can be used to test if two categorical variables are dependent, by means of a contingency table.
#Example use case: You may want to figure out if big budget films become box-office hits. We got 2 categorical variables (Budget of film, Success Status) each with 2 factors (Big/Low budget and Hit/Flop), which forms a 2 x 2 matrix.
# NOTE(review): categorical_X / categorical_Y are placeholders and are not
# defined anywhere in this script — these two calls are illustrative only.
chisq.test(table(categorical_X, categorical_Y), correct = FALSE)
summary(table(categorical_X, categorical_Y))
#There are two ways to tell if they are independent:
#By looking at the p-Value: If the p-Value is less than 0.05, we reject the null hypothesis that the x and y are independent. So for the example output above, (p-Value=2.954e-07), we reject the null hypothesis and conclude that x and y are not independent.
#From Chi.sq value: For 2 x 2 contingency tables with 2 degrees of freedom (d.o.f), if the Chi-Squared calculated is greater than 3.841 (critical value), we reject the null hypothesis that the variables are independent. To find the critical value of larger d.o.f contingency tables, use qchisq(0.95, n-1), where n is the number of variables.
## (9) Correlation
#To test the linear relationship of two continuous variables
cor.test(cars$speed, cars$dist)
## (10) More Commonly Used Tests
fisher.test(contingencyMatrix, alternative = "greater") # Fisher's exact test to test independence of rows and columns in contingency table
friedman.test() # Friedman's rank sum non-parametric test
#The package lawstat has a good collection. The outliers package has a number of test for testing for presence of outliers.
install.packages("lawstat")
library(lawstat)
|
ddd29c4a66073e637d9bb021d8a8c6d4e011b6be
|
f109648c4a8a5a663bda80b3b3dd43421771978e
|
/Assignment_Hospital Quality/rankhospital.R
|
73b24acc0a70cabecbab7886a549fa609d6af409
|
[] |
no_license
|
s1g9/R-Programming
|
0e0f58bcdc75aaee8bce137384dfc24772c4758c
|
76fa5e9337beee114b7f5f88c6208e30ffc2ca6b
|
refs/heads/master
| 2020-12-31T07:54:52.672727
| 2015-11-20T20:14:06
| 2015-11-20T20:14:06
| 46,577,603
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,023
|
r
|
rankhospital.R
|
rankhospital <- function(state, outcome, num = "best"){
  # Return the name of the hospital holding rank `num` in `state` for the
  # 30-day mortality rate of `outcome`. `num` may be an integer rank,
  # "best" (rank 1) or "worst" (last rank). Ties are broken alphabetically
  # by hospital name. Stops with "invalid state"/"invalid outcome" on bad input.
  outcomes_df <- read.csv("rprog-data-ProgAssignment3-data/outcome-of-care-measures.csv")
  state_rows <- outcomes_df[outcomes_df$State == state, ]
  if (!nrow(state_rows)) {
    stop("invalid state")
  }
  # Map the user-facing outcome string onto the column-name fragment.
  cause <- switch(outcome,
                  "heart attack"  = "Heart.Attack",
                  "heart failure" = "Heart.Failure",
                  "pneumonia"     = "Pneumonia",
                  NA)
  if (is.na(cause)) {
    stop("invalid outcome")
  }
  ## Check that state and outcome are valid
  rate_col <- paste("Hospital.30.Day.Death..Mortality..Rates.from", cause, sep = ".")
  kept_cols <- c("Hospital.Name", rate_col)
  ##print(kept_cols)
  state_subset <- state_rows[, kept_cols]
  ##print(state_subset)
  # Drop hospitals with no reported rate for this outcome.
  usable <- state_subset[!state_subset[rate_col] == "Not Available", ]
  ##print(usable)
  # Order by numeric mortality rate, then hospital name as tie-breaker.
  ranking <- order(as.double(as.matrix(usable[rate_col])), usable["Hospital.Name"])
  if (num == "best") {
    num <- 1
  }
  if (num == "worst") {
    num <- length(ranking)
  }
  as.vector(usable[ranking[num], "Hospital.Name"])
}
|
8b59b0a8df6308aab351767c06afbfa0fa8557d7
|
cbaa250faba198eb548d83e39967ed2057cc8daa
|
/DMRegressionFreq2/man/DM_fit_Freq.Rd
|
50135ca954f303a85fdf20fc4464401e3ad1c9ac
|
[] |
no_license
|
carriegu/STAT-840
|
3418718f0b0c8bab2812270cdc281af66190c1aa
|
d6447ec79b07c09f457bd9bd5631f530aa362dca
|
refs/heads/master
| 2022-04-18T13:28:32.649076
| 2020-04-12T20:34:06
| 2020-04-12T20:34:06
| 255,138,685
| 0
| 0
| null | 2020-04-13T03:09:13
| 2020-04-12T17:51:12
|
R
|
UTF-8
|
R
| false
| true
| 596
|
rd
|
DM_fit_Freq.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DM_fit_Freq.R
\name{DM_fit_Freq}
\alias{DM_fit_Freq}
\title{Estimate beta coefficients of the Dirichlet-Multinomial regression model
using Frequentist approach.}
\usage{
DM_fit_Freq(Y, X)
}
\arguments{
\item{Y}{Matrix of \verb{n x J} responses.}
\item{X}{Matrix of \verb{n x (P+1)} covariates.}
}
\value{
beta_hat the fitted matrix of \verb{(P+1) x J} parameters and the corresponding Fisher-info.
}
\description{
Estimate beta coefficients of the Dirichlet-Multinomial regression model
using Frequentist approach.
}
|
55177f0b93aaf7d267fc35f0f32587468f536ce2
|
812720f93b43704a1bb00c16716c74e2e637fd4f
|
/man/depart.LDL.Rd
|
8b270f6ee504ee685f60e72f0ffc491ffb3b6cb1
|
[] |
no_license
|
cran/HAPim
|
9df01704bb002f166674d189790fc19a59ecc789
|
8b189df9b1547d74bfbad541ed2c1af88b18054f
|
refs/heads/master
| 2020-05-17T15:48:30.314265
| 2009-10-10T00:00:00
| 2009-10-10T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,626
|
rd
|
depart.LDL.Rd
|
\name{depart.LDL}
\alias{depart.LDL}
\title{starting values for the optimization of HAPimLDL method}
\description{
The function calculates the starting value of the error variance and the starting value of the QTL effect for the optimization of HAPimLDL method.
It can be viewed as an internal function.
The user does not have to call it by himself.
}
\usage{
depart.LDL(moyenne.pere, perf, CD, PLA, desc.pere)
}
\arguments{
\item{moyenne.pere}{results provided by \code{moyenne.pere()} function, mean of half-sib family performances.}
\item{perf}{numeric vector of length=number of individuals which contains the performances of individuals.}
\item{CD}{numeric vector of length=number of individuals which contains the CD of individuals. var(perf$_i$)=s/CD$^2_i$}
\item{PLA}{numeric vector (number of individuals)
which contains transmission probabilities at a single test position.}
\item{desc.pere}{results provided by \code{descendant.pere()} function, numeric matrix (number of sires x 2)
which gives for each sire, the first and last indexes of its progeny.}
}
\value{
The returned value is a numeric vector of length=2 which contains estimates of the error variance and the Q allele effect.
}
\references{publication to be submitted: C. Cierco-Ayrolles, S. Dejean, A. Legarra, H. Gilbert,
T. Druet, F. Ytournel, D. Estivals, N. Oumouhou and B. Mangin.
Combining linkage analysis and linkage disequilibrium for QTL fine mapping in animal pedigrees.}
\author{S. Dejean, N. Oumouhou, D. Estivals, B. Mangin }
\seealso{\code{\link{moyenne.pere}}, \code{\link{descendant.pere}} }
\keyword{models}
|
acd7c6c5ee65cfde3e08df75181c4422672e6a5b
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/5606_0/rinput.R
|
16233b13d48a12b13af277f60da8b08982c370cf
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Phylogenetic tree utilities (read.tree / unroot / write.tree).
library(ape)
# Read a Newick-format tree, remove its root, and write the unrooted tree out.
testtree <- read.tree("5606_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="5606_0_unrooted.txt")
|
e5c05bca280e5db442ca2d7fbf80a8207282b611
|
7a3410528d2ce4cf0aaa53f01a2486d705543283
|
/R/plot_tf_idf.R
|
1fbac5f0caf3cc1b903e8e2bae4e38aacd0c6d0c
|
[
"MIT"
] |
permissive
|
deandevl/RtextminerPkg
|
85907057a4cb259f22b6677a7c8547b32b3d7f59
|
da0893ef95c52baed1c333927addee20fc13f317
|
refs/heads/main
| 2023-03-19T20:25:57.510189
| 2022-10-25T13:28:01
| 2022-10-25T13:28:01
| 238,649,574
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,385
|
r
|
plot_tf_idf.R
|
# Title : plot_tf_idf
# Objective : plot the top tf_idf values in a bar chart
# Created by: Rick Dean
# Created on: 2021-01-07 8:43 AM
#
#' Function plots the top \code{N} ordered tf_idf values
#'
#' @description Function brings together RtextminerPkg::get_tf_idf() and
#' RplotterPkg::create_bar_plot() to plot the top \code{N} ordered tf_idf
#' values from a data frame of text.
#'
#' @param tf_idf the data frame output from RtextminerPkg::get_tf_idf().
#' @param N the number of top tf_idf values to bar chart
#' @param feature_id_val the feature_id character string for which the top tf_idf values are
#' to be plotted.
#' @param title A string that sets the overall title.
#' @param subtitle A string that sets the overall subtitle.
#' @param x_title A string that sets the x axis title.
#' @param y_title A string that sets the y axis title.
#' @param center_titles A logical which if \code{TRUE} centers both the \code{title} and \code{subtitle}.
#' @param rot_y_tic_label A logical which if TRUE rotates the y tic labels 90 degrees for enhanced readability.
#' @param bar_fill A string that sets the fill color attribute for the bars.
#' @param bar_color A string that sets the outline color attribute for the bars.
#' @param bar_alpha A numeric that sets the alpha component attribute to \code{bar_color}.
#' @param bar_size A numeric that sets the outline thickness attribute of the bars.
#' @param bar_width A numeric that sets the width attribute of the bars.
#' @param y_limits A numeric 2 element vector that sets the minimum and maximum for the y axis.
#' Use NA to refer to the existing minimum and maximum.
#' @param y_major_breaks A numeric vector or function that defines the exact major tic locations along the y axis.
#' @param y_minor_breaks A numeric vector or function that defines the exact minor tic locations along the y axis.
#' @param y_labels A character vector with the same length as \code{y_major_breaks}, that labels the major tics.
#' @param axis_text_size A numeric that sets the font size along the axis'. Default is 11.
#' @param do_coord_flip A logical which if \code{TRUE} will flip the x and y axis'.
#' @param bar_labels A logical which if \code{TRUE} will label each bar with its value.
#' @param bar_label_size A numeric that sets the size of the bar labels
#' @param bar_label_color A string that sets the color of the bar labels
#' @param show_major_grids A logical that controls the appearance of major grids.
#' @param show_minor_grids A logical that controls the appearance of minor grids.
#'
#' @importFrom tokenizers tokenize_words
#' @importFrom data.table as.data.table
#' @importFrom data.table setkey
#' @importFrom data.table setkeyv
#' @importFrom data.table setnames
#' @importFrom data.table setorderv
#' @importFrom RplotterPkg create_bar_plot
#' @import ggplot2
#'
#' @return A plot object.
#'
#' @author Rick Dean
#'
#' @export
plot_tf_idf <- function(
  tf_idf = NULL,
  N = 15,
  feature_id_val = NULL,
  title = NULL,
  subtitle = NULL,
  x_title = "token",
  y_title = "tf_idf",
  center_titles = FALSE,
  rot_y_tic_label = FALSE,
  bar_fill = NA,
  bar_color = "black",
  bar_alpha = 1.0,
  bar_size = 1.0,
  bar_width = NULL,
  y_limits = NULL,
  y_major_breaks = waiver(),
  y_minor_breaks = waiver(),
  y_labels = waiver(),
  axis_text_size = 11,
  do_coord_flip = FALSE,
  bar_labels = FALSE,
  bar_label_size = 4,
  bar_label_color = "black",
  show_major_grids = TRUE,
  show_minor_grids = TRUE
) {
  # Subset to the requested feature. NOTE: this is data.table syntax —
  # `tf_idf` must be a data.table with a `feature_id` column, as produced
  # by RtextminerPkg::get_tf_idf().
  tf_idf_top_dt <- tf_idf[feature_id == feature_id_val]
  # FIX: the original used tf_idf_top_dt[1:N], which pads the result with
  # NA rows when fewer than N tokens exist for this feature (data.table
  # out-of-range subscripts yield NA rows). head() caps at the number of
  # available rows instead.
  tf_idf_plot <- RplotterPkg::create_bar_plot(
    df = head(tf_idf_top_dt, N),
    aes_x = "token",
    aes_y = "tf_idf",
    title = title,
    subtitle = subtitle,
    x_title = x_title,
    y_title = y_title,
    center_titles = center_titles,
    rot_y_tic_label = rot_y_tic_label,
    bar_fill = bar_fill,
    bar_color = bar_color,
    bar_alpha = bar_alpha,
    bar_size = bar_size,
    bar_width = bar_width,
    y_limits = y_limits,
    y_major_breaks = y_major_breaks,
    y_minor_breaks = y_minor_breaks,
    y_labels = y_labels,
    axis_text_size = axis_text_size,
    do_coord_flip = do_coord_flip,
    bar_labels = bar_labels,
    bar_label_size = bar_label_size,
    bar_label_color = bar_label_color,
    show_major_grids = show_major_grids,
    show_minor_grids = show_minor_grids,
    order_bars = "asc"
  )
  return(tf_idf_plot)
}
|
fcde577d3b49518788774f79250e32e4771e9388
|
c96fb047660d57547921e01de547ffcdfc2af4f8
|
/man/setupGSEArun.Rd
|
b4f8141d29cca4dba7c4fae643b4b219803ee6e0
|
[
"MIT"
] |
permissive
|
BaderLab/POPPATHR
|
d5a3acf04fdda8ce3e9ad6ef41ade62dee7f8052
|
19290bfdaaa3ff06c9cfcad72f04b3f3e789007b
|
refs/heads/master
| 2023-06-23T21:30:03.556374
| 2021-06-09T20:35:43
| 2021-06-09T20:35:43
| 201,321,105
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,292
|
rd
|
setupGSEArun.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/setupGSEArun.R
\name{setupGSEArun}
\alias{setupGSEArun}
\title{Sets up and runs GSEA using population-based FST values}
\usage{
setupGSEArun(
fst_file,
annotation_file,
snp2gene_file,
SET_PERM = 10000,
SNP2GENE_DIST = 5e+05,
MIN_GENE = 10,
MAX_GENE = 300,
SET_SEED = 42,
output_folder
)
}
\arguments{
\item{fst_file}{(char) path to file with SNP-level FST statistics.}
\item{annotation_file}{(char) path to pathway definitions GMT file.}
\item{snp2gene_file}{(char) path to file with snp2gene mappings (output of SNP2gene.R).}
\item{SET_PERM}{(integer) set cycle of permutations to run (default=10000).}
\item{SNP2GENE_DIST}{(integer) value for GSEA --distance.
Max. distance between SNP and gene for their association in snp2gene_file
(default=500000).}
\item{MIN_GENE}{(integer) value for GSEA --setmin.
Min. number of genes in a gene set to be considered (default=10).}
\item{MAX_GENE}{(integer) value for GSEA --setmax.
Max. number of genes in a gene set to be considered (default=300).}
\item{SET_SEED}{(integer) value for GSEA --seed.}
\item{output_folder}{(char) path to store output files.}
}
\value{
none
}
\description{
Sets up and runs GSEA using population-based FST values
}
|
9051bcb1c9d560b126e4e0c93b0a7780e8901382
|
6c7783c0da4511ea88f1d334849a41f288d157b7
|
/03_scripts/10_cooccurence.R
|
1432e875fde7badba95d2636f2bde3037d44d3ed
|
[] |
no_license
|
skraftarcher/LA_FW_Sponges
|
fd59296b245edbd92a1aedfe326692e5ce41e30e
|
39c6d325144bf9af40152fe6a41b7f291559e1fd
|
refs/heads/main
| 2023-07-11T13:39:14.195450
| 2021-08-17T18:31:23
| 2021-08-17T18:31:23
| 311,411,292
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 873
|
r
|
10_cooccurence.R
|
# Co-occurrence analysis of freshwater sponge communities across sites.
# Builds a site-by-species presence/absence matrix and runs probabilistic
# co-occurrence (cooccur) and null-model (EcoSimR) analyses.
if (!requireNamespace("cooccur", quietly = TRUE)) {
  # Install on demand instead of unconditionally re-installing on every run.
  install.packages("cooccur")
}
library(cooccur)
library(tidyverse)
library(vegan)

# Long-format abundance data: one row per Site x sponge taxon.
sp1 <- read_rds("02_wdata/sponges_pa_Nov162020.rds")

# Widen to a species-by-site abundance matrix; missing abundances -> 0.
sp2 <- data.frame(sp1 %>%
  select(Site, sponges, ab) %>%
  mutate(ab = ifelse(is.na(ab), 0, ab)) %>%
  pivot_wider(names_from = Site, values_from = ab))
rownames(sp2) <- sp2$sponges
sp2 <- sp2[, -1]
# Drop sites where no sponges were recorded.
sp2 <- sp2[, colSums(sp2) != 0]
# Convert abundances to presence/absence.
sp3 <- decostand(sp2, "pa")

# Probabilistic species co-occurrence analysis.
sp.coocur <- cooccur(mat = sp3, type = "spp_site", thresh = TRUE, spp_names = TRUE)
summary(sp.coocur)
plot(sp.coocur)

# Null-model test of co-occurrence structure.
library(EcoSimR)
myModel <- cooc_null_model(speciesData = sp3, suppressProg = TRUE)
summary(myModel)
plot(myModel, type = "cooc")
plot(myModel, type = "burn_in")
plot(myModel, type = "hist")

# Network visualisation of the presence/absence matrix.
library(cooccur)
library(visNetwork)
# FIX: the original called `sp3(...)` (a data frame, not a function), used
# curly typographic quotes around the colour string (a syntax error), and
# referenced an undefined object `sponges`.  The intent appears to be a
# node table built from the presence/absence matrix -- confirm against the
# downstream visNetwork call.
nodes <- data.frame(id = 1:nrow(sp3),
                    label = rownames(sp3),
                    color = "#606482",
                    shadow = TRUE)
|
4e3bae4d58b280b3b21fe37ea6ce6387e29f8724
|
799468ce526db6f14f2aa5003c601e259e5f0d62
|
/man/stage.vector.plot.Rd
|
a6abff5dca6f9d3362a027a15541cd64fcc49416
|
[] |
no_license
|
kostask84/popbio
|
6aa45015bfc1659bd97f2ce51ad5246b8d434fac
|
682d3ffb922dfab4fd2c7fc7179af2b0d926edfd
|
refs/heads/master
| 2021-05-09T02:01:42.050755
| 2017-02-09T21:44:20
| 2017-02-09T21:44:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,849
|
rd
|
stage.vector.plot.Rd
|
\name{stage.vector.plot}
\alias{stage.vector.plot}
\title{ Plot stage vector projections }
\description{
Plots short-term dynamics and convergence to the stable stage distribution using stage vector projections.
}
\usage{
stage.vector.plot(stage.vectors, proportions=TRUE, legend.coords="topright",
ylim=NULL, xlab="Years", ylab=NULL, col=rainbow(8), ... )
}
\arguments{
\item{stage.vectors}{ a matrix listing stage class vectors in columns}
\item{proportions}{ plot proportional changes or total numbers, defaults to proportions. }
\item{legend.coords}{ a \code{\link{legend}} keyword or vector of x,y coordinates, defaults to top-right corner }
\item{ylim}{the y limits of the plot, defaults to min and max values in stage.vectors}
\item{xlab}{a label for the x axis}
\item{ylab}{a label for the y axis}
\item{col}{vector of line colors, defaults to rainbow(8)}
\item{...}{additional options are passed to \code{\link{plot}} function}
}
\details{ A plot of stage or age class projections }
\references{see section 2.2 in Caswell 2001 }
\author{ Chris Stubben }
\seealso{ see \code{\link{pop.projection}} }
\examples{
## matrix from Example 2.1 in Caswell
A<-matrix(c(
0, 0.3, 0,
1, 0, 0.5,
5, 0, 0
), nrow=3, dimnames=list(1:3,1:3))
n<-c(1,0,0)
p<-pop.projection(A,n,60)
## Plots in Figure 2.3
stage.vector.plot(p$stage.vector[,1:15], col='black', las=1, prop=FALSE)
stage.vector.plot(p$stage.vector[,1:40], col=2:4, las=1)
## log-scale with custom y-axis
stage.vector.plot(p$stage.vector, col=2:4, prop=FALSE,
ylim=c(.01, 10), log='y', legend="bottomright", yaxt='n')
pwrs<- -2:1
# major ticks
axis(2, at = 10^pwrs, labels=parse(text=paste("10^", pwrs, sep = "")),
las=1, tcl= -.6)
# minor ticks
axis(2, at = 1:9 * rep(10^pwrs[-1] / 10, each = 9),
tcl = -0.3, labels = FALSE)
}
\keyword{ survey }
|
4f492d57cd3bc46fe4c53279a6bb88496e78eec6
|
7ada85f56845751f5636ed7918114a20d52ddaaa
|
/Ch4.R
|
ea80dc1c42c14ac52a9b944c1a945296446d4540
|
[] |
no_license
|
nravishanker/FCILM-2
|
8e67225c274ae92b55fe2cc69dde2d20489a2a5e
|
1901d54e1d3650955a6c5683aa41bbe8f6d27b97
|
refs/heads/master
| 2023-08-29T09:12:20.704051
| 2021-10-28T22:22:34
| 2021-10-28T22:22:34
| 375,077,095
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,472
|
r
|
Ch4.R
|
## Simple Linear Regression (SLR) Model
# Dataset: cars (shipped with base R's datasets package; the car package is
# loaded for scatterplotMatrix() used in the MLR section below)
library(car)
data("cars")
attach(cars)  # NOTE(review): attach() is discouraged; kept to preserve the script's style
plot(dist, speed)

# Simple correlation
cor(dist, speed)
# Correlation test.  FIX: the original assigned cor(dist, speed) to
# `corr.test` and then evaluated bare `cor.test`, which printed the
# cor.test() function body instead of running the hypothesis test.
corr.test <- cor.test(dist, speed)
corr.test

# OLS estimation in an SLR model
mod.slr <- lm(speed ~ dist, data = cars)
smod <- summary(mod.slr)
# Pull out items from the output list
smod$sigma              # residual standard error
smod$coefficients[, 1]  # least squares estimates
smod$coefficients[, 2]  # standard errors of the estimates
smod$r.squared          # R-squared
smod$adj.r.squared      # adjusted R-squared

## Multiple Linear Regression (MLR) Model
# Dataset: stackloss in the R library "datasets"
library(datasets)
library(car)
data(stackloss)
attach(stackloss)  # NOTE(review): see comment above about attach()
scatterplotMatrix(stack.loss)
data <- cbind(stack.loss, Air.Flow, Water.Temp, Acid.Conc.)
(cor.mat <- cor(data))
# OLS estimation in an MLR model
mod <- lm(stack.loss ~ Air.Flow + Water.Temp
          + Acid.Conc., data = stackloss)
(smod <- summary(mod))
smod$sigma              # residual standard error
smod$coefficients[, 1]  # least squares estimates
smod$coefficients[, 2]  # standard errors of the estimates
smod$r.squared          # R-squared
smod$adj.r.squared      # adjusted R-squared

## One-way fixed-effects ANOVA model
# Dataset: PlantGrowth in the R library "stats"
data(PlantGrowth)
fit.lm <- lm(weight ~ group, data = PlantGrowth)
summary(fit.lm)
# or
summary(fit <- lm(weight ~ group, data = PlantGrowth))
# or (aov gives the ANOVA-style parameterisation)
summary(fit1 <- aov(weight ~ group, data = PlantGrowth))
coef(fit1)
|
3cc27b3389d36330d87b72e10987118010830c57
|
5350036523d7a905429605a828865e5ddd95d3c5
|
/emoji_scrape.R
|
dcd0ae6d4c308020575d593e31de84e566c121f6
|
[] |
no_license
|
Mvondoivan/Text_mining_sauce
|
e5adb8b34627966260a8964c0eded70ee4fafde0
|
664ae99e73e8b92d9e6d8fff3e7d41da15633d75
|
refs/heads/master
| 2020-03-22T08:16:16.866897
| 2018-11-27T05:55:00
| 2018-11-27T05:55:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,395
|
r
|
emoji_scrape.R
|
# Scrape emoji names and their unicode characters from emojipedia.org for
# fourteen vendor pages, returning a single de-duplicated data frame with
# columns name.imoji and unicode.imoji.
# NOTE(review): relies on rvest/xml2 (read_html, html_node, xml_children),
# stringr, and dplyr being loaded by the caller -- confirm.
scrap_all_emoji=function(){
  # Vendor index pages to scrape.
  str_url=c('https://emojipedia.org/apple/', 'https://emojipedia.org/emojidex/', 'https://emojipedia.org/emojione/',
            'https://emojipedia.org/emojipedia/11.1/', 'https://emojipedia.org/facebook/', 'https://emojipedia.org/google/',
            'https://emojipedia.org/htc/', 'https://emojipedia.org/lg/', 'https://emojipedia.org/messenger/',
            'https://emojipedia.org/microsoft/', 'https://emojipedia.org/mozilla/', 'https://emojipedia.org/samsung/',
            'https://emojipedia.org/twitter/', 'https://emojipedia.org/whatsapp/')
  # Scrape one vendor page into a (name, unicode) data frame.
  scrap_emo=function(y){
    # Extract name/codepoint parts from one grid-cell tag: take the ncol-th
    # "https://" URL inside the tag, keep the file's base name (no extension),
    # and split it on '_' into its name and codepoint components.
    etsi=function(x, ncol){
      text_imo=x%>%str_split(., '"')%>%unlist(.)%>%.[grep(pattern = "https://" , .)]%>%.[ncol]%>%
        str_split(., '/')%>%unlist(.)%>%.[length(.)]%>%str_split(., '[.]')%>%unlist(.)%>%.[1]%>%
        str_split(., '_')%>%unlist(.)
      #if (length(text_imo)==4) text_imo=c(paste(text_imo[1],text_imo[2],sep='_'), text_imo[3])
      # Four parts: keep elements 1 and 3 (drops the middle name fragment).
      if (length(text_imo)==4) text_imo=c(text_imo[1], text_imo[3])
      return(text_imo)
    }
    # Download the page to a scratch file, then parse the ".emoji-grid" node
    # into alternating name/unicode values and assemble a two-column frame.
    download.file(y, destfile = "scrapedpage.html", quiet=T)
    text_emoji = read_html("scrapedpage.html")%>%html_node(.,".emoji-grid")%>%xml_children(.)%>%
      xml_children(.)%>%xml_children(.)%>%as.character()%>%lapply(.,etsi, 1)%>%
      data_frame(name.imoji=unlist(.)[c(T,F)],unicode.imoji=unlist(.)[c(F,T)])%>%
      select(one_of(c("name.imoji", "unicode.imoji")))
    # Convert "-"-separated hex codepoint strings to actual characters via
    # intToUtf8, then escape non-ASCII bytes so the column is plain ASCII.
    text_emoji$unicode.imoji=str_split(text_emoji$unicode.imoji, '-')%>%
      lapply(., FUN=function(x) paste0('0x', unlist(x))%>%lapply(., intToUtf8)%>%unlist()%>%paste0(., collapse = ''))%>%
      unlist()%>%as.character()%>%iconv(., "latin1", "ASCII", 'byte')
    return(text_emoji)}
  # Scrape every vendor, row-bind the results, and drop duplicate rows.
  text_emoji=lapply(str_url, scrap_emo)%>%do.call(bind_rows, args=.)%>%.[duplicated(.)==F,]
  return(text_emoji)}
|
5c07748291211e15444a103c93173054e09cde06
|
66d59625dd1c08762976ff2fb2917659abb60a3b
|
/code/process/testCNDRtoABAprint.R
|
46e16e57e35f2d2b0fba3f02418f141e90cb31cf
|
[
"MIT"
] |
permissive
|
ejcorn/tau-spread
|
37811e2b5ba600c4903d02f2d30eaa69fe69cca0
|
eda26928b726370ffad8ba57772e85f84e547bc0
|
refs/heads/master
| 2023-06-30T04:39:32.402466
| 2021-07-20T22:18:40
| 2021-07-20T22:18:40
| 242,880,836
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,106
|
r
|
testCNDRtoABAprint.R
|
# print which regions match and don't match between ABA and CNDR mappings
#################
### Load data ###
#################
# Experimental group whose pathology data is summarised below.
grp <- 'IgG1'
# NOTE(review): this wipes the global environment except `params` and `grp`;
# the script assumes `params` (with $basedir and $opdir) is already defined.
rm(list=setdiff(ls(),c('params','grp')))
basedir <- params$basedir
setwd(basedir)
savedir <- paste(params$opdir,'diffmodel/',sep='')
dir.create(savedir,recursive=T)
# Project helpers, including map.ABA.to.CNDR / map.CNDR.to.ABA used below.
source('code/misc/fitfxns.R')
source('code/misc/miscfxns.R')
load(paste(params$opdir,'processed/pathdata.RData',sep='')) # load path data and ROI names
#load(paste(params$opdir,'processed/Snca.RData',sep='')) # load Snca expression
# get mean pathology for only time point, 1 month
tp <- 1
Mice <- path.data[path.data$Condition == grp,-1]
Grp.mean <- colMeans(Mice,na.rm = T)
# Connectivity matrix (MATLAB .mat file; readMat from R.matlab).
W <- readMat(paste(params$opdir,'processed/W.mat',sep=''))$W
# Index vector over ABA regions, named by region, mapped into CNDR space.
X.ABA <- c(1:length(region.names))
names(X.ABA) <- region.names
CNDR.names <- path.names
X.CNDR <- map.ABA.to.CNDR(X.ABA,CNDR.names,ABA.to.CNDR.key)
# NOTE(review): X.CNDR from the mapping above is immediately overwritten
# below, so the ABA->CNDR result is discarded -- confirm whether intentional.
X.CNDR <- c(1:length(path.names))
names(X.CNDR) <- path.names
ABA.names <- region.names
X.ABA <- map.CNDR.to.ABA(X.CNDR,ABA.names,CNDR.to.ABA.key)
# Print CNDR regions with no ABA counterpart (empty key entries).
names(CNDR.to.ABA.key)[sapply(CNDR.to.ABA.key,length)==0]
|
c98eeead10babd1ec62a8a2d14d9afca0d8ec8df
|
d6ebc7fc723fe6280478a2c4e6c303adbdc0e364
|
/R/shinydashboardPlusGallery.R
|
a22d9c5b00d94f0471bb3fda5528355af610efc6
|
[] |
no_license
|
pvictor/shinydashboardPlus
|
19cafc572b53077f1ad5aa79752b73b6d17e9872
|
df71208b1489f5e8e9b8da0c51dbd8a6a94367e7
|
refs/heads/master
| 2020-03-14T23:15:57.456885
| 2018-05-02T11:37:08
| 2018-05-02T11:37:08
| 131,840,991
| 0
| 0
| null | 2018-05-02T11:27:31
| 2018-05-02T11:27:31
| null |
UTF-8
|
R
| false
| false
| 978
|
r
|
shinydashboardPlusGallery.R
|
#' @title Launch the shinydashboardPlus Gallery
#'
#' @description
#' A gallery of all components available in shinydashboardPlus.
#'
#' @export
#'
#' @examples
#'
#' if (interactive()) {
#'
#'  shinydashboardPlusGallery()
#'
#' }
shinydashboardPlusGallery <- function() { # nocov start
  # Suggested packages used by the gallery app; emit the same per-package
  # message (without stopping) when one is not installed.
  gallery_deps <- c("shinydashboard", "styler", "shinyAce",
                    "shinyWidgets", "shinyjqui")
  for (dep in gallery_deps) {
    if (!requireNamespace(package = dep)) {
      message(sprintf("Package '%s' is required to run this function", dep))
    }
  }
  # Launch the example application bundled with the package.
  shiny::shinyAppFile(
    system.file('examples/app.R', package = 'shinydashboardPlus', mustWork = TRUE)
  )
}
|
800b2c1194581a0046bda71d49c30c8b96cd2cfa
|
86706fbbece1928081502b6ad124fde660e9a40f
|
/spotify_app.R
|
6aa6edb231f2af59cd28de1cba803ef426e00233
|
[] |
no_license
|
ntsherm2/spotify
|
8530c6d1853f9f9a4ede764a33f1f8e71279d23e
|
93c251036b0270a3ec89571646b932d501af9860
|
refs/heads/master
| 2021-01-05T08:23:21.654562
| 2020-02-16T20:08:40
| 2020-02-16T20:08:40
| 240,951,408
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,363
|
r
|
spotify_app.R
|
library(shiny)
library(ggplot2)
library(dplyr)
library(mlr)
library(DT)

# Pre-computed audio features for the "Chill" playlist (built elsewhere);
# assumed to contain the nine feature columns plus artist/tracks -- confirm.
chill3.0 = readRDS('chill3.0.rds', refhook = NULL)

# UI: pick x/y audio features, the number of k-means clusters, and which
# cluster's songs to list in the table.
ui = pageWithSidebar(
  # App title ----
  headerPanel('Spotify Playlist Analysis'),
  # Sidebar panel for inputs ----
  sidebarPanel(
    selectInput('var_x', 'X Variable:',
                c('danceability' = 'danceability','energy' = 'energy','loudness' = 'loudness', 'speechiness' = 'speechiness', 'acousticness' = 'acousticness','instrumentalness' = 'instrumentalness','liveness' = 'liveness','valence' = 'valence', 'tempo' = 'tempo')),
    selectInput('var_y', 'Y Variable:',
                c('danceability' = 'danceability','energy' = 'energy','loudness' = 'loudness', 'speechiness' = 'speechiness', 'acousticness' = 'acousticness','instrumentalness' = 'instrumentalness','liveness' = 'liveness','valence' = 'valence', 'tempo' = 'tempo')),
    selectInput('var_k', '# Clusters (k):',
                c('2' = 2, '3' = 3, '4' = 4, '5' = 5, '6' = 6, '7' = 7)),
    selectInput('view_k', 'Cluster to List Songs:',
                c('1' = 1, '2' = 2, '3' = 3, '4' = 4, '5' = 5, '6' = 6, '7' = 7))
  ),
  mainPanel(
    h2('Clustering of my "Chill" Playlist'),
    h3(textOutput('caption')),
    plotOutput('clusterPlot'), #,
    #tableOutput('clusterTable')
    dataTableOutput('clusterTable')
  )
)

server = function(input, output) {
  # Caption: "y ~ x" text for the currently selected variables.
  formulaText = reactive({
    paste(input$var_y,'~', input$var_x)
  })
  output$caption = renderText({
    formulaText()
  })
  # Re-cluster when k changes: standardise the nine audio features
  # (mlr::normalizeFeatures), run k-means, and append the cluster label
  # to the raw data.
  chill4.0 = reactive({
    chill.kmeans = chill3.0 %>% select('danceability', 'energy', 'loudness', 'speechiness', 'acousticness', 'instrumentalness', 'liveness', 'valence', 'tempo') %>%
      normalizeFeatures(method = 'standardize') %>%
      kmeans(centers = as.numeric(input$var_k), nstart = 25)
    cbind(chill3.0,cluster = chill.kmeans$cluster)
  })
  # Scatter plot of the chosen features, coloured by cluster membership.
  output$clusterPlot = renderPlot({
    chill4.0() %>% ggplot(aes_string(x = input$var_x, y = input$var_y, color = as.factor(chill4.0()$cluster))) +
      geom_point() #color = paste('~',as.factor(cluster))
  })
  # Sortable table of the songs in the selected cluster.
  output$clusterTable = renderDataTable({
    chill4.0() %>% select('artist', 'tracks', 'cluster') %>% filter(cluster == input$view_k) %>% arrange(artist)
  })
}

shinyApp(ui, server)
|
fb651139a413a0680493bc96de55c6412d64dfec
|
e36e8d5859f764ffa3e6f18d2b5dcd6bbd4e80f0
|
/man/df.gis_wsp.Rd
|
089127d1571ca55c0ee5a5dfebdc542ddbe9e5a3
|
[
"MIT"
] |
permissive
|
ropensci/rrricanes
|
23855df40a5cc598b94ec90ac9e32c70b291e2a8
|
533454c8e4d3b7dff6dc2a6592a7b304fef41fdb
|
refs/heads/main
| 2023-01-07T17:56:01.118103
| 2022-12-31T18:29:58
| 2022-12-31T18:29:58
| 74,975,357
| 19
| 9
|
NOASSERTION
| 2022-12-31T18:29:59
| 2016-11-28T13:25:12
|
R
|
UTF-8
|
R
| false
| true
| 437
|
rd
|
df.gis_wsp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{df.gis_wsp}
\alias{df.gis_wsp}
\title{GIS wind speed probabilities for Hurricane Sandy (AL182012)}
\format{
An object of class \code{list} of length 3.
}
\source{
\url{http://www.nhc.noaa.gov/gis/archive_wsp.php}
}
\usage{
df.gis_wsp
}
\description{
GIS wind speed probabilities for Hurricane Sandy (AL182012)
}
\keyword{datasets}
|
7bb2cbf3ecc0fc7f2ebd44953eeade33c88a91d8
|
ec43bdacb37a1e923b27a85b96be8774f7e3e722
|
/man/securities_returns.Rd
|
aebab61bb9381c7344a95fa4b497c7ac21a09c90
|
[] |
no_license
|
irudnyts/estudy2
|
32e9ffcf3d9b0053bb5330556a6e3233fbf752fb
|
4824af33ab3c8a7f2a60f2e412355e7510c87b26
|
refs/heads/master
| 2022-11-13T03:36:30.018707
| 2022-04-12T12:36:33
| 2022-04-12T12:36:33
| 38,372,777
| 13
| 4
| null | 2022-10-21T07:52:05
| 2015-07-01T13:32:16
|
R
|
UTF-8
|
R
| false
| true
| 722
|
rd
|
securities_returns.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{securities_returns}
\alias{securities_returns}
\title{Returns of seven companies from 2019-04-01 to 2020-04-01}
\format{
A list of seven \code{zoo} elements:
\itemize{
\item AMZN
\item ZM
\item UBER
\item NFLX
\item SHOP
\item FB
\item UPWK
}
}
\usage{
securities_returns
}
\description{
A list of length seven, elements of which are objects of the class
\code{returns}. The list contains all necessary returns from 2019-04-01 to
2020-04-01 of seven companies, which could profit from COVID-19 lockdown. See
examples of \code{\link{apply_market_model}} for the dataset generation.
}
\keyword{datasets}
|
50f16279b649a17ccd300f9c43359899fdb1ae98
|
28169cfd99b95aeb5ad2391881ac994d40614fcf
|
/coup_exploration.R
|
5582e7b730cc0b292c075314cfb950f5e269ca7c
|
[] |
no_license
|
pscharfe/Predicting-Coup-Outcomes-in-R
|
0ca477f1a7a7b9d819687d567cfeedb1d7baf0dc
|
10ff1ac007d0513dce93323690ca15563e85b6b5
|
refs/heads/master
| 2023-02-02T08:42:42.457324
| 2020-12-20T03:55:51
| 2020-12-20T03:55:51
| 231,047,511
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,410
|
r
|
coup_exploration.R
|
library(tidyverse)
require(stargazer)
library(stats)
library(dplyr)
library(janitor)
library(boot)
library(knitr)
library(rpart)
library(rpart.plot)
library(ggplot2)
library(randomForest)
library(gbm)
library(Hmisc)
# Introduction: The goal of this script is to explore the basic trends in the data, especially regarding coup success and violence.
# These outcomes are coded in the dataset as `realized_coup` and `were_others_than_the_incumbent_killed`.
## Build a dataframe: coup_df
# Read the coup dataset from the script's own directory (RStudio-only path trick).
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
coup_csv <- read_csv("coupdata.csv")
# Normalise column names in place: whitespace -> underscore, lower case.
colnames(coup_csv) %<>% stringr::str_replace_all("\\s","_") %>% tolower
coup_df <- data.frame(coup_csv)
colnames(coup_df)
# Keep only rows where the outcome variable is known.
coup_df <- coup_df %>%
  filter(!is.na(realized_coup))
coup_df <- coup_df %>% clean_names()
sum(is.na(coup_df))
## Manually edit colnames of dataframe
head(coup_df)
colnames(coup_df)[which(names(coup_df) == "ambigious_coup")] <- "ambiguous_coup"
colnames(coup_df)[which(names(coup_df) == "were_others_then_the_incumbent_injured")] <- "were_others_than_the_incumbent_injured"
## Convert character columnns to numeric
# The "ordinary citizens" variable has missing values. To use this variable, we can assume that few ordinary citizens
# participate in these coups. Alternatively, we can leave in the NULLs; in that case, R will create predictors for "0" and "1"
typeof(coup_df$were_ordinary_citizens_involved)
coup_df[["were_ordinary_citizens_involved"]][is.na(coup_df["were_ordinary_citizens_involved"])] <- 0
coup_df$were_ordinary_citizens_involved <- as.numeric(coup_df$were_ordinary_citizens_involved)
# NOTE(review): `(is.na(x) <- 0)` evaluates to 0, so this mutate appears to
# overwrite the whole column with zeros, discarding the NA handling above --
# confirm whether that is intentional.
coup_df <- coup_df %>%
  mutate(were_ordinary_citizens_involved = (is.na(were_ordinary_citizens_involved) <- 0)) %>%
  mutate(were_ordinary_citizens_involved = as.numeric(were_ordinary_citizens_involved))
sum(is.na(coup_df$were_ordinary_citizens_involved))
# If necessary, code coup type as numeric instead of "conspiracy", "attempt," etc.
# conspiracy = 1, attempt = 2, (realized) coup = 3.
coup_df <- coup_df %>%
  mutate(coup_type_numeric = type_of_coup)
coup_df$coup_type_numeric <- gsub("conspiracy", "1", coup_df$coup_type_numeric)
coup_df$coup_type_numeric <- gsub("attempt", "2", coup_df$coup_type_numeric)
coup_df$coup_type_numeric <- gsub("coup", "3", coup_df$coup_type_numeric)
coup_df$coup_type_numeric <- as.numeric(coup_df$coup_type_numeric)
coup_df$type_of_coup
## Create season variables
coup_df$month_of_event
typeof(coup_df$month_of_event)
# Meteorological seasons as 0/1 indicators.
# FIX: the original tested `month_of_event == (12 | 1:2)` and
# `month_of_event == (3:5)` etc.  `|` collapses to logicals and `==` recycles
# the right-hand vector, so e.g. "winter" only matched month 1 and the other
# seasons each matched only a third of their months.  `%in%` tests set
# membership as intended.
coup_df <- coup_df %>%
  mutate(winter = as.numeric(month_of_event %in% c(12, 1, 2))) %>%
  mutate(spring = as.numeric(month_of_event %in% 3:5)) %>%
  mutate(summer = as.numeric(month_of_event %in% 6:8)) %>%
  mutate(autumn = as.numeric(month_of_event %in% 9:11))
# Sanity check retained from the original script: compare season counts.
sum(coup_df$summer) == sum(coup_df$spring)
## Create decade variables
# Exploratory type checks (results printed, not stored).
typeof(coup_df$year)
# NOTE(review): the result of as.numeric() below is discarded; `year` itself
# is not converted by this line.
as.numeric(coup_df$year)
coup_df$year
# One 0/1 indicator per decade, 1940s through 2000s.
coup_df <- coup_df %>%
  mutate(forties = case_when(
    (year > 1939) & (year < 1950) ~ 1,
    TRUE ~ 0)) %>%
  mutate(fifties = case_when(
    (year > 1949) & (year < 1960) ~ 1,
    TRUE ~ 0)) %>%
  mutate(sixties = case_when(
    (year > 1959) & (year < 1970) ~ 1,
    TRUE ~ 0)) %>%
  mutate(seventies = case_when(
    (year > 1969) & (year < 1980) ~ 1,
    TRUE ~ 0)) %>%
  mutate(eighties = case_when(
    (year > 1979) & (year < 1990) ~ 1,
    TRUE ~ 0)) %>%
  mutate(nineties = case_when(
    (year > 1989) & (year < 2000) ~ 1,
    TRUE ~ 0)) %>%
  mutate(aughties = case_when(
    (year > 1999) & (year < 2010) ~ 1,
    TRUE ~ 0))
# Count of coup events per decade.
sum(coup_df$forties)
sum(coup_df$fifties)
sum(coup_df$sixties)
sum(coup_df$seventies)
sum(coup_df$eighties)
sum(coup_df$nineties)
sum(coup_df$aughties)
# Use select(-x) to drop variables that are irrrelevant to coup success
coup_df <- coup_df %>% select(-x54, -x53, -cow_code, -day_of_event, -coup_id, -month_of_event, -type_of_coup)
# NOTE(review): assigning 0 into the `country` column coerces 0 to match the
# column's type; if `country` is character the NAs become "0".
coup_df[["country"]][is.na(coup_df["country"])] <- 0
sum(is.na(coup_df))
### Data Exploration
## Means
mean(coup_df$realized_coup)
mean(coup_df$were_others_than_the_incumbent_killed)
# NOTE(review): `summarise(mean())` has no column argument and will error --
# looks like leftover exploratory code.
coup_df %>%
  summarise(mean())
coup_df %>% filter(year != 0) %>% summarise(min(year))
coup_df %>% filter(year != 0) %>% summarise(max(year))
coup_df %>% filter(coup_conspiracies == 1) %>% summarise(mean(realized_coup))
coup_df %>% filter(coup_conspiracies == 1) %>% summarise(mean(were_others_than_the_incumbent_killed))
# First year = 1946
# Coups by country
mean_country <- coup_df %>% group_by(country) %>% summarise((mean(realized_coup)))
# NOTE(review): sort() does not take a data frame plus an expression like
# this; the order()-based sorting further down is the working pattern.
mean_country %>% sort((mean(realized_coup)), decreasing = TRUE)
# Violence in successful coups
coup_df %>% filter(realized_coup == 1) %>% summarise(mean(were_others_than_the_incumbent_killed))
coup_df %>% filter(realized_coup == 1) %>% summarise(mean(were_others_than_the_incumbent_injured))
# Violence in attempted, but failed coups
coup_df %>% filter(coup_type_numeric == 2) %>% summarise(mean(were_others_than_the_incumbent_killed))
coup_df %>% filter(coup_type_numeric == 2) %>% summarise(mean(were_others_than_the_incumbent_injured))
# Egypt
# NOTE(review): cow_code was dropped from coup_df above, so this line errors.
coup_df %>% filter(country == "Egypt") %>% filter(year == 1954) %>% summarise(sum(cow_code))
# Egypt had four coup events in 1954
## Correlations
# Pairwise correlation matrix of the remaining numeric predictors.
corr_df <- coup_df %>% select(-country, -year, -ambiguous_coup, -coup_conspiracies, -unrealized)
corr_df <- cor(corr_df)
corr_df <- data.frame(corr_df)
# Realized coup correlations (Kendall's tau tests for selected predictors)
cor.test(coup_df$attempted_coup, coup_df$were_others_than_the_incumbent_killed, method = "kendall")
cor.test(coup_df$realized_coup, coup_df$were_students_or_academics_involved_in_the_coup, method = "kendall")
cor.test(coup_df$realized_coup, coup_df$were_military_actors_involved_in_the_coup, method = "kendall")
cor.test(coup_df$realized_coup, coup_df$military_coup, method = "kendall")
# Column of correlations with coup success, sorted descending.
real_corrs <- corr_df %>% select(realized_coup)
real_corrs <- tibble::rownames_to_column(real_corrs, "variables")
# real_corrs <- real_corrs %>% sort(realized_coup, decreasing = TRUE)
real_corrs <- real_corrs[order(-real_corrs$realized_coup),]
real_corrs <- real_corrs[3:56,]
real_corrs <- real_corrs %>%
  filter(variables != "were_ordinary_citizens_involved") %>%
  filter(variables != "attempted_coup")
# Horizontal bar chart of correlations with coup success.
real_plot <- ggplot(real_corrs, aes(x = reorder(variables, -realized_coup), y = realized_coup)) + geom_col() +
  ylab("Coup Success") + xlab("Coup Features") + ggtitle("Correlations: Coup Features => Success")
real_plot <- real_plot + theme(axis.text.x = element_text(angle = 90, hjust = 1)) + coord_flip()
ggsave(real_plot, filename = "Success_Corrs.jpeg", height = 8, width = 8)
# real_corrs %>% filter(realized_coup > 0.1)
# real_corrs %>% filter(realized_coup < -0.01)
# write.csv(real_corrs,"real_corrs.csv")
# Violent coup correlations
violence_corrs <- corr_df %>% select(were_others_than_the_incumbent_killed)
violence_corrs <- tibble::rownames_to_column(violence_corrs, "variables")
violence_corrs <- violence_corrs %>%
  filter(variables != "were_others_than_the_incumbent_killed") %>%
  filter(variables != "were_ordinary_citizens_involved") %>%
  filter(variables != "coup_type_numeric")
violence_corrs <- violence_corrs[order(-violence_corrs$were_others_than_the_incumbent_killed),]
violence_corrs
# Horizontal bar chart of correlations with coup violence.
violence_plot <- ggplot(violence_corrs, aes(x = reorder(variables, -were_others_than_the_incumbent_killed),
                                            y = were_others_than_the_incumbent_killed)) + geom_col() + ylab("Coup Violence") +
  xlab("Coup Features") + ggtitle("Correlations: Coup Features => Violence")
violence_plot <- violence_plot + theme(axis.text.x = element_text(angle = 90, hjust = 1)) + coord_flip()
ggsave(violence_plot, filename = "Violence_Corrs.jpeg", height = 8, width = 8)
violence_corrs %>% filter(were_others_than_the_incumbent_killed > 0.1)
violence_corrs %>% filter(were_others_than_the_incumbent_killed < -0.01)
# violence_corrs <- sort(violence_corrs$were_others_than_the_incumbent_killed, decreasing=TRUE, row.names=TRUE)
# write.csv(real_corrs,"violence_corrs", row.names = TRUE)
|
5fac535ad8cf422dbcd7c1482d1d78cff6d20d2f
|
1a953ee20468612ccf2c0fe05e3189593e8488de
|
/IBM_full_Likelihood.R
|
e01be287a1dba60fb518332de6f544ddd3af1d22
|
[] |
no_license
|
MarieAugerMethe/Babybou_IBM
|
96528350263212a4591311560db124374fce613e
|
86f9088d7d98a18bef21a9f1dbd7cffeabc7367d
|
refs/heads/master
| 2016-09-05T23:28:27.982712
| 2014-08-29T20:03:48
| 2014-08-29T20:03:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,566
|
r
|
IBM_full_Likelihood.R
|
##
# Date created: September 25th, 2013
# Authors: Marie Auger-Methe, Ulrike Schlaegel, Craig DeMars
# Please cite our paper if you use our script:
# DeMars, C., M. Auger-Methe, U. Schlaegel, S. Boutin, (Published online)
# Inferring Parturition and Neonate Survival from Movement Patterns of Female Ungulates.
# Ecology and Evolution. DOI: 10.1002/ece3.785
# For an in-depth explanation of the code see:
# Appendix S2. Likelihood Functions Used in the Individual-based Method.
# Appendix S3. R Code for the Individual-based Method.
# from the supporting information:
# ece3785-sup-0001-AppendixS1-S4.docx
# The main difference between this code and the one presented in IBM.R and the appendix S3
# is this function calculated the full likelihood instead of the approximation.
# Because, in this case, we are numerically estimating all of the parameters,
# this code will take longer to run.
# fullnll1 now replaces nllk and nll1 now optimizess fullnll1 instead of nllk.
# This mean that now both lambda and k are estimated simultaneously numerically.
# Now we have fullnll2 and changed nll2 so it optimizes simultaneously lambda and k.
# Also, nll1 and nll2 returns now both NLL value and the MLE values.
# In the function mnll3M we now have:
# columns for BIC values in resCA
# small changes in sections "Calf survived" and "Calf lost"
# (MLE values return from function nll1 and nll2 used
# instead of optimizing again for best BP)
# calculate BIC values and get model with min BIC
#################################################################
# Functions that calculate the negative log likelihood (nll) of 2 of the 3 models
# and the nll of k, one of the parameter used to calculate the nll of these models.
# Note that all models assume
# that the step length are distributed with an exponential distribution.
# They differ in how the main parameter of the exponential distribution
# varies through the time series.
# nll1 calculates the nll of a female with a calf that survived.
# This model assumes that the mean step length is constant prior to the birth of a calf.
# Once the calf is born the mean step length decrease to 1/(l_a * k),
# which should be really close to 0.
# The mean step length then linearly increases
# to reach the same mean step length as prior to calf birth.
# The speed of the increase depends on the time it takes to recover the normal movement,
# which is represented by k.
# Once the mean step length reaches the value from before the birth of the calf,
# it remains at this value.
# The parameters to estimate are:
# 1 BP, l_a, and k
# fullnll1 evaluates the negative log-likelihood at parameter values
# (lambda, k) under model 1 (calf born and survived).
#
# param: numeric vector c(l_a, k) where
#   l_a - rate of the exponential step-length distribution before the birth
#   k   - number of steps needed to recover the pre-birth mean step length
# SL_a:  step lengths before the breakpoint (t = 1, ..., BP)
# SL_b:  step lengths after the breakpoint (t = BP+1, ..., T)
# ti_b:  time indices matching SL_b (handles gaps from missing locations)
# Returns the scalar negative log-likelihood.
fullnll1 <- function(param, SL_a, SL_b, ti_b){
  l_a <- param[1]
  k <- param[2]
  # Before the birth: constant exponential rate l_a.
  LL_a <- dexp(SL_a, l_a, log = TRUE)  # was log=T; TRUE is safer style
  # After the birth: the mean step length ramps up linearly from z/k to the
  # pre-birth mean z = 1/l_a, then plateaus at z once it is reached.
  z <- 1/l_a
  z_b <- pmin((ti_b - ti_b[1] + 1) * z / k, z)
  LL_k <- dexp(SL_b, 1/z_b, log = TRUE)
  # Negative log-likelihood summed over both segments.
  -sum(LL_a) - sum(LL_k)
}
# nll1 minimizes fullnll1 over possible values for (lambda, k)
# This results in the maximum likelihood estimates for (lambda, k)
#
# BP: candidate breakpoint (index of the last pre-birth step)
# SL: full vector of step lengths (no NAs)
# ti: time indices of the steps (handles gaps from missing locations)
# kc: c(kmin, kmax) box constraints on the recovery-time parameter k
# Returns a list with the minimised negative log-likelihood (nLL) and the
# MLEs of lambda (l_a) and k.
# NOTE(review): the start value for k uses the globals `dint` and `subs`
# (location interval in hours and subsampling factor, defined elsewhere in
# the project) -- confirm they are set before calling.
nll1 <- function(BP, SL, ti, kc){
  n <- length(SL)
  ###
  # Divides the time series into two sections:
  # a: before the birth of the calf
  # b: after the birth of the calf
  SL_a <- SL[1:BP]
  s_b <- (BP+1):n
  SL_b <- SL[s_b]
  ti_b <- ti[s_b]
  # Numerically estimate the MLE of lambda and k
  # and obtain the corresponding negative log-likelihood values
  # start values for optim are:
  # for lambda: the exponential MLE (inverse mean step length) for t=1,...,BP
  # for k: 4 weeks (equivalent thereof in steps)
  startvalues <- c(length(SL_a)/sum(SL_a), 4*24/(dint*subs)*7)
  mnll_b <- optim(startvalues, fullnll1, method="L-BFGS-B", SL_a=SL_a, SL_b=SL_b, ti_b=ti_b, lower=c(0.0005, kc[1]), upper=c(1, kc[2]))
  ###
  # Getting the total nll for the whole time series
  nLL <- mnll_b$value
  l_a <- mnll_b$par[1]
  k <- mnll_b$par[2]
  return(list("nLL"=nLL, "l_a"=l_a, "k"=k))
}
# nll2 calculates the nll of a female with a calf that died.
# This model assumes that the mean step length is constant prior to the birth of a calf.
# Once the calf is born the mean step length decrease to 1/(l_a * k),
# which should be really close to 0.
# The mean step length then linearly increases
# to reach the same mean step length as prior to calf birth.
# The speed of the increase depends on the time it takes to recover the normal movement,
# which is represented by k.
# Once the mean step length reaches the value from before the birth of the calf,
# it remains at this value.
# The k represent the time it takes to recover the normal movement if the calf survives.
# The previous assumptions are the same as for nll1.
# This model further assumes
# that the female immediately recovers her normal movement when the calf dies.
# This abrupt switch is represented by a second BP.
# The parameters to estimate are:
# 2 BPs, l_a, and k
# fullnll2 evaluates the negative log-likelihood at parameter values
# (lambda, k) under model 2 (calf born, then lost).
#
# param: numeric vector c(l_a, k) -- same meaning as in fullnll1.
# SL_a:  step lengths before the calf's birth (t = 1, ..., BP1)
# SL_b:  step lengths while the calf is alive (t = BP1+1, ..., BP2)
# SL_c:  step lengths after the calf's death (t = BP2+1, ..., T)
# ti_b:  time indices matching SL_b (handles gaps from missing locations)
# Returns the scalar negative log-likelihood.
fullnll2 <- function(param, SL_a, SL_b, SL_c, ti_b){
  l_a <- param[1]
  k <- param[2]
  # Before the birth: constant exponential rate l_a.
  LL_a <- dexp(SL_a, l_a, log = TRUE)  # was log=T; TRUE is safer style
  # While the calf is alive: mean step length ramps linearly from z/k back
  # towards the pre-birth mean z = 1/l_a, then plateaus at z.
  z <- 1/l_a
  z_b <- pmin((ti_b - ti_b[1] + 1) * z / k, z)
  LL_k <- dexp(SL_b, 1/z_b, log = TRUE)
  # After the calf's death: immediate return to the pre-birth rate l_a.
  LL_c <- dexp(SL_c, l_a, log = TRUE)
  # Negative log-likelihood summed over the three segments.
  -sum(LL_a) - sum(LL_k) - sum(LL_c)
}
# nll2 minimizes fullnll2 over possible values for (lambda, k)
# This results in the maximum likelihood estimates for (lambda, k)
#
# BP: c(BP1, BP2) candidate breakpoints (calf birth and calf death)
# SL, ti, kc: as in nll1
# Returns a list with the minimised negative log-likelihood (nLL) and the
# MLEs of lambda (l_a) and k.
# NOTE(review): like nll1, the start value for k uses the globals `dint`
# and `subs` defined elsewhere in the project -- confirm they are set.
nll2 <- function(BP, SL, ti, kc){
  n <- length(SL)
  # Divides the time series into three sections:
  # a: before the birth of the calf
  # b: after the birth of the calf but before it dies
  # c: after the death of the calf
  SL_a <- SL[1:BP[1]]
  s_b <- (BP[1]+1):BP[2]
  SL_b <- SL[s_b]
  ti_b <- ti[s_b]
  SL_c <- SL[(BP[2]+1):n]
  # Numerically estimate the MLE of lambda and k
  # and obtain the corresponding negative log-likelihood values
  # start values for optim are:
  # for lambda: the exponential MLE (inverse mean step length) for t=1,...,BP1
  # for k: 4 weeks (equivalent thereof in steps)
  startvalues <- c(length(SL_a)/sum(SL_a), 4*24/(dint*subs)*7)
  mnll_b <- optim(startvalues, fullnll2, method="L-BFGS-B", SL_a=SL_a,
                  SL_b=SL_b, SL_c=SL_c, ti_b=ti_b,
                  lower=c(0.0005, kc[1]), upper=c(1, kc[2]))
  # Getting the total nll for the whole time series
  nLL <- mnll_b$value
  l_a <- mnll_b$par[1]
  k <- mnll_b$par[2]
  return(list("nLL"=nLL, "l_a"=l_a, "k"=k))
}
# mnll3M: fit and compare the three calf-status models on one movement series.
#
# movF: list with at least three elements:
#   SL: numeric vector of step lengths measured at regular intervals (no NAs).
#   ti: integer vector giving, for each step length, its time index into tp;
#       same length as SL (no NAs).
#   tp: POSIXct vector with the real date/time of each possible step,
#       with NAs for missing steps.
#   Example: locations (0,0) (0,1) (0,3) (1,3) (7,3) taken at
#   01:00, 02:00, 03:00, 08:00, 09:00 give
#   SL = (1,2,6), ti = (1,2,4), tp = (01:00, 02:00, NA, 08:00, NA).
#   The series should be restricted to the period relevant to the birth
#   and death of the calf.
# int: minimum number of steps required between the start of the series and
#   the first breakpoint (birth of calf), between the two breakpoints, and
#   between the last breakpoint (death of calf) and the end of the series.
#   Needed so every section has enough steps to estimate its parameters.
# kcons: c(kmin, kmax) constraint for parameter k, the time (in steps) a
#   female with a surviving calf takes to recover normal movement.
#   Required by optim(); for an uninformative constraint use c(0, vlv)
#   with vlv a very large value.
#
# Returns a list with:
#   resCA: sample size, minimised NLL per model, AIC/BIC per model and the
#          best model under each criterion (0, 1 or 2).
#   BPs:   breakpoint indices (iBP*) and their date-times (BP*).
#   mpar:  non-breakpoint parameter estimates of each model.
mnll3M <- function(movF, int, kcons){
  # Model-comparison results
  resCA <- matrix(NA, 1, ncol=12)
  colnames(resCA) <- c("n", "mnll_0","mnll_1","mnll_2",
                       "AIC0", "AIC1", "AIC2", "BM",
                       "BIC0", "BIC1", "BIC2", "BM_BIC")
  # Breakpoint indices and actual date/times
  BPs <- data.frame(matrix(NA, 1, ncol=6))
  colnames(BPs) <- c("BP1c", "BP2c", "BP2l", "iBP1c", "iBP2c", "iBP2l")
  # Parameters (other than the BPs) of each model
  mpar <- matrix(NA, 1, 5)
  colnames(mpar) <- c('b0','b1','b2','k1', 'k2')
  # Sample size
  resCA[1] <- length(movF$SL)
  ##
  # M0: no calf. Movement is constant over the whole series, so the mean
  # step length (b0, the inverse of the exponential rate) is the only
  # parameter; its MLE has a closed form.
  mpar[1] <- sum(movF$SL)/resCA[1] #b0
  resCA[2] <- -sum(dexp(movF$SL, 1/mpar[1], log=TRUE)) #mnll0
  ##
  # M1: calf survived. Profile the likelihood over all admissible single
  # breakpoints (birth of the calf).
  BP1ser <- int:(resCA[1]-int)
  temp_1 <- lapply(BP1ser, nll1, SL=movF$SL, ti=movF$ti, kc=kcons)
  # Each element of temp_1 is list(nLL, l_a, k); extract the nLL values
  NLL_1 <- as.vector(unlist(temp_1)[seq(1, length(unlist(temp_1)), 3)])
  MNLL_1_i <- which.min(NLL_1)
  resCA[3] <- NLL_1[MNLL_1_i] # mnll_1
  BPs[4] <- BP1ser[MNLL_1_i]
  BPs[1] <- as.character(movF$tp[movF$ti[BPs[[4]]]]) #BP1c
  mpar[2] <- 1/temp_1[[MNLL_1_i]]$l_a #b1
  mpar[4] <- temp_1[[MNLL_1_i]]$k
  ##
  # M2: calf lost. Profile over all admissible breakpoint pairs
  # (birth and death of the calf).
  BP2ser <- combn(int:(resCA[1]-int), 2)
  BP2ser <- BP2ser[, diff(BP2ser) >= int] # keep pairs at least int apart
  BP2ser <- split(t(BP2ser), 1:ncol(BP2ser))
  temp_2 <- lapply(BP2ser, nll2, SL=movF$SL, ti=movF$ti, kc=kcons)
  NLL_2 <- as.vector(unlist(temp_2)[seq(1, length(unlist(temp_2)), 3)])
  MNLL_2_i <- which.min(NLL_2)
  resCA[4] <- NLL_2[MNLL_2_i] # mnll_2
  BPs[5:6] <- BP2ser[[MNLL_2_i]]
  BPs[2] <- as.character(movF$tp[movF$ti[BPs[[5]]]]) #BP2c
  BPs[3] <- as.character(movF$tp[movF$ti[BPs[[6]]]]) #BP2l
  mpar[3] <- 1/temp_2[[MNLL_2_i]]$l_a #b2
  # BUG FIX: index the model-2 fits with the model-2 minimiser
  # (was temp_2[[MNLL_1_i]]$k, i.e. the model-1 index).
  mpar[5] <- temp_2[[MNLL_2_i]]$k
  # AIC = 2*nll + 2*(number of estimated parameters), and best model by AIC
  resCA[5] <- 2*(resCA[2] + 1) # only lambda to estimate
  resCA[6] <- 2*(resCA[3] + 3) # lambda, k and BP
  resCA[7] <- 2*(resCA[4] + 4) # lambda, k and 2 BPs
  resCA[8] <- which.min(resCA[,5:7])-1
  # BIC = 2*nll + npar*log(n), and best model by BIC
  resCA[9] <- 2*resCA[2] + 1*log(resCA[1])
  resCA[10] <- 2*resCA[3] + 3*log(resCA[1])
  resCA[11] <- 2*resCA[4] + 4*log(resCA[1])
  resCA[12] <- which.min(resCA[,9:11])-1
  return(list(resCA=resCA, BPs=BPs, mpar=mpar))
}
|
76741a59db412fcf6755a34da0bb297fcc04c2db
|
a77ada7874b9f355fc43edfb8ed0ead40afa6165
|
/analysis/beats_to_blocks.R
|
1212309cc2fb9c929cb794c32e6de93ed9615959
|
[] |
no_license
|
johnson-shuffle/rsji
|
bab734aac9cf6008eeaaae7426573c8a4b555028
|
2adc2c75db415ef4c8d4a1379cd6c14c13d45630
|
refs/heads/master
| 2018-09-01T10:09:32.896051
| 2018-08-21T04:33:05
| 2018-08-21T04:33:05
| 107,724,759
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,076
|
r
|
beats_to_blocks.R
|
# ----- preamble ----------------------------------------------------------
library(rgdal)
library(rgeos)
library(sp)
library(maptools)
# ----- spatial data ------------------------------------------------------
# Seattle block-group lookup (Excel file fetched from a tinyurl mirror)
download.file('https://tinyurl.com/y9933p2x', destfile = 'raw/tmp.xlsx')
geo <- read_excel('./raw/tmp.xlsx')
names(geo) %<>% tolower()
# Census block groups: reproject to UTM zone 10N and keep only the block
# groups present in the Seattle lookup (first 12 chars of geoid10 = GEOID)
bgs <- readOGR(db_gis, 'blockgroups')
bgs <- spTransform(bgs, '+init=epsg:32610')
bgs <- bgs[bgs$GEOID %in% unique(str_sub(geo$geoid10, 1, 12)), ]
# SPD beats, reprojected to match the block groups.
# BUG FIX: the original called proj4string(blk) on an undefined object
# `blk`; the beats must share the block groups' CRS for the intersections
# below, so use bgs.
spd <- readOGR(db_gis, 'beats')
spd <- spTransform(spd, proj4string(bgs))
# ----- sectors -----------------------------------------------------------
# Sector polygons: dissolve beats on the first character of the beat code
spd_sectors <- unionSpatialPolygons(spd, str_sub(spd@data$beat, 1, 1))
spd_sectors <- SpatialPolygonsDataFrame(
  spd_sectors,
  data.frame(sector = names(spd_sectors)),
  match.ID = FALSE
)
# Intersect sectors with block groups
spd_bgs <- raster::intersect(spd_sectors, bgs)
# Area of each sector x block-group piece
# (row names must match the polygon IDs for spCbind to align rows)
areas <- tibble(ALAND = map_dbl(spd_bgs@polygons, ~slot(.x, 'area')))
row.names(areas) <- map_chr(spd_bgs@polygons, ~slot(.x, 'ID'))
areas <- spCbind(spd_bgs, areas)
# Share of each block group falling in each sector
# (ALAND.1 is the intersected-piece area added by spCbind)
sectors <- areas@data %>%
  mutate(pct = ALAND.1 / (ALAND + AWATER)) %>%
  select(GEOID, sector, pct)
# ----- beats -------------------------------------------------------------
# Beat polygons: one polygon per beat code
spd_beats <- unionSpatialPolygons(spd, spd@data$beat)
spd_beats <- SpatialPolygonsDataFrame(
  spd_beats,
  data.frame(beat = names(spd_beats)),
  match.ID = FALSE
)
# Intersect beats with block groups
spd_bgs <- raster::intersect(spd_beats, bgs)
# Area of each beat x block-group piece
areas <- tibble(ALAND = map_dbl(spd_bgs@polygons, ~slot(.x, 'area')))
row.names(areas) <- map_chr(spd_bgs@polygons, ~slot(.x, 'ID'))
areas <- spCbind(spd_bgs, areas)
# Share of each block group falling in each beat
beats <- areas@data %>%
  mutate(pct = ALAND.1 / (ALAND + AWATER)) %>%
  select(GEOID, beat, pct)
# ----- add to database ---------------------------------------------------
copy_to(db, sectors, temporary = FALSE, overwrite = TRUE)
copy_to(db, beats, temporary = FALSE, overwrite = TRUE)
|
9a3eb7ebf523a0fbbbc8ecc4ab5b98aab8c83227
|
2dbaaeda91fc8c894d3ab498cde5bf2eafe2b564
|
/code/R/NEW_supplemental_figures.R
|
ef2f2523c495d558211f333d1f98c12f86dfd9f2
|
[] |
no_license
|
jlleslie/AdaptiveImmunity_and_Clearance
|
f19756b8a5f1c74031bd5fbd868d002ed2763b1c
|
bed19c78a1759da752d384b8156d1a5331ed25a5
|
refs/heads/master
| 2021-07-31T21:45:03.870499
| 2021-07-20T18:20:39
| 2021-07-20T18:20:39
| 62,152,280
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,471
|
r
|
NEW_supplemental_figures.R
|
###Supplmental Figures for Clearance Paper
#setworking directory
setwd("~/Desktop/AdaptiveImmunity_and_Clearance/data")
library(ggplot2)
library(grid)
library(scales)
library(vegan)
library(gtable)
# Modified from source: http://www.cookbook-r.com/Graphs/Plotting_means_and_error_bars_(ggplot2)/
# summaryMED: summarise a numeric column per group, giving the count,
# first quartile, median and third quartile.
#   data:       a data frame.
#   measurevar: name of the column to be summarised.
#   metadata:   character vector of grouping column names.
#   na.rm:      boolean; ignore NAs when summarising?
#   .drop:      passed through to plyr::ddply.
# Returns a data frame with one row per group; the median column is renamed
# to measurevar and the quartile columns to firstquart.25per / thirdquart.75per.
summaryMED<-function(data=NULL, measurevar, metadata=NULL, na.rm=FALSE, .drop=TRUE){
  # NOTE(review): loading a package inside a function is a side effect on
  # the caller's search path; kept as-is from the original source.
  library(plyr)
  # length2: like length(), but can exclude NAs
  length2 <- function (x, na.rm=FALSE) {
    if (na.rm) sum(!is.na(x))
    else length(x)
  }
  # One row of summary statistics per combination of the metadata columns
  data1<- ddply(data, metadata, .drop=.drop,
                .fun=function(xx, col){
                  c(N = length2(xx[[col]], na.rm=na.rm),
                    firstquart = (quantile(xx[[col]], na.rm=na.rm)[2]),
                    median = median (xx[[col]], na.rm=na.rm),
                    thridquart = (quantile(xx[[col]], na.rm=na.rm)[4])
                  )
                },
                measurevar
  )
  # quantile() appends "25%"/"75%" to the element names; rename to
  # syntactic column names. (The "thridquart" misspelling only exists
  # internally; it is renamed to "thirdquart.75per" here.)
  data1 <- rename(data1, c("median" = measurevar))
  data1 <- rename(data1, c("firstquart.25%" = "firstquart.25per"))
  data1 <- rename(data1, c("thridquart.75%" = "thirdquart.75per"))
  return(data1)
}
############# Supp Figure 1A: Colonization in WT donor mice.
# Read colonization data (CFU per gram feces over time)
donor.col<-read.delim(file= "Adoptivetransfer_donor_colonization.txt", header = T)
# Undetectable samples are coded as 0 in this file; replace them with
# LOD/sqrt(2), where the limit of detection (LOD) is 100 CFU/g.
fill.in.lod<-100/sqrt(2) #fill.in.lod = 70.71068
donor.col$CFU_g<-replace(donor.col$CFU_g,donor.col$CFU_g== 0,fill.in.lod)
## Median and IQR of CFU/g grouped by cage and day
donor.col.med<-summaryMED(donor.col, measurevar="CFU_g", metadata=c("Cage","Day"), na.rm=TRUE)
# One color per cage
colors.don<-c("1500"="grey", "1502"="black")
# Points, connecting lines and IQR error bars per cage over time
donor.plot<-ggplot(donor.col.med, aes(x=Day, y= CFU_g, group=Cage, color=factor(Cage)))+
  geom_point(size=6)+
  scale_color_manual(values = colors.don) +
  geom_errorbar(aes(ymin=firstquart.25per, ymax=thirdquart.75per), width=1, size=0.9)+
  geom_line(size=0.9)
# Theme with white background;
# eliminates background, gridlines and key border
S1A = donor.plot+
  theme(
    panel.background = element_rect(fill = "white", color = "grey80", size = 2)
    ,panel.grid.major = element_line(color = "gray90", size = 0.6)
    ,panel.grid.major.x = element_blank()
    ,panel.grid.minor = element_blank()
    ,axis.ticks= element_line(size = 0.6, colour = "grey90")
    ,axis.ticks.length = unit(0.2, "cm")
    ,legend.background = element_blank ()
    ,axis.text.y=element_text(size=13)
    ,axis.title.y=element_text(size=13)
    ,axis.title.x=element_blank()
    ,axis.text.x=element_text(size=11)
  )
S1A = S1A + labs(y = "CFU per Gram Feces")
# Dashed reference line at the LOD (100 CFU/g)
S1A = S1A + geom_hline(aes(yintercept=100), colour = "gray10", size = 0.9, linetype=2)
# Log10 y axis with 10^x labels
S1A = S1A + scale_y_log10(breaks = scales::trans_breaks("log10", function(x) 10^x),labels = scales::trans_format("log10", scales::math_format(10^.x)))
S1A
############# Supp Figure 1B: Anti-C difficile toxin A titer in donor mice
# Read titers. Samples with no detectable titer were recorded as 0;
# one mouse had no sample to test and is NA.
antitoxin<-read.delim(file="AntitoxinA_IgGtiter_5ugmlcoat_Apri142017.txt")
# Split anti-toxin A IgG data by genotype: recipients (RAG) and donors (WT)
recipient<-antitoxin[antitoxin$Genotype=="RAG", ]
donors<-antitoxin[antitoxin$Genotype=="WT", ]
# Statistics ---------------------------------------------------------------
# The LOD of this assay is a titer of 1200; for the tests, set undetected
# (0) values to LOD/sqrt(2).
fill.in.lod<-1200/sqrt(2) #fill.in.lod= 848.5281
donors$AntitoxinA_IgG_Titer<-replace(donors$AntitoxinA_IgG_Titer,donors$AntitoxinA_IgG_Titer==0,fill.in.lod)
# Test whether titers of infected (630) vs mock-infected donors come from
# distinct distributions
donor.infect<-donors[donors$Treatment_1=="630",]
donor.infect_AntiAigg<-c(donor.infect$AntitoxinA_IgG_Titer)
donor.mock<-donors[donors$Treatment_1=="mock",]
donor.mock_AntiAigg<-c(donor.mock$AntitoxinA_IgG_Titer)
wilcox.test(donor.infect_AntiAigg, donor.mock_AntiAigg, exact=FALSE)
#data: donor.infect_AntiAigg and donor.mock_AntiAigg
#W = 20, p-value = 0.009277
#alternative hypothesis: true location shift is not equal to 0
#No correction is required because there is only one comparison
# For plotting, recode the fill-in LOD values to -10000 so undetected
# samples are clearly separated from samples detected at the LOD.
donors$AntitoxinA_IgG_Titer<-replace(donors$AntitoxinA_IgG_Titer, donors$AntitoxinA_IgG_Titer==fill.in.lod, -10000)
# Plotting -----------------------------------------------------------------
# Order the factor so the uninfected group plots first (on the left)
donors$Treatment_Grp<-factor(donors$Treatment_Grp, levels = c("uninfected_donor", "630_infected_donor"))
colors.don<-c("uninfected_donor"="grey", "630_infected_donor"="black")
donor.antitoxin.plot<-ggplot(donors, aes(x=Treatment_Grp, y=AntitoxinA_IgG_Titer, fill= factor(Treatment_Grp), color=factor(Treatment_Grp)))+
  geom_dotplot(binaxis = "y", stackdir="center", dotsize = 1.3) +
  scale_color_manual(values = rep("black",2)) +
  scale_fill_manual(values = colors.don, limits = c("uninfected_donor", "630_infected_donor")) +
  # NOTE(review): fun.y/fun.ymin/fun.ymax are deprecated since ggplot2 3.3
  # (now fun/fun.min/fun.max); kept for compatibility with older versions.
  stat_summary(fun.y = median, fun.ymin = median, fun.ymax = median, geom = "crossbar", width = 0.4, color="grey50") +
  scale_y_continuous( limits = c(-10000, 300500), labels = scales::comma, breaks = c(300000, 200000, 100000)) +
  geom_hline(aes(yintercept=1200), colour = "gray50", size = 1, linetype=2)+
  ylab("Serum Anti-TcdA IgG Titer")
S1B = donor.antitoxin.plot +
  #eliminates background, gridlines and key border
  theme(
    panel.background = element_rect(fill = "white", color = "grey80", size = 2)
    ,panel.grid.major = element_line(color = "gray90", size = 0.6)
    ,panel.grid.major.x = element_blank()
    ,panel.grid.minor = element_blank()
    ,axis.ticks= element_line(size = 0.6, colour = "grey90")
    ,axis.ticks.length = unit(0.2, "cm")
    ,legend.title=element_blank()
    ,legend.background = element_blank ()
    ,legend.key = element_blank ()
    ,legend.position="none" #if using this as a single figure change "none" to "top" or "bottom" and remove comment from the following 2 lines
    ,axis.text.y=element_text(size=11)
    ,axis.title.y=element_text(size=11)
    ,axis.title.x=element_blank()
    ,axis.text.x=element_blank()
    ,plot.margin = unit(c(1,1,2,1), "lines")
  )
S1B
# Text grobs used to label the plot outside the panel area
gtext.doninfect<-textGrob("Infected",gp = gpar(fontsize = 10))
# BUG FIX: corrected label typo ("Unifected" -> "Uninfected")
gtext.donmock<-textGrob("Uninfected", gp = gpar(fontsize = 10))
gtext.1200<-textGrob("1,200", gp=gpar(fontsize =11))
gtext.lod<-textGrob("(LOD)", gp=gpar(fontsize =11))
gtext.2star<-textGrob("**", gp = gpar(fontsize = 20))
# Add significance stars for infected vs mock, the group labels below the
# axis, and the LOD annotation to the left of the panel
S1B.A= S1B + annotation_custom(gtext.2star, xmin = 1.5, xmax = 1.5, ymin = 300300, ymax = 300350) + #adding 2 stars for comparison between infected vs mock
  annotate("segment", x=1, xend=2, y = 300200, yend = 300200, colour = "black", size = 0.7) +
  annotation_custom(gtext.doninfect, xmin = 2, xmax = 2, ymin = -50000, ymax = -30000) +
  annotation_custom(gtext.donmock, xmin = 1, xmax = 1, ymin = -50000, ymax = -30000) +
  annotation_custom(gtext.1200, xmin =0.1, xmax= 0.1, ymin = 1300, ymax=1400) +
  annotation_custom(gtext.lod, xmin =0.3, xmax= 0.3, ymin = 1300, ymax=1400)
# Turn off panel clipping so annotations outside the panel are drawn
g1 = ggplotGrob(S1B.A)
g1$layout$clip[g1$layout$name=="panel"] <- "off"
grid.draw(g1)
#Supp figure 2: Colonization of other mice included in Random Forest
#Colonization WT 2013 Experiment
### Read colonization data for all experiments.
# Note: samples with no colonies on the 10^-2 plate were recorded as
# 100 CFU/g feces, i.e. the LOD.
cfu<-read.table(file='Colonization_Overtime_630_Allexperiments_copy.txt', header=TRUE)
## Keep only the 2013 experiment (WT mice): those cages use 71X numbering
cfu$Cage<-as.factor(cfu$Cage)
cfu.exp13<-cfu[grep("71.",cfu$Cage, value =F),]
## Remove D13 data (plating issue: samples were plated late that day)
# NOTE(review): grep("13", ...) matches "13" anywhere in Day, not only the
# exact value 13 -- confirm Day contains no other values with "13" in them.
cfu.exp13.d13<-grep("13",cfu.exp13$Day, value =F)
cfu.exp13.NoD13<-cfu.exp13[-c(cfu.exp13.d13),]
## Remove cages 710 and 711 (they were uninfected)
cfu.710<-grep("710",cfu.exp13.NoD13$Cage, value=F)
cfu.exp13.NoD13<-cfu.exp13.NoD13[-c(cfu.710), ]
# locate cage 711 rows now that cage 710 rows have been removed
cfu.711<-grep("711",cfu.exp13.NoD13$Cage, value=F)
cfu.exp13.NoD13<-cfu.exp13.NoD13[-c(cfu.711), ]
# Shift LOD values (100) down to 25 so they sit visibly below the LOD line
cfu.exp13.NoD13$CFU_g <- replace(cfu.exp13.NoD13$CFU_g, cfu.exp13.NoD13$CFU_g==100, 25)
## Median and IQR of CFU/g grouped by cage and day
cfu.exp13.NoD13<-summaryMED(cfu.exp13.NoD13, measurevar="CFU_g", metadata=c("Cage","Day"), na.rm=TRUE)
# Plot CFU over time per cage with IQR error bars
exp2013.plotcfu<-ggplot(cfu.exp13.NoD13, aes(x=Day, y= CFU_g , colour= factor(Cage)))+
  geom_errorbar(aes(ymin=firstquart.25per, ymax=thirdquart.75per), width=1, size=0.9)+
  geom_line(size=0.9) +
  geom_point (size=3)
# Theme with white background;
# eliminates background, gridlines and key border
SB = exp2013.plotcfu+
  theme(
    panel.background = element_rect(fill = "white", color = "grey80", size = 2)
    ,panel.grid.major = element_line(color = "gray90", size = 0.6)
    ,panel.grid.major.x = element_blank()
    ,panel.grid.minor = element_blank()
    ,axis.ticks= element_line(size = 0.6, colour = "grey90")
    ,axis.ticks.length = unit(0.2, "cm")
    ,legend.background = element_blank ()
    ,axis.text.y=element_text(size=13)
    ,axis.title.y=element_text(size=13)
    ,axis.title.x=element_blank()
    ,axis.text.x=element_text(size=11)
  )
SB1 = SB + labs(y = expression(paste(Log[10], " CFU ", "per Gram Feces")))
# Dashed reference line at the LOD (100 CFU/g)
SB2 = SB1+ geom_hline(aes(yintercept=100), colour = "gray10", size = 0.9, linetype=2)
# Log10 y axis with 10^x labels
SB3 = SB2 + scale_y_log10(breaks = scales::trans_breaks("log10", function(x) 10^x),labels = scales::trans_format("log10", scales::math_format(10^.x)))
SB3
#Supp Figure 3: Random Forest Model Using Whole Pre-treatment Community
#please see file NEW_Figure5.R for code for this figure
#Supp Figure 4: Relative Abundance of OTU 3 (Akkermansia)
# Read the mothur shared file (OTU counts per sample)
shared<-read.delim(file="Adaptiveimmuneclear_noD40.42.0.03.subsample.0.03.filter.0.03.pick.shared")
shared$label=NULL
shared$numOtus=NULL
# Total sequences per sample across the OTU count columns (2:419)
shared$Total.seqs=apply(shared[,2:419], 1, sum)
# Relative abundance (%) of OTU 3 (counts in column 4)
shared$RelAbund.OTU3= (shared[,4]/shared$Total.seqs*100)
# Keep sample name (Group) and OTU 3 relative abundance only,
# moving the sample name into the row names
shared.OTU3= shared[ ,c(1,421)]
row.names(shared.OTU3)=shared.OTU3$Group
shared.OTU3$Group = NULL
# Parse cage, mouse and day out of the sample names
# (assumes names of the form "<cage/mouse>D<day>" -- TODO confirm)
shared.OTU3$Cage=sapply(strsplit(row.names(shared.OTU3), ".D"), "[", 1)
shared.OTU3$Mouse=sapply(strsplit(row.names(shared.OTU3), "D."), "[", 1)
shared.OTU3$Day=sapply(strsplit(row.names(shared.OTU3), "D"), "[", 2)
# Abundance of OTU 3 (Akkermansia) at D21 post infection
shared.OTU3.D21<-shared.OTU3[shared.OTU3$Day=="21",]
treament.metadata<-read.delim(file="D21.IgGposneg.txt",header = F, row.names = 1)
D21.data<- merge(treament.metadata,shared.OTU3.D21, by='row.names')
plot.D21<-ggplot(D21.data, aes(x=V2, y=RelAbund.OTU3, fill=V2)) +
  geom_boxplot() + theme_bw()
# Relative abundance per treatment group (column 3 = RelAbund.OTU3)
igg.pos<-D21.data[D21.data$V2 =="Splenocytes_IgG_positive",3]
splen.igg.neg<-D21.data[D21.data$V2 =="Splenocytes_IgG_negative",3]
veh<-D21.data[D21.data$V2 =="Vehicle_IgG_negative",3]
plot.D21
# Pairwise Wilcoxon tests between the three D21 groups
wilcox.test(splen.igg.neg,igg.pos)
#data: splen.igg.neg and igg.pos
#W = 9, p-value = 0.5508
wilcox.test(splen.igg.neg,veh)
#data: splen.igg.neg and veh
#W = 0, p-value = 0.07143
wilcox.test(igg.pos,veh)
#data: igg.pos and veh
#W = 12, p-value = 0.0199
#Correcting P-values for multiple comparisons (Benjamini-Hochberg)
# NOTE(review): the hard-coded p-values below (0.008087) do not match the
# Wilcoxon results printed above -- confirm which comparisons they are from.
recip_pvals<-c(0.008087,0.008087, NA)
round(p.adjust(recip_pvals, method = "BH"),3)
# Abundance of OTU 3 (Akkermansia) before any treatment (day -12)
shared.OTU3.preabx<-shared.OTU3[shared.OTU3$Day=="neg12",]
# Since at D-12 none of the mice had been treated yet, label each mouse
# with the group it will eventually belong to:
#   A = future Splenocytes_IgG_negative
#   B = future Splenocytes_IgG_positive
#   C = future Vehicle_IgG_negative
shared.OTU3.preabx$Group<-c(rep("B",1 ), rep("A",1 ), rep("B",4 ), rep("A",1 ), rep("C",2),rep("B",3 ),rep("C",1), rep("B",3 ) )
plot.Preabx<-ggplot(shared.OTU3.preabx,aes(x=Group, y=RelAbund.OTU3, fill=Group)) +
  geom_boxplot() + scale_y_continuous(limits = c(0,50)) + theme_bw()
plot.Preabx
# Relative abundance (column 1) per future group at D-12
A<-shared.OTU3.preabx[shared.OTU3.preabx$Group =="A",1]
B<-shared.OTU3.preabx[shared.OTU3.preabx$Group =="B",1]
C<-shared.OTU3.preabx[shared.OTU3.preabx$Group =="C",1]
# Pairwise Wilcoxon tests between the future groups
wilcox.test(A,B)
#data: A and B
#W = 13, p-value = 0.7669
wilcox.test(A,C)
#data: A and C
#W = 5, p-value = 0.4
wilcox.test(B,C)
#data: B and C
#W = 26, p-value = 0.1607
|
3ec6226f2981ea78333fdf00086dfcd3e6040a3a
|
2367f790fd23832252d4453bb4a83ee75ea71f04
|
/man/calc.FXtF2.Rd
|
1b4fcaef6587969b651118d9c731c6c702012705
|
[] |
no_license
|
cran/SpatioTemporal
|
41857dce535b45735c3711786b0294d5b8951f69
|
3149f4a6ba0359d5b9c1a8fd599ce1bcdb855b1b
|
refs/heads/master
| 2021-05-16T01:45:33.120006
| 2019-02-09T15:31:02
| 2019-02-09T15:31:02
| 17,693,753
| 0
| 4
| null | 2021-04-20T21:51:01
| 2014-03-13T03:40:40
|
R
|
UTF-8
|
R
| false
| true
| 2,519
|
rd
|
calc.FXtF2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/c_F_mult.R
\name{calc.FXtF2}
\alias{calc.FXtF2}
\title{Compute Quadratic Form Between Temporal Trends and Sigma B}
\usage{
calc.FXtF2(F, mat, loc.ind, F2 = F, loc.ind2 = loc.ind)
}
\arguments{
\item{F, F2}{(number of obs.) - by - (number of temporal trends) matrices
containing the temporal trends. Usually \code{\link{mesa.model}$F}, where
\code{\link{mesa.model}} is obtained from
\code{\link{createSTmodel}}.}
\item{mat}{A block diagonal, with equal size blocks. The
number of blocks need to equal \code{dim(F)[2]}}
\item{loc.ind, loc.ind2}{A vector indicating which location each row in \code{F}
corresponds to, usually \cr \code{\link{mesa.model}$obs$idx}.}
}
\value{
Returns a square matrix with side \code{dim(F)[1]}
}
\description{
Computes the quadratic form between a sparse matrix \code{F} containing the
temporal trends and the covariance matrix for the beta fields (Sigma_B). Or
possibly the product between two different \code{F}'s and a cross-covariance
matrix.
\cr\cr
See the examples for details.
}
\examples{
require(Matrix)
##create a trend
trend <- cbind(1:5,sin(1:5))
##an index of locations
idx <- c(rep(1:3,3),1:2,2:3)
idx2 <- c(rep(1:2,3),2,2)
##a list of time points for each location/observation
T <- c(rep(1:3,each=3),4,4,5,5)
T2 <- c(rep(1:3,each=2),4,5)
##expand the F matrix to match the locations/times in idx/T.
F <- trend[T,]
F2 <- trend[T2,]
##first column gives time and second location for each observation
cbind(T, idx)
##...and for the second set
cbind(T2, idx2)
##create a cross covariance matrix
C <- makeSigmaB(list(c(1,1),c(1,.5)), crossDist(1:max(idx),1:max(idx2)))
##compute F \%*\% X \%*\% F2'
FXtF2 <- calc.FXtF2(F, C, loc.ind=idx, F2=F2, loc.ind2=idx2)
##which is equivalent to
FXtF2.alt <- expandF(F, idx) \%*\% C \%*\% t( expandF(F2, idx2) )
range(FXtF2 - FXtF2.alt)
\dontshow{
if( max(abs(FXtF2 - FXtF2.alt)) > 1e-13 ){
stop("calc.FXtF2: Results not equal")
}
}
}
\seealso{
Other block matrix functions: \code{\link{blockMult}},
\code{\link{calc.FX}}, \code{\link{calc.mu.B}},
\code{\link{calc.tFXF}}, \code{\link{calc.tFX}},
\code{\link{makeCholBlock}}, \code{\link{makeSigmaB}},
\code{\link{makeSigmaNu}}
Other temporal trend functions: \code{\link{calc.FX}},
\code{\link{calc.tFXF}}, \code{\link{calc.tFX}},
\code{\link{expandF}}
}
\author{
Johan Lindstrom and Adam Szpiro
}
|
1774f936fa486894d8de52be64433a70153848c0
|
13af81ebab307021042b3820f80f9ab71f6fd9d7
|
/man/RentCalculatoR.Rd
|
b9e25deeed2e547e5d1a886cd231c06473cc103f
|
[] |
no_license
|
rp6921/RentCalculatoR
|
39d9f142b48b97f9ea6026c5f198e81d969de5eb
|
18a6dc5828288a0c13be5afe895bcfc679331edd
|
refs/heads/master
| 2022-10-16T07:29:23.640188
| 2020-06-12T22:04:48
| 2020-06-12T22:04:48
| 270,312,991
| 1
| 0
| null | 2020-06-07T13:42:34
| 2020-06-07T13:20:45
|
R
|
UTF-8
|
R
| false
| false
| 4,990
|
rd
|
RentCalculatoR.Rd
|
\name{RentCalculatoR}
\alias{RentCalculatoR}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{RentCalculatoR
%% ~~function to do ... ~~
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
With the package RentCalculatoR you can calculate the change in rent of residential real estate according to Swiss law. Changes in value-adding investments, depreciation, interests and reference interest rates can be taken into account. As well as changings due to changes in the market such as inflation, mortgage and cost increments.
We provide an Example that uses all functions of the RentCalculatoR in one task. See below.
For further information and more specific examples please consult the help pages of the specific function inside the package:
??RentCalculatoR
?RentInformations
?RentInvestCalculatoR
?inf_rate_change
?mortgage_rate_change
?cost_incr_change
?RentCalculatorNewCircum
}
\usage{
}
%- maybe also 'usage' for other objects documented here.
\arguments{
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
Topic related informations about rents in Switzerland:
https://www.mietrecht.ch
General R:
https://stat.ethz.ch/R-manual/
http://www.wolferonline.de/xmlR/XMLinR.pdf
https://de.wikibooks.org/wiki/GNU_R
https://stackoverflow.com/
Creating a package:
https://www.analyticsvidhya.com/blog/2017/03/create-packages-r-cran-github/
http://portal.stats.ox.ac.uk/userdata/ruth/APTS2012/Rcourse10.pdf
https://cran.r-project.org/doc/contrib/Leisch-CreatingPackages.pdf
https://support.rstudio.com/hc/en-us/articles/200486508-Building-Testing-and-Distributing-Packages
https://support.rstudio.com/hc/en-us/articles/200486518
Writing documentation:
https://support.rstudio.com/hc/en-us/articles/200532317-Writing-Package-Documentation
Debugging:
https://stat.ethz.ch/pipermail/r-help/2014-May/374864.html
https://stackoverflow.com/questions/26697727/what-does-error-in-namespaceexportns-exports-undefined-exports-mean
Publishing on a GitHub-Account:
https://www.analyticsvidhya.com/blog/2017/03/create-packages-r-cran-github/
}
\author{
%% ~~who you are~~
Ruth Peterhans ruth.maria.peterhans@usi.ch, Giuliano Giovanardi, giuliano.giovanardi@usi.ch
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##################################################################################################
# EXAMPLE for use of all functions in the RentCalculatoR in one task (PART A to C in the code)
# ----------------------------------------------------------------------------------------------------------
# If you have an apartment that is totally renovated and you have to calculate the new
# rent due to the investments and also due to changings in the market you can use
# the RentInvestCalculatoR together with the RentCalculatorNewCircum very easy.
# You take the two as data frames functions and add them to a new variable.
# Let's say you invested 100000 CHF with a value increasing share of 50 % and a lifespan of 12 years,
# while the rent before the investment was 1000 CHF.
invest <- as.data.frame(RentInvestCalculatoR(1000,100000,50,12))
# Let's say the last adjustments to inflation was on the 01.10.2016, the one to mortgage on the 3.9.2013
# and to the costs on 1.6.2019 you use (supposing the flat rate for the cost increase is 1 %):
market <- as.data.frame(RentCalculatorNewCircum(1000, "2016-10-01", "2013-09-03", "2019-06-01", 1))
# and then add the data with new added monthly value of the investment into the subset of market and
# add up the the sum of market changes (row 1 to 4) and the monthly added rent due to investment
# together to a new line which gives you the TOTAL result:
OverAllNewRent <- rbind(market[(1:4),],
c("The change in rent per month due to investment is:", invest$total_add_rent_monthly_CHF),
c("And the new TOTAL rent per month in CHF is:", sum(as.numeric(market[(1:4),2]))
+invest$total_add_rent_monthly_CHF)
)
%% ~~##---- Should be DIRECTLY executable !! ----~~~
%% ~~##-- ==> Define data, use random,~~~
%% ~~##-- or do help(data=index) for the standard data sets.~~~
%% ~~## The function is currently defined as~~~
%% ~~function (x) ~~~
%% ~~{~~~
%% ~~ }~~~
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory (show via RShowDoc("KEYWORDS")):
% \keyword{ ~kwd1 }
% \keyword{ ~kwd2 }
% Use only one keyword per line.
% For non-standard keywords, use \concept instead of \keyword:
% \concept{ ~cpt1 }
% \concept{ ~cpt2 }
% Use only one concept per line.
|
561b7521c9b6d50d7511470d2b8356652141b7ea
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/dse/examples/checkResiduals.Rd.R
|
5d1fde381ff155b8f1355e9502d248cc0265b532
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 336
|
r
|
checkResiduals.Rd.R
|
# Extracted example for dse::checkResiduals (autocorrelation diagnostics).
library(dse)
### Name: checkResiduals
### Title: Autocorrelations Diagnostics
### Aliases: checkResiduals checkResiduals.default checkResiduals.TSdata
### checkResiduals.TSestModel
### Keywords: ts
### ** Examples
# Fit a VARX model by least squares to the differenced example data,
# then run residual autocorrelation diagnostics on the fitted model.
data("eg1.DSE.data.diff", package="dse")
model <- estVARXls(eg1.DSE.data.diff)
checkResiduals(model)
|
58a53eb58fef88ac7f877431bd9112b57149c69a
|
6eed4337c1a918c2e615198699b8271ac8d25ffc
|
/R_basics/13_LogisticRegression.R
|
b6f250e3e6b983db39500a6f5774f5e1ff675310
|
[] |
no_license
|
Niks056/R_basics
|
b1653d6d0cb0d6f31033fa1c822a513272c5d43d
|
67fb11246ebb5757a0f3d19543361bae23586064
|
refs/heads/master
| 2022-12-12T17:18:54.513429
| 2020-09-10T10:00:21
| 2020-09-10T10:00:21
| 294,370,966
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,108
|
r
|
13_LogisticRegression.R
|
# Logistic regression on the loan-prediction data set:
# encode the response, 75/25 train/test split, fit a logit GLM,
# predict on the test set and report confusion-matrix accuracy.
setwd("C:\\Users\\91869\\Documents\\r pROGRAMS\\R")
data1 <- read.csv("loan predictionLog.csv", na.strings = c("NA","NaN",""), stringsAsFactors = FALSE)
str(data1)
# Encode the response as a 0/1 factor (Y -> 1, otherwise 0)
data1$Loan_Status <- ifelse(data1$Loan_Status == 'Y', 1, 0)
data1$Loan_Status <- as.factor(data1$Loan_Status)
# Exclude the Loan ID column (first column)
col <- ncol(data1)
data2 <- data1[, 2:col]
summary(data2)
# Reproducible 75/25 train/test split
nrow(data1)
set.seed(10)
n <- nrow(data2)
index1 <- sample(1:n, size = round(0.75*n), replace = FALSE)
x_train <- data2[index1, ]
x_test <- data2[-index1, ]
# Generalised linear model with all remaining columns as predictors
model1 <- glm(Loan_Status ~ ., data = x_train, family = binomial)
summary(model1)
# Predictors only: drop the last column (Loan_Status).
# BUG FIX: parenthesize (ncol(x_test) - 1); the original `1:ncol(x_test)-1`
# evaluates as (1:ncol(x_test)) - 1 = 0:(ncol-1) and only worked by
# accident because index 0 is silently dropped.
x_test1 <- x_test[, 1:(ncol(x_test) - 1)]
# BUG FIX: predict() on a glm defaults to type = "link" (log-odds), so the
# 0.5 cutoff below was applied on the wrong scale; request probabilities.
preds <- predict(model1, x_test1, type = "response")
preds
# Classify at probability 0.5
preds <- ifelse(preds >= .5, 1, 0)
preds <- as.factor(preds)
# Confusion matrix and accuracy (correct predictions / all predictions)
tb <- table(preds, x_test$Loan_Status)
tb
accuracy <- (tb[1,1] + tb[2,2]) / sum(tb)
accuracy
|
01128c59b4e24a8941ca08e42f6dc6de62b2dda5
|
afdabf3f753220a8341d99fd45849ab84e768a5a
|
/man/CkVarType.Rd
|
6ee728973aec51b4e60fda8972fde2683c8c0217
|
[] |
no_license
|
vmoprojs/GeoModels
|
92b4292cc87c1e4eeed674010f4d214783d7546e
|
55b885c10288a9d3cf5132807b80cca143869f7a
|
refs/heads/master
| 2023-08-30T23:28:00.066551
| 2023-08-29T03:45:09
| 2023-08-29T03:45:09
| 122,672,674
| 4
| 13
| null | 2021-03-28T20:13:41
| 2018-02-23T21:05:54
|
C
|
UTF-8
|
R
| false
| false
| 1,046
|
rd
|
CkVarType.Rd
|
\name{CkVarType}
\alias{CkVarType}
\encoding{UTF-8}
\title{Checking Variance Estimates Type}
\description{
Subroutine called by InitParam.
The procedure controls the method used to compute the estimates' variances.
}
\usage{
CkVarType(type)
}
\arguments{
\item{type}{String; the method used to compute the estimates'
variances. If \code{SubSamp} the estimates' variances
are computed by the sub-sampling method, see \code{\link{GeoFit}}.}
}
\value{The function returns a numeric positive integer, or NULL if the method is invalid.}
\details{The procedure controls the method used to compute the estimates' variances}
\seealso{\code{\link{GeoFit}}}
\author{Moreno Bevilacqua, \email{moreno.bevilacqua89@gmail.com},\url{https://sites.google.com/view/moreno-bevilacqua/home},
Víctor Morales Oñate, \email{victor.morales@uv.cl}, \url{https://sites.google.com/site/moralesonatevictor/},
Christian", Caamaño-Carrillo, \email{chcaaman@ubiobio.cl},\url{https://www.researchgate.net/profile/Christian-Caamano}
}
\keyword{Composite}
|
efb93dbeb830163419403f85259a1496aaf6c59e
|
e5e9a24cddbe5af33623dda455051b1e67646ff7
|
/project(1).R
|
590d07190d1ff0ef7cd86fac1f4e50cc51fc79e5
|
[] |
no_license
|
bielusha/hahaha
|
94adbd2fa8d81c325f07a80d6cae8c809762a97d
|
6c4674589691cc931f74842fc6396cdf13689c99
|
refs/heads/master
| 2021-01-19T16:26:12.116879
| 2017-04-15T00:38:06
| 2017-04-15T00:38:06
| 88,264,290
| 0
| 0
| null | 2017-04-15T00:04:00
| 2017-04-14T12:04:40
|
R
|
UTF-8
|
R
| false
| false
| 1,862
|
r
|
project(1).R
|
rm(list=ls())
library(quantmod)
library(fGarch)
#(a)
getSymbols('VIIIX',from='1998-1-1',to='2017-4-7')
getSymbols('VGTSX',from='1998-1-1',to='2017-4-7')
priceviiix<-VIIIX$VIIIX.Close
pricevgtsx<-VGTSX$VGTSX.Close
retviiix <- diff(log(priceviiix))
retvgtsx <- diff(log(pricevgtsx))
retviiix <- retviiix [-1,]
retvgtsx <-retvgtsx [-1,]
T <- length(retviiix)
windowlength <- 50
rollingCov <- numeric(T)
for (i in 50:T){
rollingCov[i] <- cov(retviiix[(i-49):i], retvgtsx[(i-49):i])
}
plot(rollingCov, type='l', col='blue')
#(b)
fit1 <- garchFit( formula = ~garch(1, 1), data = retviiix, trace = FALSE)
sigma1 <- sqrt(fit1@h.t)
retviiixstand <- retviiix/sigma1
fit2 <- garchFit( formula = ~garch(1, 1), data = retvgtsx, trace = FALSE)
sigma2 <- sqrt(fit2@h.t)
retvgtsxstand <- retvgtsx/sigma2
lambda <- 0.94
q11 <- numeric(T)
q12 <- numeric(T)
q22 <- numeric(T)
for (i in 2:T){
q11[i] <- (1-lambda)*retviiixstand[i-1]^2 + lambda*q11[i-1]
q12[i] <- (1-lambda)*retviiixstand[i-1]*retvgtsxstand[i-1] + lambda*q12[i-1]
q22[i] <- (1-lambda)*retvgtsxstand[i-1]^2 + lambda*q22[i-1]
}
exponentialCorr <- q12/sqrt(q11*q22)
plot(exponentialCorr, type='l', col='blue')
#(c)
alpha <- 0.05
beta <- 0.9
p11 <- numeric(T)
p12 <- numeric(T)
p22 <- numeric(T)
p11lr <- mean(retviiixstand^2)
p12lr <- mean(retviiixstand*retvgtsxstand)
p22lr <- mean(retvgtsxstand^2)
for (i in 2:T){
p11[i] <- p11lr + alpha*(retviiixstand[i-1]^2 - p11lr) + beta*(p11[i-1]-p11lr)
p12[i] <- p12lr + alpha*(retviiixstand[i-1]*retvgtsxstand[i-1] - p12lr) + beta*(p12[i-1]-p12lr)
p22[i] <- p22lr + alpha*(retvgtsxstand[i-1]^2 - p22lr) + beta*(p22[i-1]-p22lr)
}
GarchCorr <- p12/sqrt(p11*p22)
plot(GarchCorr, type='l', col='blue')
points(exponentialCorr, type='l', col='red')
|
eb4e4ef19b11d059839b3e72aa3f4a7415a063fd
|
fe50588a00b21024546902728bc6ae66ae4ec846
|
/run_analysis.r
|
55d9d7e0421493b07940465fb0bcb2a3832a04e7
|
[] |
no_license
|
willysousa/datasciencecoursera
|
2353208f3f2ae1e7d829e8578d199c001825dc86
|
33fed3939f05148ebdab824f60e0cc17f5d6611c
|
refs/heads/master
| 2016-09-05T13:24:46.491722
| 2014-09-21T04:04:19
| 2014-09-21T04:04:19
| 22,669,085
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,441
|
r
|
run_analysis.r
|
library("data.table")
library("reshape2")
table1 <- read.table("./Unit2/project/UCI HAR Dataset/activity_labels.txt")[,2]
table2 <- read.table("./Unit2/project/UCI HAR Dataset//features.txt")[,2]
table3 <- grepl("mean|std", table2)
table4 <- read.table("./Unit2/project/UCI HAR Dataset/test/X_test.txt")
table5 <- read.table("./Unit2/project/UCI HAR Dataset/test/y_test.txt")
table6 <- read.table("./Unit2/project/UCI HAR Dataset/test/subject_test.txt")
names(table4) = table2
table4 = table4[,table3]
table5[,2] = table1[table5[,1]]
names(table5) = c("Activity_ID", "Activity_Label")
names(table6) = "subject"
testes <- cbind(as.data.table(table6), table5, table4)
table7 <- read.table("./Unit2/project/UCI HAR Dataset/train/X_train.txt")
table8 <- read.table("./Unit2/project/UCI HAR Dataset/train/y_train.txt")
table9 <- read.table("./Unit2/project/UCI HAR Dataset/train/subject_train.txt")
names(table7) = table2
table7 = table7[,table3]
table8[,2] = table1[table8[,1]]
names(table8) = c("Activity_ID", "Activity_Label")
names(table9) = "subject"
testes2 <- cbind(as.data.table(table9), table8, table7)
combinado = rbind(testes, testes2)
idlabels = c("subject", "Activity_ID", "Activity_Label")
datal = setdiff(colnames(combinado), idlabels)
mdata = melt(combinado, id = idlabels, measure.vars = datal)
organizado = dcast(mdata, subject + table1 ~ variable, mean)
write.table(organizado, file = "./Unit2/project/tidy_data.txt")
|
b8c4b74ba0e7ac63e928ee30bb0ba422da00a450
|
648bae9ec2bd795413f067ce43d966eb8902939b
|
/man/weighted_sum_ga.Rd
|
36c386c803228da6c70edad0fc200217e2f37a44
|
[
"Apache-2.0"
] |
permissive
|
jiripetrlik/r-multiobjective-evolutionary-algorithms
|
e1b129ad710bcd9a5a6fd45095dba916c357fe37
|
dcbdcb943f9c6ebd53b7522cb0762de69c52d591
|
refs/heads/master
| 2020-06-12T19:29:11.397358
| 2020-04-26T19:58:46
| 2020-04-26T19:58:46
| 194,402,707
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,032
|
rd
|
weighted_sum_ga.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/weigted_sum_ga.R
\name{weighted_sum_ga}
\alias{weighted_sum_ga}
\title{Weighted sum genetic algorithm}
\usage{
weighted_sum_ga(objective_functions_list, weights, chromosome_size,
chromosome_type = "binary", population_size = 100,
number_of_iterations = 100, elitism = TRUE, nc = 2,
mutation_probability = 0.05, uniform_mutation_sd = 0.01)
}
\arguments{
\item{objective_functions_list}{List of objective functions}
\item{weights}{Objective functions weights}
\item{chromosome_size}{Size of chromosome which represents candidate solutions}
\item{chromosome_type}{Chromosome type ("binary" or "numeric")}
\item{population_size}{Number of solutions evaluated in one iteration of genetic algorithm}
\item{number_of_iterations}{Number of iterations (generations) of genetic algorithm}
\item{elitism}{Use elitism}
\item{nc}{NC for SBX crossover (valid if "numeric" chromosome is used)}
\item{mutation_probability}{Probability of mutation (valid if "binary" chromosome is used)}
\item{uniform_mutation_sd}{Standard deviation of mutation (valid if "numeric" chromosome is used)}
}
\value{
List which contains results of weighted sum genetic algorithm:
\code{value} - Sum of weighted objective funtions values for the best solution
\code{best_solution} - Chromosome which represents the best solution
\code{best_solution_index} - Index of the best solution in population
\code{statistics} - Statistics about run of genetic algorithm
\code{parameters} - Parameters of genetic algorithm
\code{values} - Values of objective functions for the best solution
\code{weighted_values} - Values of objective functions multiplied by
weights for the best solution
}
\description{
Use weighted sum approach to solve multiobjective optimization problem.
Weighted sum approach transforms multiobjective optimization problem
to single objective by multiplying objective functions values by weights.
Then genetic algorithm is used to find optimal solution.
}
|
4195792b113eb2edd68af817206843f16b8ca4ab
|
1744354b7b694860f0b2e4b7a2646bbde216fbd3
|
/maria_sebastian.R
|
67a7667a2a380f53d3810dff9f8a404f8b8a27da
|
[] |
no_license
|
relund/mdpClass2020
|
9f4be5933a0c1ed87f2075adeaa94c5a9ca993a8
|
a0a6fb1f7082cf565691962c4a8aabcefcece32d
|
refs/heads/master
| 2022-06-16T04:44:51.923979
| 2020-05-07T13:32:10
| 2020-05-07T13:32:10
| 259,257,266
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,617
|
r
|
maria_sebastian.R
|
library(MDP2)
h= matrix(c( 0.000, 0.000, 0.000, 0.000, 0.000, 1.000, 0.000, 0.770, 0.176, 0.000, 0.020,
0.034, 0.000, 0.117, 0.677, 0.058, 0.075, 0.073, 0.000, 0.067, 0.200, 0.279, 0.233, 0.221,
0.000, 0.000, 0.167, 0.167, 0.240, 0.426, 0.000, 0.000, 0.000, 0.000, 0.000, 1.000), # the data elements
nrow=6, # number of rows
ncol=6, # number of columns
byrow = TRUE)
l=matrix(c(0,0,0,1,0,0,0,1,0.0021,0.0025,0.0041,0.9913,0.1005,0.0913,0.1170,0.6912,0.3508,0.2786,0.2554,0.1152,0,0,0,1), nrow=6, # number of rows
ncol=4, # number of columns
byrow = TRUE)
trans_W<-matrix(rep(0,len=144),nrow=6)
trans_W[1 ,]<-c(h[1,1]*l[1,1],h[1,1]*l[1,2],h[1,1]*l[1,3],h[1,1]*l[1,4],h[1,2]*l[2,1],h[1,2]*l[2,2],h[1,2]*l[2,3],h[1,2]*l[2,4],h[1,3]*l[3,1],h[1,3]*l[3,2],h[1,3]*l[3,3],h[1,3]*l[3,4],h[1,4]*l[4,1],h[1,4]*l[4,2],h[1,4]*l[4,3],h[1,4]*l[4,4],h[1,5]*l[5,1],h[1,5]*l[5,2],h[1,5]*l[5,3],h[1,5]*l[5,4],h[1,6]*l[6,1],h[1,6]*l[6,2],h[1,6]*l[6,3],h[1,6]*l[6,4])
trans_W[2, ]<-c(h[2,1]*l[1,1],h[2,1]*l[1,2],h[2,1]*l[1,3],h[2,1]*l[1,4],h[2,2]*l[2,1],h[2,2]*l[2,2],h[2,2]*l[2,3],h[2,2]*l[2,4],h[2,3]*l[3,1],h[2,3]*l[3,2],h[2,3]*l[3,3],h[2,3]*l[3,4],h[2,4]*l[4,1],h[2,4]*l[4,2],h[2,4]*l[4,3],h[2,4]*l[4,4],h[2,5]*l[5,1],h[2,5]*l[5,2],h[2,5]*l[5,3],h[2,5]*l[5,4],h[2,6]*l[6,1],h[2,6]*l[6,2],h[2,6]*l[6,3],h[2,6]*l[6,4])
trans_W[3, ]<-c(h[3,1]*l[1,1],h[3,1]*l[1,2],h[3,1]*l[1,3],h[3,1]*l[1,4],h[3,2]*l[2,1],h[3,2]*l[2,2],h[3,2]*l[2,3],h[3,2]*l[2,4],h[3,3]*l[3,1],h[3,3]*l[3,2],h[3,3]*l[3,3],h[3,3]*l[3,4],h[3,4]*l[4,1],h[3,4]*l[4,2],h[3,4]*l[4,3],h[3,4]*l[4,4],h[3,5]*l[5,1],h[3,5]*l[5,2],h[3,5]*l[5,3],h[3,5]*l[5,4],h[3,6]*l[6,1],h[3,6]*l[6,2],h[3,6]*l[6,3],h[3,6]*l[6,4])
trans_W[4, ]<-c(h[4,1]*l[1,1],h[4,1]*l[1,2],h[4,1]*l[1,3],h[4,1]*l[1,4],h[4,2]*l[2,1],h[4,2]*l[2,2],h[4,2]*l[2,3],h[4,2]*l[2,4],h[4,3]*l[3,1],h[4,3]*l[3,2],h[4,3]*l[3,3],h[4,3]*l[3,4],h[4,4]*l[4,1],h[4,4]*l[4,2],h[4,4]*l[4,3],h[4,4]*l[4,4],h[4,5]*l[5,1],h[4,5]*l[5,2],h[4,5]*l[5,3],h[4,5]*l[5,4],h[4,6]*l[6,1],h[4,6]*l[6,2],h[4,6]*l[6,3],h[4,6]*l[6,4])
trans_W[5, ]<-c(h[5,1]*l[1,1],h[5,1]*l[1,2],h[5,1]*l[1,3],h[5,1]*l[1,4],h[5,2]*l[2,1],h[5,2]*l[2,2],h[5,2]*l[2,3],h[5,2]*l[2,4],h[5,3]*l[3,1],h[5,3]*l[3,2],h[5,3]*l[3,3],h[5,3]*l[3,4],h[5,4]*l[4,1],h[5,4]*l[4,2],h[5,4]*l[4,3],h[5,4]*l[4,4],h[5,5]*l[5,1],h[5,5]*l[5,2],h[5,5]*l[5,3],h[5,5]*l[5,4],h[5,6]*l[6,1],h[5,6]*l[6,2],h[5,6]*l[6,3],h[5,6]*l[6,4])
trans_W[6, ]<-c(h[6,1]*l[1,1],h[6,1]*l[1,2],h[6,1]*l[1,3],h[6,1]*l[1,4],h[6,2]*l[2,1],h[6,2]*l[2,2],h[6,2]*l[2,3],h[6,2]*l[2,4],h[6,3]*l[3,1],h[6,3]*l[3,2],h[6,3]*l[3,3],h[6,3]*l[3,4],h[6,4]*l[4,1],h[6,4]*l[4,2],h[6,4]*l[4,3],h[6,4]*l[4,4],h[6,5]*l[5,1],h[6,5]*l[5,2],h[6,5]*l[5,3],h[6,5]*l[5,4],h[6,6]*l[6,1],h[6,6]*l[6,2],h[6,6]*l[6,3],h[6,6]*l[6,4])
trans_W
trans_T<-matrix(rep(0,len=144),nrow=6)
trans_T[, 2]<-c(1,1,1,1,1,0)
trans_T[6,24]<-1
trans_T
r_w=c(0,1,1,1,1,0)
r_t=c(0,20,15,10,5,0)
R=cbind(r_w,r_t)
states<-c("transplant",1,2,3,4,"dead")
PList <- list(trans_W,trans_T)
# mdp<-list(PList = list(trans_W,trans_T, CMat = R))
# mdp
# build model
# D <- mdp$CMat
# D[, ] = 1 # set length of each stage to 1 (since not a semi-MDP)
## Try to build the MDP in steps
# Step 1: dummy actions (seems to work)
w<-binaryMDPWriter()
w$setWeights(c("Duration", "Net reward"))
w$process()
w$stage()
for (i in 1:length(R[,1])) {
w$state(label=states[i])
w$action(label="W", id=0, pr = 1, weights = c(1,1), end=T)
w$action(label="T", id=0, pr = 1, weights = c(1,1), end=T)
w$endState()
}
w$endStage()
w$endProcess() # Missing parenthesis here!
w$closeWriter()
mdp<-loadMDP()
# Step 2: model with output instead of actions (since 6 states the id of states must be 0, 1, ..., 5)
w<-binaryMDPWriter()
w$setWeights(c("Duration", "Net reward"))
w$process()
w$stage()
for (i in 1:length(R[,1])) {
w$state(label=states[i])
id=which(PList[[1]][i ,]>0)
cat("idW:", id, " "); cat("pr sum:", sum(PList[[1]][i,id]), "\n")
# w$action(label="W", id=id-1,pr = PList[[1]][i,id], weights = c(1,R[i,1]), end=T)
id=which(PList[[2]][i,]>0)
cat("idT:", id, " "); cat("pr sum:", sum(PList[[1]][i,id]), "\n")
# w$action(label="T", id = id-1, pr = PList[[2]][i,id], weights = c(1,R[i,2]), end=T)
w$endState()
}
w$endStage()
w$endProcess()
w$closeWriter()
# mdp<-loadMDP()
# Your ids must be in the range 0:5!
mdp<-loadMDP()
infoMDP(mdp, withDF = T)$actionDF
policyIteDiscount(mdp, "Net reward", "Duration", discountFactor = 0.99)
getPolicy(mdp)
|
3d5df443e102f9d674c870940e7158720e05636b
|
645bfd42abf0a53f0194819697ec54e32b033652
|
/tests/testthat/test-error.R
|
086a70bd5eae7381e03aedfcdba2cc8df2adcc01
|
[
"MIT"
] |
permissive
|
r-lib/callr
|
a0b319f93588f5e1e377882735118b7d836fffdb
|
e3e0acffdd817bb1c0f7fc5054f93b1d65ed8a45
|
refs/heads/main
| 2023-04-14T00:17:09.895755
| 2023-04-05T19:59:37
| 2023-04-05T19:59:37
| 58,728,879
| 249
| 37
|
NOASSERTION
| 2023-04-05T19:59:39
| 2016-05-13T10:26:09
|
R
|
UTF-8
|
R
| false
| false
| 5,737
|
r
|
test-error.R
|
test_that("error is propagated, .Last.error is set", {
expect_r_process_snapshot(
callr::r(function() 1 + "A", error = "error"),
.Last.error,
transform = redact_srcref
)
})
test_that("error is propagated, printed if non-interactive mode", {
expect_r_process_snapshot(
callr::r(function() 1 + "A", error = "error"),
interactive = FALSE,
transform = redact_srcref
)
})
test_that("error stack is passed, .Last.error is set", {
expect_r_process_snapshot(
callr::r(
function() {
f <- function() g()
g <- function() 1 + "A"
f()
},
error = "stack"
),
.Last.error,
transform = redact_srcref
)
})
test_that("error behavior can be set using option", {
withr::local_options(callr.error = "error")
expect_snapshot(
error = TRUE,
r(function() 1 + "A")
)
withr::local_options(callr.error = "stack")
expect_snapshot(
error = TRUE,
r(
function() {
f <- function() g()
g <- function() 1 + "A"
f()
}
)
)
})
test_that("parent errors", {
withr::local_options(list("callr.error" = "error"))
expect_snapshot({
err <- tryCatch(
r(function() 1 + "A"),
error = function(e) e
)
err$parent
})
})
test_that("parent errors, another level", {
withr::local_options(list("callr.error" = "error"))
expect_snapshot({
err <- tryCatch(
callr::r(function() {
withr::local_options(list("callr.error" = "error"))
callr::r(function() 1 + "A")
}),
error = function(e) e
)
err$parent
err$parent$parent
})
})
test_that("error traces are printed recursively", {
expect_r_process_snapshot(
callr::r(function() callr::r(function() 1 + "a")),
interactive = FALSE,
transform = redact_srcref
)
})
test_that("errors in r_bg() are merged", {
withr::local_options(list("callr.error" = "error"))
p <- r_bg(function() 1 + "A")
on.exit(p$kill(), add = TRUE)
p$wait(2000)
expect_snapshot(
error = TRUE,
p$get_result()
)
})
test_that("errors in r_process are merged", {
withr::local_options(list("callr.error" = "error"))
opts <- r_process_options(func = function() 1 + "A")
p <- r_process$new(opts)
on.exit(p$kill(), add = TRUE)
p$wait(2000)
expect_snapshot(
error = TRUE,
p$get_result()
)
})
test_that("errors in r_session$run() are merged", {
rs <- r_session$new()
on.exit(rs$kill(), add = TRUE)
expect_snapshot(
error = TRUE,
rs$run(function() 1 + "A")
)
expect_snapshot(
error = TRUE,
rs$run(function() 1 + "A")
)
})
test_that("errors in r_session$call() are merged", {
rs <- r_session$new()
on.exit(rs$kill(), add = TRUE)
rs$call(function() 1 + "A")
rs$poll_process(2000)
expect_snapshot(rs$read()$error)
rs$call(function() 1 + "A")
rs$poll_process(2000)
expect_snapshot(rs$read()$error)
})
test_that("child error is not modified", {
expect_snapshot({
err <- tryCatch(callr::r(function() stop("foobar")), error = function(e) e)
err
class(err)
class(err$parent)
})
})
test_that("new_callr_error, timeout", {
expect_r_process_snapshot(
callr::r(function() Sys.sleep(3), timeout = 1/5),
transform = redact_srcref
)
expect_snapshot(
error = TRUE,
callr::r(function() Sys.sleep(3), timeout = 1/5)
)
})
test_that("interrupting an R session", {
# Not a great test, because it is timing dependent, especially bad
# on Windows, where it takes a bit longer to start running the command.
skip_on_cran()
rs <- r_session$new()
on.exit(rs$close(), add = TRUE)
rs$call(function() Sys.sleep(3))
# wait a bit so it starts running
Sys.sleep(0.2)
rs$interrupt()
rs$poll_io(3000)
expect_snapshot(
rs$read(),
transform = redact_callr_rs_result
)
})
test_that("format.call_status_error", {
err <- tryCatch(
callr::r(function() 1 + ""),
error = function(e) e
)
expect_snapshot(format(err))
expect_snapshot(print(err))
err <- tryCatch(
callr::r(function() 1 + "", error = "stack"),
error = function(e) e
)
expect_snapshot(format(err))
expect_snapshot(print(err))
})
test_that("format.call_status_error 2", {
expect_r_process_snapshot(
withr::local_options(rlib_error_always_trace = TRUE),
err <- tryCatch(
callr::r(function() 1 + ""),
error = function(e) e
),
writeLines(format(err, trace = TRUE)),
interactive = FALSE,
transform = redact_srcref
)
})
test_that("stdout/stderr is printed on error", {
expect_r_process_snapshot(
callr::r(function() {
warning("I have a bad feeling about this")
stop("told ya")
}),
.Last.error,
.Last.error$stderr,
interactive = TRUE,
transform = function(x) fix_eol(redact_srcref(x))
)
})
test_that("stdout/stderr is printed on error 2", {
expect_r_process_snapshot(
callr::r(function() {
writeLines("Just some output")
stop("told ya")
}),
.Last.error,
.Last.error$stdout,
interactive = TRUE,
transform = function(x) fix_eol(redact_srcref(x))
)
})
test_that("stdout/stderr is printed on error 3", {
expect_r_process_snapshot(
callr::r(function() {
writeLines("Just some output")
warning("I have a bad feeling about this")
stop("told ya")
}),
interactive = FALSE,
transform = redact_srcref
)
})
test_that("error is printed to file", {
tmp <- tempfile("callr-test")
on.exit(unlink(tmp), add = TRUE)
err <- tryCatch(
callr::r(function() stop("ouch"), stderr = tmp),
error = function(e) e
)
expect_snapshot(
err$stderr,
transform = function(x) fix_eol(redact_srcref(x))
)
expect_snapshot(readLines(tmp))
})
|
11badc89ce7582fb614902ff26fda60ace315394
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/stream/man/DSO_Sampling.Rd
|
a2e5954be339445eb0226032e0a56c6f0534791b
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,989
|
rd
|
DSO_Sampling.Rd
|
\name{DSO_Sample}
\alias{DSO_Sample}
\title{Sampling from a Data Stream (Data Stream Operator)}
\description{
Extracts a sample form a data stream using Reservoir Sampling.
}
\usage{
DSO_Sample(k = 100, biased = FALSE)
}
\arguments{
\item{k}{the number of points to be sampled from the stream.}
\item{biased}{if \code{FALSE} then a regular (unbiased)
reservoir sampling is used. If true then
the sample is biased towards keeping more recent data points
(see Details section).}
}
\details{
If \code{biased=FALSE} then the reservoir sampling algorithm by McLeod
and Bellhouse (1983) is used. This sampling makes sure that each
data point has the same chance to be sampled. All sampled points
will have a weight of 1. Note that this might not be ideal for an evolving
stream since very old data points have the same chance to be in the sample
as newer points.
If \code{bias=TRUE} then sampling prefers newer points using the modified
reservoir sampling algorithm 2.1 by Aggarwal (2006). New points are always
added. They replace a random point in thre reservoir with a probability
of reservoir size over \code{k}. This an exponential bias function of
\eqn{2^{-lambda}} with \eqn{lambda=1/k}.
}
\value{
An object of class \code{DSO_Sample} (subclass of
\code{DSO}).
}
\seealso{
\code{\link{DSO}}
}
\references{
Vitter, J. S. (1985): Random sampling with a reservoir. ACM Transactions
on Mathematical Software, 11(1), 37-57.
McLeod, A.I., Bellhouse, D.R. (1983): A Convenient Algorithm for Drawing a Simple Random Sample. Applied Statistics, 32(2), 182-184.
Aggarwal C. (2006) On Biased Reservoir Sampling in the Presence of Stream Evolution. International Conference on Very Large Databases (VLDB'06). 607-618.
}
\author{
Michael Hahsler
}
\examples{
stream <- DSD_Gaussians(k=3, noise=0.05)
sample <- DSO_Sample(k=20)
update(sample, stream, 500)
sample
# plot points in sample
plot(get_points(sample))
}
|
18f4d72a4e57cdf262fb61a5ade52767bb2195ed
|
d42e07aa8a9f17049b361e399c7bcd592cd02a09
|
/slide4.R
|
14464bee047704965013c29a88d934205f63a1ee
|
[] |
no_license
|
thoughtfulbloke/digitalhumans
|
2e67d58077789964c1f448cf80a6473d4974abdb
|
5f87bab67cd9474434656990c7c4d8e524628232
|
refs/heads/master
| 2020-08-14T16:40:03.758457
| 2019-10-15T03:48:19
| 2019-10-15T03:48:19
| 215,201,107
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 662
|
r
|
slide4.R
|
library(atus)
library(dplyr)
library(ggplot2)
library(ggthemes)
data(atusact)
data(atusresp)
slide <- atusresp %>% filter(tuyear==2016) %>%
left_join(atusact %>% filter(tiercode >= 180000, tiercode < 190000), by="tucaseid") %>%
count(dur) %>% filter(dur<=120) %>%
ggplot(aes(x=dur, y=n, xend=dur)) + ylab("\nCount of actions\n") +
ggtitle("American time use survey, 2016, activities of two hours or less:
minutes spent journeying (travelling for a purpose)") +
xlab("\nNumber of minutes of survey day\n") +
labs(caption = "source: BLS via R package ATUS\n") +
geom_segment(yend=0, size=1, colour=pal_otago[4]) + theme_otago_ppt
# print(slide)
|
d223c24400e22678a58364a88cf9c4ee3f515eb6
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.database/man/redshiftdataapiservice_cancel_statement.Rd
|
fcad69887e66068df44e1ec9cebdfcf356b3b177
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 839
|
rd
|
redshiftdataapiservice_cancel_statement.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/redshiftdataapiservice_operations.R
\name{redshiftdataapiservice_cancel_statement}
\alias{redshiftdataapiservice_cancel_statement}
\title{Cancels a running query}
\usage{
redshiftdataapiservice_cancel_statement(Id)
}
\arguments{
\item{Id}{[required] The identifier of the SQL statement to cancel. This value is a
universally unique identifier (UUID) generated by Amazon Redshift Data
API. This identifier is returned by \code{BatchExecuteStatment},
\code{ExecuteStatment}, and
\code{\link[=redshiftdataapiservice_list_statements]{list_statements}}.}
}
\description{
Cancels a running query. To be canceled, a query must be running.
See \url{https://www.paws-r-sdk.com/docs/redshiftdataapiservice_cancel_statement/} for full documentation.
}
\keyword{internal}
|
47fdcdeded7bd2ef30e4b513ddc0c285699aea9f
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/plsgenomics/R/rirls.spls.aux.R
|
d05e3f99adfc1cff2240b5a8ffc9dd4fc0abc910
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,284
|
r
|
rirls.spls.aux.R
|
### rirls.spls.aux.R (2014-10)
###
### Ridge Iteratively Reweighted Least Squares followed by Adaptive Sparse PLS regression for binary responser
### Short version for multiple call in cross-validation procedure
###
### Copyright 2014-10 Ghislain DURIF
###
### Adapted from rpls function in plsgenomics package, copyright 2006-01 Sophie Lambert-Lacroix
###
### This file is part of the `plsgenomics' library for R and related languages.
### It is made available under the terms of the GNU General Public
### License, version 2, or at your option, any later version,
### incorporated herein by reference.
###
### This program is distributed in the hope that it will be
### useful, but WITHOUT ANY WARRANTY; without even the implied
### warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
### PURPOSE. See the GNU General Public License for more
### details.
###
### You should have received a copy of the GNU General Public
### License along with this program; if not, write to the Free
### Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
### MA 02111-1307, USA
rirls.spls.aux <- function(sXtrain, sXtrain.nosvd=NULL, Ytrain, lambda.ridge, lambda.l1, ncomp, sXtest, sXtest.nosvd=NULL, adapt=TRUE, maxIter=100, svd.decompose=TRUE, meanXtrain, sigma2train) {
#####################################################################
#### Initialisation
#####################################################################
sXtrain <- as.matrix(sXtrain)
ntrain <- nrow(sXtrain) # nb observations
p <- ncol(sXtrain) # nb covariates
index.p <- c(1:p)
Ytrain <- as.matrix(Ytrain)
q = ncol(Ytrain)
one <- matrix(1,nrow=1,ncol=ntrain)
ntest <- nrow(sXtest)
#####################################################################
#### Ridge IRLS step
#####################################################################
fit <- wirrls(Y=Ytrain, Z=cbind(rep(1,ntrain),sXtrain), Lambda=lambda.ridge, NbrIterMax=maxIter, WKernel=diag(rep(1,ntrain)))
converged=fit$Cvg
# Check WIRRLS convergence
if (converged==0) {
warning("Message from rirls.spls : Ridge IRLS did not converge; try another lambda.ridge value")
}
# if ncomp == 0 then wirrls without spls step
if (ncomp==0) {
BETA <- fit$Coefficients
}
#####################################################################
#### weighted SPLS step
#####################################################################
# if ncomp > 0
if (ncomp!=0) {
#Compute ponderation matrix V and pseudo variable z
#Pseudovar = Eta + W^-1 Psi
# Eta = X * betahat (covariate summary)
Eta <- cbind(rep(1, ntrain), sXtrain) %*% fit$Coefficients
## Run SPLS on Xtrain without svd decomposition
sXtrain = sXtrain.nosvd
sXtest = sXtest.nosvd
# mu = h(Eta)
mu = 1 / (1 + exp(-Eta))
# ponderation matrix : V
diagV <- mu * (1-mu)
V <- diag(c(diagV))
# inv de V
diagVinv = 1/ifelse(diagV!=0, diagV, diagV+0.00000001)
Vinv = diag(c(diagVinv))
Psi <- Ytrain-mu
pseudoVar = Eta + Vinv %*% Psi
# V-Center the sXtrain and pseudo variable
sumV=sum(diagV)
# Weighted centering of Pseudo variable
VmeanPseudoVar <- sum(V %*% Eta + Psi ) / sumV
# Weighted centering of sXtrain
VmeansXtrain <- t(diagV)%*%sXtrain/sumV
# SPLS(X, pseudo-var, weighting = V)
resSPLS = spls.adapt(Xtrain=sXtrain, Ytrain=pseudoVar, ncomp=ncomp, weight.mat=V, lambda.l1=lambda.l1, adapt=adapt, center.X=TRUE, scale.X=FALSE, center.Y=TRUE, scale.Y=FALSE, weighted.center=TRUE)
BETA = resSPLS$betahat.nc
}
#####################################################################
#### classification step
#####################################################################
hatYtest <- cbind(rep(1,ntest),sXtest) %*% BETA
hatYtest <- as.numeric(hatYtest>0)
#####################################################################
#### Conclude
#####################################################################
Coefficients=BETA
Coefficients[-1] <- diag(c(1/sqrt(sigma2train)))%*%BETA[-1]
Coefficients[1] <- BETA[1] - meanXtrain %*% Coefficients[-1]
#### RETURN
result <- list(Coefficients=Coefficients, hatYtest=hatYtest, converged=converged)
class(result) <- "rirls.spls.aux"
return(result)
}
|
2cd2ffaff108956d0d67a50b43783a2433ab39f7
|
c5afc1376eb22fbf791423fa8040fded3242d171
|
/R_Scripts/1_master_file.R
|
4db82abc7eb434e82b0b2a1025a3be1f70c414eb
|
[] |
no_license
|
sjkiss/class_voting_canada_2019
|
a1f69bbfdaf51c96a588cb21cd6806d4bb24d1af
|
346c88babdf5f6ac4affc9938faf855985713706
|
refs/heads/main
| 2023-03-24T17:12:11.676816
| 2021-03-19T14:00:47
| 2021-03-19T14:00:47
| 349,434,189
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,699
|
r
|
1_master_file.R
|
##load recoded ces files
load("Data/recoded_cesdata.Rdata")
library(tidyverse)
library(labelled)
library(here)
library(car)
### Checks
nrow(ces74)==2562 #TRUE
table(ces68$var323, ces68$var379)
table(ces68$var379, ces68$union_both)
table(ces74$size)
table(ces68$var379)
ces19phone$immigration
look_for(ces68, "marital")
#check for missing occupations
ces15phone %>%
filter(is.na(PES15_NOC)==F&is.na(occupation)==T) %>%
select(PES15_NOC, occupation) %>%
print(n=264)
ces19web %>%
filter(is.na(NOC)==F&is.na(occupation)==T) %>%
select(NOC, occupation)
#### Check the MIP Problems have been added ####
ces19phone$mip
#### SPLITTING THE 1979-1980 FILE ####
table(ces7980$male80)
names(ces7980)
names(ces93)
tail(names(ces0411))
library(labelled)
look_for(ces74, "respondent")
look_for(ces7980, "respondent")
look_for(ces74, "respondent")
look_for(ces7980, "filter")
#Get a summary of V9 sector and V4020
ces7980 %>%
select(V9, sector, V4020) %>%
summary()
#Gt a summary of ces74$V2
ces74%>%
select(V2) %>%
summary()
#### CES74 Sector ####
###### This code section creates a ces74 data frame from the ces74-79-80 panel survey
###### It does this because sector was only asked of non-manual respondents in ces74, but it was asked of everybody in ces79
###### Blais took the responses for the 79 question for the ces74 respondents who were reinterviewed in ces79 and made it to be their 74 response. So he went backward.
###### All our other demographic variables were created from the pure cross-sectional survey, so I didn't want to waste all that work.
###### When we get close to fully being able to replicate Blais, we can execute this code to create ces74 Until then we keep it off.
# table(ces7980$sector)
# table(ces74$V2)
# data("ces7980")
# ces7980 %>%
# #Select V9, sector and panel
# #V9 is joint ces74 and ces7990 respondent id, sector is sector variablef rom ces7980 and
# #V4020 is filter variable 1= ces7980 respondents who filled out ces74 surve
# select(V9, sector, V4020) %>%
# #inner join (return all rows from 7980 that have values in V9 that match in ces74 on V2)
# inner_join(., ces74, by=c("V9"="V2")) ->ces74.out
# #ces74.out is a reduced ces7980 dataframe; it now only includes ces7980 respondents who respondend to ces74 survey
# tail(names(ces74.out))
# table(ces7980$V9)
# #how many respondents in ces74
# nrow(ces74.out)
# #sector.x is the sector variable from 7980; should be a lot larger than sector.y
# table(ces74.out$sector.x)
# #setor.7 is the sector variable from ces74; only asked of non-manual respondents, see note in Blais (1990)
# #should be a lot smaller than sector.x
# table(ces74.out$sector.y)
# #The technical documentation says that there are 1295 CES74 panel respondents reinterviewed in CES79
# ## 1298 is close, but not exact
# table(ces74.out$V4020)#
# #There are 3 people who are not part of the ces74-79 panel that got caught with the same respondent IDS
# ces74.out %>%
# #Filter in respondents who have a value of 1 on the 74-79 panel filter
# filter(V4020==1)->ces74.out
#
# #take ces74.out
# ces74.out %>%
# #delete sector.y which is the sector variable from the pure ces74 study
# select(-sector.y) %>%
# #sector sector.x which is the sector variable from ces7980 to be sector to match all the other variables
# rename(sector=sector.x)->ces74.out
# ces74.out$sector
# nrow(ces74.out)
# #rename the whole ces74.out data frame to be ces74; old ces74 will now be gone.
# ces74<-ces74.out
#
# table(ces74$sector)
# Separate the merged 1979/1980 file into its two component studies.
# V4002 == 1 flags CES79 respondents; V4008 == 1 flags CES80 respondents.
ces79 <- filter(ces7980, V4002 == 1)
ces80 <- filter(ces7980, V4008 == 1)
options(max.print = 1500)
names(ces80)
names(ces7980)
# All demographic variables from the CES79 questions are stored in the CES80
# data set. Show this:
table(ces80$male)
# Demographics are identical across the two waves...
table(ces80$male, ces80$male80)
table(ces80$region, ces80$region80)
# ...but political variables differ, for obvious reasons: demographics barely
# changed between waves while vote changed quite a bit.
table(ces80$vote, ces80$vote80)
# Turn the variables ending in "80" into regularly named variables, keeping
# the shared (wave-invariant) ones as they are.
ces80 <- ces80 %>%
  select(male = male80, region = region80, quebec = quebec80, age = age80,
         language = language80, party_id = party_id80, vote = vote80,
         union, union_both, degree, employment, sector, income, occupation,
         occupation3, religion, non_charter_language, size, redistribution,
         pro_redistribution)
names(ces80)
# Keep only CES93 respondents: a non-missing RTYPE4 indicates a CES93
# respondent (drops the 1992 referendum-only cases).
ces93 <- ces93[!is.na(ces93$RTYPE4), ]
#####SPLITTING THE 04-11 FILE
### STEP 1
### The next thing to do would be to split the 2004-2011 file into separate files
### I don't think we want to mess around with the panel data
### I made a survey variable when we started
table(ces0411$survey)
names(ces0411)
##This code groups by the survey variable
ces0411 %>%
#make gtroups
group_by(survey) %>%
#summarize those gtroups by counting
summarize(n=n()) %>%
#arrange the data frame in descending order
arrange(desc(n)) %>%
#print all the rows
print(n=69)
#### STEP 2 FILTERING
#Panels not added but the rest have been
####CES04 ####
# ces0411 %>%
# filter(survey=="CPS04 PES04 MBS04" | survey=="CPS04 PES04" | survey=="CPS04 PES04 MBS04 CPS06 PES06" | survey=="CPS04 PES04 CPS06 PES06" | survey=="CPS04 PES04 CPS06" | survey=="CPS04 PES04 MBS04 CPS06" | survey=="CPS04 PES04 CPS06 PES06 CPS11 PES11" | survey=="CPS04 PES04 MBS04 CPS06 PES06 CPS11 PES11" | survey=="CPS04 PES04 MBS04 CPS06 PES06 CPS11 PES11 MBS11 WBS11" | survey=="CPS04 PES04 CPS06 PES06 CPS11" | survey=="CPS04 PES04 MBS04 CPS06 PES06 CPS11" | survey=="CPS04 PES04 MBS04 CPS06 PES06 CPS11 PES11 MBS11" | survey=="CPS04 PES04 CPS06 CPS11" | survey=="CPS04 PES04 MBS04 CPS06 CPS11" | survey=="CPS04 PES04 CPS06 CPS11 PES11" | survey=="CPS04 PES04 CPS06 PES06 CPS11 PES11 MBS11" | survey=="CPS04 PES04 CPS06 PES06 CPS11 PES11 MBS11 WBS11" | survey=="CPS04 PES04 MBS04 CPS06 CPS11 PES11" | survey=="CPS04 PES04 MBS04 CPS06 CPS11 PES11 MBS11" | survey=="CPS04 PES04 CPS06 CPS11 PES11 MBS11 WBS11" | survey=="CPS04 PES04 CPS06 CPS11 PES11 MBS11" | survey=="CPS04 PES04 MBS04 CPS06 CPS11 PES11 MBS11 WBS11")->ces04
# Do not use Panel respondents
# This way returns anyone who filled out PES04 and is not a Panel respondent
# This way we get 100 extra respondents
# ces0411 %>%
# filter(str_detect(ces0411$survey, "PES04")&str_detect(ces0411$survey, "Panel", negate=T))->ces04
#Use Panel Respondents
ces0411 %>%
filter(str_detect(ces0411$survey, "PES04"))->ces04
# Do the union checks
table(ces0411$union04)
table(ces0411$union_both04)#
table(ces04$union_both04)
nrow(ces04)
table( as_factor(ces04$ces04_CPS_S6A), as_factor(ces04$ces04_CPS_S6B), useNA = "ifany")
table(as_factor(ces04$union_both04), as_factor(ces04$ces04_CPS_S6A), useNA = "ifany")
table(as_factor(ces04$union_both04), as_factor(ces04$ces04_CPS_S6B), useNA = "ifany")
#### CES06 ####
# ces0411 %>%
# filter(survey=="CPS06 PES06" | survey=="CPS04 PES04 MBS04 CPS06 PES06" | survey=="CPS04 PES04 CPS06 PES06" | survey=="CPS04 PES04 CPS06 PES06 CPS11 PES11" | survey=="CPS04 PES04 MBS04 CPS06 PES06 CPS11 PES11" | survey=="CPS04 PES04 MBS04 CPS06 PES06 CPS11 PES11 MBS11 WBS11" | survey=="CPS04 PES04 CPS06 PES06 CPS11" | survey=="CPS04 PES04 MBS04 CPS06 PES06 CPS11" | survey=="CPS04 PES04 MBS04 CPS06 PES06 CPS11 PES11 MBS11" | survey=="CPS04 PES04 CPS06 PES06 CPS11 PES11 MBS11" | survey=="CPS04 PES04 CPS06 PES06 CPS11 PES11 MBS11 WBS11")->ces06
# nrow(ces06)
# Do not use Panel respondents
# ces0411 %>%
# filter(str_detect(ces0411$survey, "PES06")&str_detect(ces0411$survey, "Panel", negate=T))->ces06
## Use Panel Respondents
ces0411 %>%
filter(str_detect(ces0411$survey, "PES06"))->ces06
nrow(ces06)
#### CES08
# Do not use Panel respondents
# ces0411 %>%
# filter(str_detect(ces0411$survey, "PES08")&str_detect(ces0411$survey, "Panel", negate=T))->ces08
## Use Panel Respondents
### CES08
ces0411 %>%
filter(str_detect(ces0411$survey, "PES08"))->ces08
#### CES11 ####
# ces0411 %>%
# filter(survey=="New RDD_2011 CPS11 PES11" | survey=="New RDD_2011 CPS11" | survey=="New RDD_2011 CPS11 PES11 MBS11" | survey=="New RDD_2011 CPS11 PES11 MBS11 WBS11" | survey=="CPS04 PES04 CPS06 PES06 CPS11 PES11" | survey=="CPS04 PES04 MBS04 CPS06 PES06 CPS11 PES11" | survey=="CPS04 PES04 MBS04 CPS06 PES06 CPS11 PES11 MBS11 WBS11" | survey=="CPS04 PES04 MBS04 CPS06 PES06 CPS11 PES11 MBS11" | survey=="CPS04 PES04 CPS06 CPS11 PES11" | survey=="CPS04 PES04 CPS06 PES06 CPS11 PES11 MBS11" | survey=="CPS04 PES04 CPS06 PES06 CPS11 PES11 MBS11 WBS11" | survey=="CPS04 PES04 MBS04 CPS06 CPS11 PES11" | survey=="CPS04 PES04 MBS04 CPS06 CPS11 PES11 MBS11" | survey=="CPS04 PES04 CPS06 CPS11 PES11 MBS11 WBS11" | survey=="CPS04 PES04 CPS06 CPS11 PES11 MBS11" | survey=="CPS04 PES04 MBS04 CPS06 CPS11 PES11 MBS11 WBS11")->ces11
# Do not use Panel respondents
# ces0411 %>%
# filter(str_detect(ces0411$survey, "PES11")&str_detect(ces0411$survey, "Panel", negate=T))->ces11
# Keep panel respondents: anyone who completed the 2011 post-election survey.
ces11 <- ces0411 %>% filter(str_detect(survey, "PES11"))
#### Row counts of each CES study ####
nrow(ces04)
nrow(ces06)
nrow(ces08)
nrow(ces11)
#### STEP 3 RENAMING VARIABLES
### This is how we will rename the variables in each data frame.. removing the years.
#### Make CES04 ####
nrow(ces04)
table(ces04$union04, useNA = "ifany")
table(ces04$union_both04, useNA = "ifany")
table(ces04$ces04_CPS_S6A, useNA = "ifany")
table(ces04$union04, ces04$union_both04, useNA = "ifany")
#After consulting the ODESI merged file, there should be 802 yes and 1251 no to the respondent question in the merged file
table(ces0411$ces04_CPS_S6A)
#but after we kept only the respondents who took part in the PES04
table(ces04$ces04_CPS_S6A) # we lose more than half. So that can't be good.
table(ces04$union04, ces04$ces04_CPS_S6A, useNA = "ifany")
table(ces04$union_both04, ces04$ces04_CPS_S6A, useNA = "ifany")
table(ces04$union_both04, ces04$ces04_CPS_S6B, useNA = "ifany")
table(as_factor(ces04$ces04_CPS_S6A), as_factor(ces04$ces04_CPS_S6B), useNA = "ifany")
#### Rename CES 04####
# Strip the "04" year suffix from every CES04 variable.
# One rename() call replaces the original chain of 18 single-variable
# rename pipelines; the result is identical (all source columns are distinct).
ces04 <- ces04 %>%
  rename(union_both = union_both04,
         union = union04,
         degree = degree04,
         region = region04,
         quebec = quebec04,
         age = age04,
         religion = religion04,
         language = language04,
         employment = employment04,
         sector = sector04,
         party_id = party_id04,
         vote = vote04,
         occupation = occupation04,
         income = income04,
         non_charter_language = non_charter_language04,
         occupation3 = occupation04_3,
         redistribution = redistribution04,
         pro_redistribution = pro_redistribution04)
table(ces04$survey, ces04$non_charter_language)
#### Rename CES06 ####
# Strip the "06" year suffix from every CES06 variable.
# One rename() call replaces the original chain of 18 single-variable
# rename pipelines; the result is identical (all source columns are distinct).
ces06 <- ces06 %>%
  rename(union_both = union_both06,
         union = union06,
         degree = degree06,
         region = region06,
         quebec = quebec06,
         age = age06,
         religion = religion06,
         language = language06,
         employment = employment06,
         sector = sector06,
         vote = vote06,
         party_id = party_id06,
         occupation = occupation06,
         income = income06,
         non_charter_language = non_charter_language06,
         occupation3 = occupation06_3,
         redistribution = redistribution06,
         pro_redistribution = pro_redistribution06)
table(ces06$survey, ces06$non_charter_language)
#### Rename CES08 ####
# Strip the "08" year suffix from every CES08 variable.
# One rename() call replaces the original chain of 18 single-variable
# rename pipelines; the result is identical (all source columns are distinct).
ces08 <- ces08 %>%
  rename(union_both = union_both08,
         union = union08,
         degree = degree08,
         region = region08,
         quebec = quebec08,
         age = age08,
         religion = religion08,
         language = language08,
         employment = employment08,
         sector = sector08,
         party_id = party_id08,
         vote = vote08,
         occupation = occupation08,
         income = income08,
         non_charter_language = non_charter_language08,
         occupation3 = occupation08_3,
         redistribution = redistribution08,
         pro_redistribution = pro_redistribution08)
table(ces08$survey, ces08$non_charter_language)
#### Rename CES11 ####
# Strip the "11" year suffix from every CES11 variable.
# One rename() call replaces the original chain of 18 single-variable
# rename pipelines; the result is identical (all source columns are distinct).
ces11 <- ces11 %>%
  rename(union_both = union_both11,
         union = union11,
         degree = degree11,
         region = region11,
         quebec = quebec11,
         age = age11,
         religion = religion11,
         language = language11,
         employment = employment11,
         sector = sector11,
         party_id = party_id11,
         vote = vote11,
         occupation = occupation11,
         income = income11,
         non_charter_language = non_charter_language11,
         occupation3 = occupation11_3,
         redistribution = redistribution11,
         pro_redistribution = pro_redistribution11)
#### Rejoin the Files To Make CES ####
#For some years there are no variables (e.g. 1965 does not have a union variable)
#This is not actually a big deal.
#The trick is that bind_rows keeps *every* single variable, from the data frames that are bound
#If two data frames share a variable then it combines them and populates the values on the one variable from both data frames
#If one of the data frame has a variable that the other does not then it just fills the rows with missing values
#I *think* that this is the quickest way forward.
##We are going to make a list of each survey
ces.list<-list(ces65, ces68, ces72_nov, ces74, ces79, ces80, ces84, ces88, ces93, ces97, ces00, ces04, ces06, ces08, ces11, ces15phone, ces19phone)
#WE are going to name each item in the list
names(ces.list)<-c('1965', '1968', '1972','1974', '1979','1980', '1984', '1988', '1993', '1997', '2000', '2004', '2006', '2008', '2011', '2015', '2019')
#removing election files
#Remove these only if you run into memory troubles
# rm(ces00)
# rm(ces04)
# rm(ces0411)
# rm(ces06)
# rm(ces08)
# rm(ces11)
# rm(ces15phone)
# rm(ces65)
# rm(ces68)
# rm(ces72_nov)
# rm(ces74)
# rm(ces74b)
# rm(ces79)
# rm(ces84)
# rm(ces88)
# rm(ces93)
# rm(ces97)
# rm(ces19phone)
#
# str(ces.list)
# str(ces.list$`2019`)
# ces.list%>%
# map(., ncol)
#bind_rows binds the rows of each element in the list together
#.id="survey"creates a new variable called "survey" and its values are the names of the list items.
names(ces.list)
table(ces.list[["1984"]]$union_both)
ces.list[["1984"]]
library(haven)
#Start with the data frame
ces.list %>%
#WE have to zap the value labels (get rid of them to enable row b inding)
map(., zap_labels) %>%
#bind rows creating id variable "election"
bind_rows(., .id="election")->ces
#Do a summary
summary(ces)
#Check the names
tail(names(ces))
names(ces68)
#You see how this has *all* the variables from both 1993 and 1997.
#So here we just select out names variables that we want.
# ces %>%
# select(c("union", "degree", "survey"))-> ces
# Keep only the harmonized variables we need across all elections,
# including the newly created "election" id variable.
ces <- ces %>%
  select(all_of(c("male",
                  "union_both",
                  "union",
                  "degree",
                  "region",
                  "quebec",
                  "age",
                  "religion",
                  "language",
                  "employment",
                  "sector",
                  "party_id",
                  "vote",
                  "occupation",
                  "income",
                  "non_charter_language",
                  "occupation3",
                  "election", "size", "redistribution", "pro_redistribution")))
##
library(stringr)
table(str_detect(names(ces0411), "survey"))
table(str_detect(names(ces00), "survey"))
names(ces)
ces$election
table(ces$union)
# `region` currently covers English Canada only and `quebec` is a dichotomous
# Quebec vs. non-Quebec dummy. Combine them into region2, a single region
# variable for all of Canada.
ces <- ces %>%
  mutate(region2 = case_when(region == 1 ~ "Atlantic",
                             region == 2 ~ "Ontario",
                             region == 3 ~ "West",
                             quebec == 1 ~ "Quebec"))
# Make region2 a factor with Quebec as the reference level.
# This can be changed at any time very easily.
ces$region2 <- factor(ces$region2,
                      levels = c("Quebec", "Atlantic", "Ontario", "West"))
levels(ces$region2)
# Create a female dummy as the mirror of `male`; sometimes we want to report
# the male dichotomy, sometimes the female one.
ces <- ces %>%
  mutate(female = case_when(male == 1 ~ 0,
                            male == 0 ~ 1))
library(car)
# Party-vote dummy variables for modelling. Coding (from val_labels below):
# 0 = Other, 1 = Liberal, 2 = Conservative, 3 = NDP, 4 = BQ, 5 = Green.
ces$ndp<-Recode(ces$vote, "3=1; 0:2=0; 4:5=0; NA=NA")
ces$liberal<-Recode(ces$vote, "1=1; 2:5=0; NA=NA")
ces$conservative<-Recode(ces$vote, "0:1=0; 2=1; 3:5=0; NA=NA")
names(ces)
#### Some occupation recodes ####
# occupation2 collapses the two labour categories (skilled = 4, unskilled = 5)
# into a single Working_Class level; it has no self-employed category.
ces$occupation2<-Recode(as.factor(ces$occupation), "4:5='Working_Class' ; 3='Routine_Nonmanual' ; 2='Managers' ; 1='Professionals'", levels=c('Working_Class', 'Managers', 'Professionals', 'Routine_Nonmanual'))
# occupation4 does the same collapse but keeps self-employed (6) as its own
# distinct level, carved out of the other categories.
ces$occupation4<-Recode(as.factor(ces$occupation3), "4:5='Working_Class' ; 3='Routine_Nonmanual' ; 2='Managers' ; 1='Professionals'; 6='Self-Employed'", levels=c('Working_Class', 'Managers', 'Professionals', 'Routine_Nonmanual', 'Self-Employed'))
# Working-class dichotomies built from occupation4.
# NOTE(review): "working_cass3" looks like a typo for "working_class3" — left
# as-is because later scripts may reference this name; confirm before renaming.
ces$working_cass3<-Recode(ces$occupation4, "'Working_Class'=1; else=0; NA=NA")
ces$working_class4<-Recode(ces$occupation4, "'Working_Class'=1; else=0")
### Value labels often go missing in the creation of the ces data frame
### assign value label
val_labels(ces$sector)<-c(Private=0, Public=1)
val_labels(ces$vote)<-c(Conservative=2, Liberal=1, NDP=3, BQ=4, Green=5, Other=0)
val_labels(ces$male)<-c(Female=0, Male=1)
val_labels(ces$union_both)<-c(None=0, Union=1)
val_labels(ces$degree)<-c(nodegree=0, degree=1)
val_labels(ces$region)<-c(Atlantic=1, Ontario=2, West=3)
val_labels(ces$quebec)<-c(Other=0, Quebec=1)
val_labels(ces$religion)<-c(None=0, Catholic=1, Protestant=2, Other=3)
val_labels(ces$language)<-c(French=0, English=1)
val_labels(ces$non_charter_language)<-c(Charter=0, Non_Charter=1)
val_labels(ces$employment)<-c(Unemployed=0, Employed=1)
val_labels(ces$party_id)<-c(Other=0, Liberal=1, Conservative=2, NDP=3)
val_labels(ces$occupation)<-c(Professional=1, Managers=2, Routine_Nonmanual=3, Skilled=4, Unskilled=5)
val_labels(ces$income)<-c(Lowest=1, Lower_Middle=2, MIddle=3, Upper_Middle=4, Highest=5)
val_labels(ces$occupation3)<-c(Professional=1, Managers=2, Routine_Nonmanual=3, Skilled=4, Unskilled=5, Self_employed=6)
val_labels(ces$redistribution)<-c(Less=0, More=1)
####
names(ces)
#### Check occupation missingness ####
# Proportion of missing values in the occupation variables, by election.
# funs() was deprecated in dplyr 0.8 and later removed; use across() with a
# lambda instead. mean(is.na(.x)) is identical to sum(is.na(.)) / length(.).
ces %>%
  select(occupation, occupation3, election) %>%
  group_by(election) %>%
  summarise(across(everything(), ~ mean(is.na(.x))))
#### Set Theme ####
theme_set(theme_bw())
#This command calls the file 2_diagnostics.R
#source("R_scripts/3_recode_diagnostics.R", echo=T)
#source("R_scripts/4_make_models.R", echo=T)
#source("R_scripts/5_ces15_models.R", echo=T)
#source("R_scripts/5_ces15_block_models.R", echo=T)
#source("R_scripts/5_ces19_models.R", echo=T)
#source("R_scripts/5_ces19_block_models.R", echo=T)
#source("R_scripts/7_class_logistic_models.R", echo=T)
#source("R_scripts/8_block_recursive_models.R", echo=T)
source("R_scripts/8_analysis_script.R", echo=T)
|
34d372a047a165443dba59917ddc8bea22b10ed7
|
d3410af0856f5ed552896a2bcd51548e5dd312eb
|
/man/social.Rd
|
03926b6cdf8ba1469a36f3540213c5aec161fff3
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
lnsongxf/experimentdatar
|
4f0810c49d29656a2757771eb843480a8bda7867
|
f71a9d072aabadf4da95e71baa757842a2d295c9
|
refs/heads/master
| 2021-01-01T09:48:14.429333
| 2019-02-11T12:15:59
| 2019-02-11T12:15:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,944
|
rd
|
social.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ExperimentData.R
\docType{data}
\name{social}
\alias{social}
\title{social data}
\format{A tibble with variables:
\describe{
\item{outcome_voted}{Response variable: Indicator variable where =1 indicates voted in the August 2006 primary}
\item{treat_neighbors}{Treatment: Indicator variable where =1 indicates Neighbors mailing treatment Covariates}
\item{sex}{Indicator variable where =1 indicates male}
\item{yob}{Year of birth}
\item{g2000}{Indicator variable where =1 indicates voted in the 2000 general}
\item{g2002}{Indicator variable where =1 indicates voted in the 2002 general}
\item{p2000}{Indicator variable where =1 indicates voted in the 2000 primary}
\item{p2002}{Indicator variable where =1 indicates voted in the 2002 primary}
\item{p2004}{Indicator variable where =1 indicates voted in the 2004 primary}
\item{city}{City index}
\item{hh_size}{Household size}
\item{totalpopulation_estimate}{Estimate of city population}
\item{percent_male}{Percentage males in household}
\item{median_age}{Median age in household}
\item{median_income}{Median income in household}
\item{percent_62yearsandover}{Percentage of subjects of age higher than 62 yo}
\item{percent_white}{Percentage white in household}
\item{percent_black}{Percentage black in household}
\item{percent_asian}{Percentage asian in household}
\item{percent_hispanicorlatino}{Percentage hispanic or latino in household}
\item{employ_20to64}{Percentage of employed subjects of age 20 to 64 yo}
\item{highschool}{Percentage having only high school degree}
\item{bach_orhigher}{Percentage having bachelor degree or higher}
}}
\source{
\url{https://github.com/gsbDBI/ExperimentData/tree/master/Social}
}
\usage{
social
}
\description{
Data used for the paper "Social Pressure and Voter Turnout: Evidence from a
Large-Scale Field Experiment" by Gerber, Green, and Larimer (2008).
}
\keyword{datasets}
|
f2935cb296ef346687675570000c085eac100987
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/cIRT/R/survey_data.R
|
1402764e1c61ee55d40824fa627a70e9d6aa8bb4
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 583
|
r
|
survey_data.R
|
#' @name survey_data
#' @title Survey Data
#' @description This data set contains the subject's responses survey questions administered using Choice38.
#' @docType data
#' @usage data(survey_data)
#' @format A data frame with 102 observations on the following 2 variables.
#' \describe{
#' \item{\code{id}}{Subject's Assigned Research ID}
#' \item{\code{sex}}{Subject's sex:
#' \itemize{
#' \item Male
#' \item Female
#' }
#' }
#' }
#' @source Choice38 Experiment at UIUC during Spring 2014 - Fall 2014
#' @author Steven Culpepper and James Balamuta
NULL
|
5d967582fe9caa4e7e92ba9f221f419a3ae1dd50
|
90b7b861e03f62e160891de6288d0b774c413a9d
|
/Network Analysis in R/Network analysis in R.R
|
397a26725bd1244204355b89e7bc4263e135ced0
|
[] |
no_license
|
arjun-1102/R-Programming
|
7b1927f79ec97fd6b715859156aeea0b42d56c26
|
fd0ad6399bc344bf329839477f5084f29cad9f85
|
refs/heads/master
| 2021-04-03T01:23:46.647059
| 2018-05-13T17:37:33
| 2018-05-13T17:37:33
| 124,435,537
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,494
|
r
|
Network analysis in R.R
|
###---- Network Analysis in R
# Load igraph
library(igraph)
# Inspect the first few rows of the dataframe 'friends'
head(friends)
# Convert friends dataframe to a matrix
friends.mat <- as.matrix(friends)
# Convert friends matrix to an igraph object
g <- graph.edgelist(friends.mat, directed = FALSE)
# Make a very basic plot of the network
plot(g)
# Load igraph
library(igraph)
# Subset vertices and edges
V(g)
E(g)
# Count number of edges
gsize(g)
# Count number of vertices
gorder(g)
library(igraph)
# Inspect the objects 'genders' and 'ages'
genders
ages
# Create new vertex attribute called 'gender'
g <- set_vertex_attr(g, "gender", value = genders)
# Create new vertex attribute called 'age'
g <- set_vertex_attr(g, "age", value = ages)
# View all vertex attributes in a list
vertex_attr(g)
# View attributes of first five vertices in a dataframe
V(g)[[1:5]]
library(igraph)
# View hours
hours
# Create new edge attribute called 'hours'
g <- set_edge_attr(g, "hours", value = hours)
# View edge attributes of graph object
edge_attr(g)
# Find all edges that include "Britt"
E(g)[[inc('Britt')]]
# Find all pairs that spend 4 or more hours together per week
E(g)[[hours>=4]]
library(igraph)
# Create an igraph object with attributes directly from dataframes
g1 <- graph_from_data_frame(d = friends1_edges, vertices = friends1_nodes, directed = FALSE)
# Subset edges greater than or equal to 5 hours
E(g1)[[hours>=5]]
vertex_attr(g1)
# Plot network and color vertices by gender
V(g1)$color <- ifelse(V(g1)$gender == 'F', "orange", "dodgerblue")
plot(g1, vertex.label.color = "black")
library(igraph)
# Plot the graph object g1 in a circle layout
plot(g1, vertex.label.color = "black", layout = layout_in_circle(g1))
# alt layout_in_circle
# Plot the graph object g1 in a Fruchterman-Reingold layout
plot(g1, vertex.label.color = "black", layout = layout_with_fr(g1))
# alt layout_with_fr
# Plot the graph object g1 in a Tree layout
m <- layout_as_tree(g1)
plot(g1, vertex.label.color = "black", layout = m)
# alt
# Plot the graph object g1 using igraph's chosen layout
m1 <- layout_nicely(g1)
plot(g1, vertex.label.color = "black", layout = m1)
###----------- Edge styling
library(igraph)
# Create a vector of weights based on the number of hours each pair spend together
w1 <- E(g1)$hours
# Plot the network varying edges by weights
m1 <- layout_nicely(g1)
plot(g1,
vertex.label.color = "black",
edge.color = 'black',
edge.width = w1,
layout = m1)
# Create a new igraph object only including edges from the original graph that are greater than 2 hours long
g2 <- delete_edges(g1, E(g1)[hours < 2])
# Plot the new graph
w2 <- E(g2)$hours
m2 <- layout_nicely(g2)
plot(g2,
vertex.label.color = "black",
edge.color = 'black',
edge.width = w2,
layout = m2)
###-------------- Directed networks
library(igraph)
# Get the graph object
g <- graph_from_data_frame(measles, directed = T)
# is the graph directed?
is.directed(g)
# Is the graph weighted?
is.weighted(g)
# Where does each edge originate from?
table(head_of(g, E(g)))
###-------- Identifying edges for each vertex
library(igraph)
# Make a basic plot
plot(g,
vertex.label.color = "black",
edge.color = 'gray77',
vertex.size = 0,
edge.arrow.size = 0.1,
layout = layout_nicely(g))
# Is there an edge going from vertex 184 to vertex 178?
g['184', '178']
# Is there an edge going from vertex 178 to vertex 184?
g['178', '184']
# Show all edges going to or from vertex 184
incident(g, '184', mode = c("all"))
# Show all edges going out from vertex 184
incident(g, '184', mode = c("out"))
###----- Identifying neighbors
library(igraph)
# Identify all neighbors of vertex 12 regardless of direction
neighbors(g, '12', mode = c('all'))
# Identify other vertices that direct edges towards vertex 12
neighbors(g, '12', mode = c('in'))
# Identify any vertices that receive an edge from vertex 42 and direct an edge to vertex 124
n1 <- neighbors(g, '42', mode = c('out'))
n2 <- neighbors(g, '124', mode = c('in'))
intersection(n1, n2)
###----- Distances between vertices
library(igraph)
# Which two vertices are the furthest apart in the graph ?
farthest_vertices(g)
# Shows the path sequence between two furthest apart vertices.
get_diameter(g)
# Identify vertices that are reachable within two connections from vertex 42
ego(g, 2, '42', mode = c('out'))
# Identify vertices that can reach vertex 42 within two connections
ego(g, 2, '42', mode = c('in'))
###----- Identifying key vertices
library(igraph)
# Calculate the out-degree of each vertex
g.outd <- degree(g, mode = c("out"))
# View a summary of out-degree
table(g.outd)
# Make a histogram of out-degrees
hist(g.outd, breaks = 30)
# Find the vertex that has the maximum out-degree
which.max(g.outd)
###------------- Calculating betweenness of network
library(igraph)
# Calculate betweenness of each vertex
g.b <- betweenness(g, directed = T)
# Show histogram of vertex betweenness
hist(g.b, breaks = 80)
# Create plot with vertex size determined by betweenness score
plot(g,
vertex.label = NA,
edge.color = 'black',
vertex.size = sqrt(g.b)+1,
edge.arrow.size = 0.05,
layout = layout_nicely(g))
###----- Visualizing important nodes and edges
# Make an ego graph
g184 <- make_ego_graph(g, diameter(g), nodes = '184', mode = c("all"))[[1]]
# Get a vector of geodesic distances of all vertices from vertex 184
dists <- distances(g184, "184")
# Create a color palette of length equal to the maximal geodesic distance plus one.
colors <- c("black", "red", "orange", "blue", "dodgerblue", "cyan")
# Set color attribute to vertices of network g184.
V(g184)$color <- colors[dists+1]
# Visualize the network based on geodesic distance from vertex 184 (patient zero).
plot(g184,
vertex.label = dists,
vertex.label.color = "white",
vertex.label.cex = .6,
edge.color = 'black',
vertex.size = 7,
edge.arrow.size = .05,
main = "Geodesic Distances from Patient Zero"
)
###----- Forest gump network eigenvector centrality
library(igraph)
# Inspect Forrest Gump Movie dataset
head(gump)
# Make an undirected network
g <- graph_from_data_frame(gump, directed = FALSE)
# Identify key nodes using eigenvector centrality
g.ec <- eigen_centrality(g)
which.max(g.ec$vector)
# Plot Forrest Gump Network
plot(g,
vertex.label.color = "black",
vertex.label.cex = 0.6,
vertex.size = 25*(g.ec$vector),
edge.color = 'gray88',
main = "Forrest Gump Network"
)
###-------- Density and average path length
library(igraph)
# Get density of a graph
gd <- edge_density(g)
# Get the diameter of the graph g
diameter(g, directed = FALSE)
# Get the average path length of the graph g
g.apl <- mean_distance(g, directed = FALSE)
g.apl
###_-------- Creating random graphs
library(igraph)
# Create one random graph with the same number of nodes and edges as g
g.random <- erdos.renyi.game(n = gorder(g), p.or.m = edge_density(g), type = "gnp")
g.random
plot(g.random)
# Get density of new random graph `g.random`
edge_density(g.random)
#Get the average path length of the random graph g.random
mean_distance(g.random, directed = FALSE)
####-------- Network randomization
library(igraph)
# Generate 1000 random graphs with the same order and density as g
# (preallocate the list; seq_len avoids the 1:n zero-length pitfall)
gl <- vector('list', 1000)
for (i in seq_len(1000)) {
  gl[[i]] <- erdos.renyi.game(n = gorder(g), p.or.m = gd, type = "gnp")
}
# Average path length of each of the 1000 random graphs
# (vapply gives a type-stable numeric vector, replacing lapply + unlist)
gl.apls <- vapply(gl, mean_distance, numeric(1), directed = FALSE)
# Plot the distribution of average path lengths and mark the observed value
hist(gl.apls, xlim = range(c(1.5, 6)))
abline(v = g.apl, col = "red", lty = 3, lwd = 2)
# Proportion of random graphs with an average path length below the observed
# one. g.apl was computed above with mean_distance(), so reuse it rather than
# calling the deprecated average.path.length() alias.
sum(gl.apls < g.apl) / 1000
###------ Network substructures
library(igraph)
# Show all triangles in the network.
matrix(triangles(g), nrow = 3)
# Count the number of triangles that vertex "BUBBA" is in.
count_triangles(g, vids='BUBBA')
# Calculate the global transitivity of the network.
g.tr <- transitivity(g)
g.tr
# Calculate the local transitivity for vertex BUBBA.
transitivity(g, vids='BUBBA', type = "local")
###------------- Transitivity randomization
library(igraph)
# Calculate average transitivity of 1000 random graphs
gl.tr <- lapply(gl, transitivity)
gl.trs <- unlist(gl.tr)
# Get summary statistics of transitivity scores
summary(gl.trs)
# Calculate the proportion of graphs with a transitivity score higher than Forrest Gump's network.
sum(gl.trs > transitivity(g))/1000
###-------- Playing with cliques
library(igraph)
# Identify the largest cliques in the network
largest_cliques(g)
# Determine all maximal cliques in the network and assign to object 'clq'
clq <- max_cliques(g)
# Calculate the size of each maximal clique.
table(unlist(lapply(clq, length)))
####_---------- Visualizing cliques
library(igraph)
# Assign largest cliques output to object 'lc'
lc <- largest_cliques(g)
# Create two new undirected subgraphs, each containing only the vertices of each largest clique.
gs1 <- as.undirected(subgraph(g, lc[[1]]))
gs2 <- as.undirected(subgraph(g, lc[[2]]))
# Plot the two largest cliques side-by-side
par(mfrow=c(1,2)) # To plot two plots side-by-side
plot(gs1,
vertex.label.color = "black",
vertex.label.cex = 0.9,
vertex.size = 0,
edge.color = 'gray28',
main = "Largest Clique 1",
layout = layout.circle(gs1)
)
plot(gs2,
vertex.label.color = "black",
vertex.label.cex = 0.9,
vertex.size = 0,
edge.color = 'gray28',
main = "Largest Clique 2",
layout = layout.circle(gs2)
)
###------- Calculating assortativity
# Plot the network
plot(g1)
# Convert the gender attribute into a numeric value
values <- as.numeric(factor(V(g1)$gender))
# Calculate the assortativity of the network based on gender
assortativity(g1, values)
# Calculate the assortativity degree of the network
assortativity.degree(g1, directed = FALSE)
###----------- Randomizing assortativity
# Calculate the observed assortativity
observed.assortativity <- assortativity(g1, values)
# Reference distribution: assortativity after randomly permuting the gender
# attribute 1000 times (list is preallocated; seq_len avoids the 1:n pitfall)
results <- vector('list', 1000)
for (i in seq_len(1000)) {
  results[[i]] <- assortativity(g1, sample(values))
}
# Plot the distribution of randomized assortativity values and add a red
# vertical line at the original observed value
hist(unlist(results))
abline(v = observed.assortativity, col = "red", lty = 3, lwd = 2)
###------ calcuating reciprocity
library(igraph)
# Make a plot of the chimp grooming network
plot(g,
edge.color = "black",
edge.arrow.size = 0.3,
edge.arrow.width = 0.5)
# Calculate the reciprocity of the graph
reciprocity(g)
###------- Community detection
##3--- Fast greedy
# Perform fast-greedy community detection on network graph
kc = fastgreedy.community(g)
# Determine sizes of each community
sizes(kc)
# Determine which individuals belong to which community
membership(kc)
# Plot the community structure of the network
plot(kc, g)
##---- Edge betweenness community detection
# Perform edge-betweenness community detection on network graph
gc = edge.betweenness.community(g)
# Determine sizes of each community
sizes(gc)
# Plot community networks determined by fast-greedy and edge-betweenness methods side-by-side
par(mfrow = c(1, 2))
plot(kc, g)
plot(gc, g)
###-------- Creating interactive graphs using threejs
library(igraph)
library(threejs)
# Set a vertex attribute called 'color' to 'dodgerblue'
g <- set_vertex_attr(g, "color", value = "dodgerblue")
# Redraw the graph interactively, with vertex size 1
graphjs(g, vertex.size = 1)
####-------- Some more interactive graphs
# Numeric vector of vertex eigenvector centralities
ec <- as.numeric(eigen_centrality(g)$vector)
# Vertex sizes: square root of centrality, scaled by 5
v <- 5*sqrt(ec)
# threejs plot with vertex size proportional to centrality
graphjs(g, vertex.size = v)
# Object 'i' containing the memberships from fast-greedy community detection
i <- membership(kc)
# Check the number of different communities
sizes(kc)
# Color each vertex by community membership.
# NOTE(review): only 3 colors are supplied, so this assumes sizes(kc) shows
# exactly 3 communities — confirm against the detection output above.
g <- set_vertex_attr(g, "color", value = c("yellow", "blue", "red")[i])
# Plot the graph using threejs
graphjs(g)
|
46f6caf3801e88e159835be37fdca699f5c8aac3
|
0d3011640e72586db9b3b7c8533fd6b94a59e6ce
|
/PieBarViz/man/hello.Rd
|
2651b7271be0f815b5c3063034bbf62d3190bc98
|
[] |
no_license
|
GenevieveRichards/Pie_Bar_Viz
|
31dc5ff120725a9972642878a58f0ef4e8d10f8d
|
cc77a3df4cdb63cd5de014e9c9dc0c50e8627e02
|
refs/heads/master
| 2020-04-13T18:10:01.368301
| 2018-12-28T23:17:55
| 2018-12-28T23:17:55
| 163,366,594
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 405
|
rd
|
hello.Rd
|
\name{pieBarViz}
\alias{pieBarViz}
\title{Pie Bar Visualisation}
\usage{
pieBar(Dataset,Stacked = FALSE )
}
\description{
Creates a combined pie/bar visualisation of the supplied dataset. Set
\code{Stacked = TRUE} to stack the bars.
}
\examples{
chart2 <-
read.table(text = "Name No_Mutation ATM ATR BRCA2 NBN PALB2 PMS2 POLD1 PRSS1 RAD51D SLX4 XRCC2
'Aggressive Cases' 85 2 1 2 2 1 1 1 1 1 1 1",
header = TRUE)
newData <- melt(chart2, id.var = "Name")
pieBar(newData)
}
|
068124baaf71ac2ecd8ef3bcd4a2a847ddde36de
|
ded24dcc44f53ec1adcf336ed211e4b8c4076c70
|
/library/graph_level.R
|
ceba741b9104490c134f1b259d664f667428e493
|
[] |
no_license
|
hying99/capstry
|
76ecd155d3a3a11a1e9e512249a34b3835235140
|
e7288ab030e72cdee53bc882f9727d798aa5d2a7
|
refs/heads/master
| 2023-07-04T07:46:47.850022
| 2021-08-08T08:42:09
| 2021-08-08T08:42:09
| 393,864,850
| 1
| 0
| null | 2021-08-08T08:42:10
| 2021-08-08T05:15:42
|
Java
|
GB18030
|
R
| false
| false
| 1,118
|
r
|
graph_level.R
|
# Assign each GO term in the ontology graph a level, where a node's level is
# the length of the LONGEST path from the ontology root to that node.
#
# Implementation note: every edge is re-weighted with -1 and Bellman-Ford
# shortest paths are computed from the root; negating the resulting
# distances converts "shortest path with weight -1" into "longest path with
# weight +1". This is valid because the GO graph is acyclic.
#
# graph.var: a graph object with @nodes and @edgeL slots (graphNEL-style)
# onto:      ontology, one of "BP", "CC", "MF"; selects the root GO term
# returns:   numeric vector of levels (root = 0), named by node
GraphLevel <- function(graph.var, onto = "BP") {
  # Root term of each supported ontology
  roots <- c(BP = "GO:0008150", CC = "GO:0005575", MF = "GO:0003674")
  # Fail fast with a clear message instead of the original silent fallthrough
  # (an unknown `onto` previously crashed later with "object not found")
  if (!onto %in% names(roots)) {
    stop("'onto' must be one of: ", paste(names(roots), collapse = ", "))
  }
  graph.new.nodes <- graph.var@nodes
  graph.new.edgeL <- graph.var@edgeL
  # Attach a weight of -1 to every existing edge
  for (i in seq_along(graph.var@edgeL)) {
    edges.i <- graph.var@edgeL[[i]][[1]]
    if (length(edges.i) > 0) {
      graph.new.edgeL[[i]] <- list(edges = edges.i,
                                   weights = rep(-1, length(edges.i)))
    }
  }
  # Single construction/search path replaces the three duplicated branches
  graph.new <- graphNEL(graph.new.nodes, graph.new.edgeL, edgemode = "directed")
  root.node <- graph.new.nodes[which(graph.new.nodes == roots[[onto]])]
  graph.new.distance <- bellman.ford.sp(graph.new, root.node)
  # All distances are <= 0 (weights are -1), so negate to get positive levels
  graph.level <- -graph.new.distance$distance
  return(graph.level)
}
|
bb802ccac838debb1a85409fcb12da502685d3c7
|
92742af6fbcb1d9fdd2e54360c6a5afdbf3bc94d
|
/man/boxGet.Rd
|
ebaa891de735ffffbb80fc8c1bc386a2e6767f25
|
[
"MIT"
] |
permissive
|
alexbrodersen/boxr
|
93f7405828ac445acb6a65ce347cf49c82bd0b99
|
14d2ae61c5142edc4dc8ff1c1c96c64b1b5bf650
|
refs/heads/master
| 2022-12-29T08:07:18.782372
| 2020-09-06T18:05:56
| 2020-09-06T18:05:56
| 295,472,641
| 0
| 0
|
NOASSERTION
| 2020-09-14T16:20:04
| 2020-09-14T16:20:03
| null |
UTF-8
|
R
| false
| true
| 609
|
rd
|
boxGet.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/boxr__internal_get.R
\name{boxGet}
\alias{boxGet}
\title{Issue a get request for a file stored on box.com}
\usage{
boxGet(
file_id,
local_file,
version_id = NULL,
version_no = NULL,
download = FALSE,
pb = FALSE
)
}
\description{
This internal function is shared by \code{\link[=box_dl]{box_dl()}}, and the
\code{\link[=box_read]{box_read()}} family of functions, to issue GET requests, while
handling things like version numbers etc. It can be used to download a file,
or just read it into memory.
}
\keyword{internal}
|
62d6e4c1d1b3a03550725f154491e7e10483201a
|
9b07a5ebd6eeceeda384cfdcb4012763769d9455
|
/code/SelectSlidingwindow.R
|
388b629ede62c636b2355faa4b08ab47a27e96ba
|
[] |
no_license
|
qlcm/tmp
|
4a16aebd06581bb738c137bedf3566b780c218e6
|
fc33d28354cad3863b9743617ebf5d65b7488e5d
|
refs/heads/master
| 2021-01-10T04:52:28.976116
| 2018-05-05T13:55:13
| 2018-05-05T13:55:13
| 54,164,883
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,107
|
r
|
SelectSlidingwindow.R
|
# Sliding-window count of CpG ("CG") dinucleotide starts along a reference
# sequence.
#
# ref.fa:     a Biostrings-style sequence (e.g. a BSgenome chromosome)
# kWinSize:   half-window size; swsCalc sums kWinSize positions on each side
# ref.length: length of the reference; defaults to length(ref.fa).
#             BUG FIX: previously this was read from a GLOBAL variable named
#             ref.length, which silently produced wrong results (or errors)
#             when the global was stale or missing. The default preserves
#             existing caller behavior (callers set ref.length <- length(ref.fa)
#             immediately before calling).
# returns:    numeric vector of windowed CpG counts, one per base
GetDensity <- function(ref.fa, kWinSize, ref.length = length(ref.fa)) {
  posi <- matchPattern("CG", ref.fa)
  # Logical track: TRUE at each CpG start position
  rt <- logical(ref.length)
  rt[start(posi)] <- TRUE
  win <- list(L = kWinSize, R = kWinSize)
  return(swsCalc(rt, win))
}
# Sliding-window methylation score: windowed C-read count divided by windowed
# total (C + T) read count. Windows with no reads (0/0 -> NaN) score 0.
#
# cg.mtbr:    mtbr data frame; columns are renamed to
#             chr, posi, rC_n, rC_p, rT_n, rT_p (strand-split read counts)
# kWinSize:   half-window size used on both sides by swsCalc
# ref.length: reference sequence length; defines the output length
# returns:    numeric vector of scores in [0, 1], one per base
GetScore <- function(cg.mtbr, kWinSize, ref.length) {
  colnames(cg.mtbr) <- c("chr", "posi", "rC_n", "rC_p", "rT_n", "rT_p")
  # Sum plus- and minus-strand reads at each measured CpG position
  c.reads <- cg.mtbr$rC_p + cg.mtbr$rC_n
  t.reads <- cg.mtbr$rT_p + cg.mtbr$rT_n
  # Spread the per-position counts onto full-chromosome coordinate tracks
  c.track <- integer(ref.length)
  t.track <- integer(ref.length)
  c.track[cg.mtbr$posi] <- c.reads
  t.track[cg.mtbr$posi] <- t.reads
  # Windowed sums over kWinSize bases to the left and right of every base
  window <- list(L = kWinSize, R = kWinSize)
  c.sum <- swsCalc(c.track, window)
  t.sum <- swsCalc(t.track, window)
  score <- c.sum / (c.sum + t.sum)
  # Coverage gaps yield NaN; report them as score 0
  score[is.na(score)] <- 0
  return(score)
}
# Driver script: for every tissue and chromosome, compute the correlation
# between windowed CpG density and windowed methylation score at a fixed
# window size (1250 bp on each side), appending one row per (tissue, chrom)
# to a shared results file. Paths are cluster-specific (/home/qzzh/...).
library("methyutils")
library("BSgenome.Hsapiens.UCSC.hg38")
tissues <- list.files("/home/qzzh/J.R.Ecker/mtbr/mtbr1/")
file_path<- "/home/qzzh/J.R.Ecker/mtbr/mtbr1/"
#chrs <- c("chr21","chr22","chrX","chrY")
chrs <- c("chr1","chr2","chr3","chr4","chr5","chr6","chr7","chr8","chr9","chr10","chr11","chr12","chr13","chr14","chr15",
"chr16","chr17","chr18","chr19","chr20","chr21","chr22","chrX","chrY")
#ref.fa <- Hsapiens[[ts]]
#ref.length <- length(ref.fa)
#ts.list <- list()
for(ts in tissues){
for (chr in chrs){
# Progress message with a timestamp
message(paste(ts," ",chr," ","is running ",sep=""),date())
# load() is expected to define `cg.mtbr` in the global env — TODO confirm
load(paste(file_path,ts,"/",ts,"_unique.mtbr.cg/",chr,".Rdata",sep=""))
ref.fa <- Hsapiens[[chr]]
ref.length <- length(ref.fa)
#ks <- c()
#cors <- c()
# Fixed half-window size selected for this run
kWinSize <- 1250
density <- GetDensity(ref.fa, kWinSize)
score <- GetScore(cg.mtbr, kWinSize, ref.length)
# Correlate density and score only at measured CpG positions
cr <- cor(density[cg.mtbr$posi],score[cg.mtbr$posi])
#ks <- c(ks,kWinSize)
#cors <- c(cors,cr)
sw.df <- data.frame(winsize = kWinSize,cor = cr,tissue = ts,chrom=chr)
#ts.list[[ts]] <- sw.df
# append = TRUE accumulates one row per iteration into the shared file
write.table(sw.df,"/home/qzzh/cgDensity/SelectSlidingwindow/SelectSlidingwindow_mtbr1.txt",row.names=F,col.names=F,quote=F,append=T)
}
}
#tissue <- do.call(rbind,ts.list)
#write.table(tissue,"/home/qzzh/cgDensity/SelectSlidingwindow/SelectSlidingwindow.txt",row.names=F,col.names=T,quote=F)
|
8cdbf7b29f1a3065e5c756c90cf891fde6805d79
|
5d371965180b1e28876554175f15ad7ba289795a
|
/Plot2.R
|
abab8041659661702444432b4356cbc449a99542
|
[] |
no_license
|
emclass/MyPlotsProj1
|
148a943be9528c4a96b62ffe7c3ddd99606d20f2
|
7677c8c72ad9b29fa0b13b8886c2c98e53382ff4
|
refs/heads/master
| 2020-12-26T08:15:05.840526
| 2014-10-14T01:58:43
| 2014-10-14T01:58:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 798
|
r
|
Plot2.R
|
# Plot 2: Global Active Power (kW) vs. time for 2007-02-01 .. 2007-02-02,
# saved to Plot2.png. Reads a small sample first to infer column classes,
# then the full file with "?" treated as NA.
initial <- read.table("household_power_consumption.txt", sep= ";", nrows=5000,
header = TRUE)
classes <- sapply(initial, class)
data <- read.table("household_power_consumption.txt", sep = ";", colClasses=classes,
na.strings = "?",header=TRUE)
library(data.table)
data<-data.table(data)
#setkey(data,Date)
# Keep only the two target days (dates are stored as d/m/Y strings)
subdata<-data[as.Date(data$Date,"%d/%m/%Y") >= as.Date(c("01/02/2007"),"%d/%m/%Y") &
as.Date(data$Date,"%d/%m/%Y") <= as.Date(c("02/02/2007"), "%d/%m/%Y"),]
#Plot 2
par(mfrow=c(1,1))
# BUG FIX: join Date and Time with a SPACE so the string matches the
# "%d/%m/%Y %H:%M:%S" format below; sep="" produced e.g. "1/2/200700:00:00",
# which strptime cannot parse (NA datetimes, empty plot).
subdata<-subdata[,DateTime:=paste(Date,Time,sep=" ")]
xData<-strptime(subdata$DateTime,"%d/%m/%Y %H:%M:%S")
plot(xData,subdata$Global_active_power, type="l",xlab="",ylab="Global Active Power (kilowatts)")
dev.copy(png,file='Plot2.png')
dev.off()
|
9e972efad7e3f5c3cc957c1d4c18dda2628ea6c4
|
ecfc59b69d7c55c7e9cf14475b45fcb128cc5705
|
/man/is_date.Rd
|
d27f543caff129a8281b686ae6c548b989dd9b1b
|
[] |
no_license
|
cran/assertive
|
2c49d5e8197c3eaa844212dbf54ba2c999c08666
|
3fa4a529eeaa4560c065c2043ae122105f1469f8
|
refs/heads/master
| 2020-12-29T02:44:25.998287
| 2020-08-01T00:00:02
| 2020-08-01T00:00:02
| 17,694,504
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 327
|
rd
|
is_date.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exports-types.R
\name{is_date}
\alias{is_date}
\alias{is_posixct}
\alias{is_posixlt}
\alias{assert_is_date}
\alias{assert_is_posixct}
\alias{assert_is_posixlt}
\title{Is the input a date?}
\description{
See \code{\link[assertive.types]{is_date}}.
}
|
5000ac46cdfd4a902c1c004fccf4f45598b92691
|
9ec02f3f906d9b5d5e8641de721ededa1b50831f
|
/man/partial.Rd
|
3561311c98bd111ae2ac8e2a566526c6d71287dc
|
[] |
no_license
|
thomasp85/curry
|
c84ae13ba02724373ff5ee863903dc240e8d4f66
|
e68ed51ec823f81986774ce855da278559d79f13
|
refs/heads/master
| 2020-05-23T08:09:11.205811
| 2016-09-29T13:49:04
| 2016-09-29T13:49:04
| 69,344,879
| 29
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,419
|
rd
|
partial.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/partial.R
\name{partial}
\alias{\%><\%}
\alias{partial}
\title{Apply arguments partially to a function}
\usage{
fun \%><\% args
partial(fun, args)
}
\arguments{
\item{fun}{A function to be partially applied. Can be any function (normal,
already partially applied, primitives).}
\item{args}{A list of values that should be applied to the function.}
}
\value{
A function with the same arguments as \code{fun} except for the ones
given in \code{args}
}
\description{
The \code{partial} function and the \code{\%><\%} operator allows you to
partially call a function with a list of arguments. Named elements in the
list will be matched to function arguments and these arguments will be
removed from the returned function. Unnamed elements are only allowed for
functions containing an ellipsis, in which case they are considered part of
the ellipsis.
}
\note{
Multiple partial application does not result in multiple nested calls,
so while the first partial call adds a layer around the called function,
potentially adding a very small performance hit, partially calling multiple
times will not add to this effect.
}
\examples{
dummy_lengths <- vapply \%><\% list(FUN = length, FUN.VALUE = integer(1))
test_list <- list(a = 1:5, b = 1:10)
dummy_lengths(test_list)
}
\seealso{
Other partials: \code{\link{curry}},
\code{\link{tail_curry}}
}
|
594b946102efd0ab17888a4f1f55bba085606c2d
|
50711e687a44aeb126149528f2e5deec6e261cfb
|
/complexHeatMap.R
|
175dee70bbf2786014c1fbd3c91badbefb8feb96
|
[] |
no_license
|
BlackburnLab/immuneCellEnrichment
|
83ba534213e7a7de6a21b6854149b182b3a56545
|
3f23106995dca2a7037b8f21f1499f438ef194a3
|
refs/heads/master
| 2023-04-02T13:22:44.139834
| 2021-03-14T18:58:49
| 2021-03-14T18:58:49
| 352,647,806
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,233
|
r
|
complexHeatMap.R
|
# Exploratory analysis of immune-cell deconvolution fractions (CIBERSORT-style
# output) across TB patient challenge groups: heatmap, PCA, and t-SNE.
# Paths are machine-specific (Windows user directory).
library(devtools)
##install_github("jokergoo/ComplexHeatmap")
library(ComplexHeatmap)
library(UpSetR)
cellPop = read.csv("C:/Users/Javan_Okendo/Desktop/cybersort/cybersort_source_code/TB_hart_deconPDF/complete_sample_group_immune_deconvolution.csv",
header = TRUE, sep = ",")
head(cellPop)
# Heatmap over the cell-fraction columns (columns 2..23)
Heatmap(cellPop[2:23])
nrow(cellPop)
ncol(cellPop)
#============================
# Heatmap analysis of the different cell fractions
library(ggplot2)
library(RColorBrewer)
library(heatmap.plus)
# Previous-TB patient groups
# Load the dataframe
prevTB<- read.csv("C:/Users/Javan_Okendo/Desktop/cybersort/cybersort_source_code/TB_hart_deconPDF/prevTB.csv",header = T,sep = ',')
head(prevTB) #Check the first few lines of the data
BCG <- prevTB[grep("_BCG",prevTB$Input.Sample), ] #Extract the BCG patient cohort
PPD <- prevTB[grep("_PPD",prevTB$Input.Sample), ] #PPD challenge samples
Baseline <- prevTB[grep("_BL",prevTB$Input.Sample), ] #baseline samples
Saline <- prevTB[grep("_sal",prevTB$Input.Sample), ] #saline challenge samples
#==========================PCA============================
# NOTE(review): this read.csv uses a relative path, unlike the absolute
# paths above — assumes the working directory is already set; verify.
df = read.csv("complete_sample_group_immune_deconvolution.csv",header = T,sep = ',')
library("FactoMineR")
library("factoextra")
# Drop the first two (non-numeric identifier) columns before PCA
df2=df[,-1:-2]
PCA(df2, scale.unit = TRUE, ncp = 5, graph = F)
#===================correlation
res.pca <- PCA(df2, graph = FALSE)
print(res.pca)
# Visualize and interpret the data
fviz_pca_biplot(res.pca)
# Scree plot of explained variance per component
fviz_eig(res.pca, addlabels = TRUE, ylim = c(0, 50))
# Correlation between the immune cells in BALF
fviz_pca_var(res.pca, col.var = "red",repel = T)
# Get the PCA variables
var <- get_pca_var(res.pca)
var
# Coordinates
head(var$coord)
# Cos2: quality on the factor map
head(var$cos2)
# Contributions to the principal components
head(var$contrib)
# Correlation plot of variable cos2 values
library("corrplot")
corrplot(var$cos2, is.corr=F)
# Total cos2 of variables on Dim.1 and Dim.2
fviz_cos2(res.pca, choice = "var", axes = 1:2)
# Color by cos2 values: quality on the factor map
fviz_pca_var(res.pca, col.var = "cos2",
gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"),
repel = TRUE # Avoid text overlapping
)
# Run the PCA function (individuals plot colored by challenge group)
iris.pca <- PCA(df2, graph = FALSE)
fviz_pca_ind(iris.pca,
geom.ind = "point", # show points only (but not "text")
col.ind = df$Group, # color by groups
palette = c("#00AFBB", "#E7B800", "#FC4E07","#000066"),
addEllipses = F, # Concentration ellipses
legend.title = "Challenge Groups"
)
# Naming the individual points
ind.p <- fviz_pca_ind(iris.pca, geom = "point", col.ind = df$Group)
ggpubr::ggpar(ind.p,
title = "Principal Component Analysis",
subtitle = "Baseline patient challenge groups",
caption = "Source: factoextra",
xlab = "PC1", ylab = "PC2",
legend.title = "Species", legend.position = "top",
ggtheme = theme_light(), palette = "jco"
)
# Add more aesthetics: biplot with contribution-scaled variables
fviz_pca_biplot(iris.pca,repel = TRUE,
# Individuals
geom.ind = "point",
fill.ind = df$Group, col.ind = "black",
pointshape = 21, pointsize = 2,
palette = "jco",
addEllipses = F,
# Variables
alpha.var ="contrib", col.var = "contrib",
gradient.cols = "RdYlBu",
legend.title = list(fill = "Group", color = "Contrib",
alpha = "Contrib")
)
#=====================End of PCA========================================================
setwd("C:/Users/Javan_Okendo/Desktop/cybersort/complete_BAL_countmatrix/")
total_samples <- read.csv("C:/Users/Javan_Okendo/Desktop/cybersort/complete_BAL_countmatrix/complete_sample_group_immune_deconvolution.csv",header = T,sep = ',')
# Extract the different patient groups for t-SNE analysis
LTBI <- total_samples[grep("_LTBI",total_samples$Input.Sample), ] #LTBI patient group
write.csv(LTBI, file = "LTBI_patient_group_subset.csv")
prevTB <- total_samples[grep("_prevTB",total_samples$Input.Sample), ] #previous-TB patient group
write.csv(prevTB,file = "prevTB_patient_group_subset.csv")
rectb <- total_samples[grep("_recTB_",total_samples$Input.Sample), ] #recurrent-TB patient group
write.csv(rectb,file = "recTB_patient_group_subset.csv")
#==============Latent TB infection
# NOTE(review): despite the variable name, this reads the RECURRENT-TB
# subset file written just above — confirm this is intentional.
BL_group<- read.csv("recTB_patient_group_subset.csv",sep = ',',header = T)
library(Rtsne)
## Learning t-SNE Plotting
## Load dataset
df <- BL_group # Loading the immune dataset into a object called IR
## Split df into two objects: 1) containing measurements 2) containing species type
IR_data <- df[ ,2:22] # measurement columns 2..22 (cell fractions)
IR_species <- df[ ,2] # grouping column used to color points
## Load the t-SNE library
library(Rtsne)
## Run the t-SNE algorithm and store the results into an object called tsne_results
tsne_results <- Rtsne(IR_data, perplexity=2, check_duplicates = FALSE,verbose=T,max_iter=500) # You can change the value of perplexity and see how the plot changes
## Generate the t_SNE plot
par(mfrow=c(1,1)) # single plotting panel
plot(tsne_results$Y, col = "black", bg= IR_species, pch = 21, cex = 1.5,xlab = "t-SNE 1",
ylab = "t-SNE 2",main = "Recurrent TB Group: PPD,BCG, Baseline and Saline Lung challenge") # Second plot: Color the plot by the real species type (bg= IR_species)
text(tsne_results$Y, labels=df$Group)
# t-SNE plot with the ggplot package
library(ggplot2)
tsne_plot <- data.frame(x = tsne_results$Y[,1], y = tsne_results$Y[,2], Challenge_Group = df$Group)
ggplot(tsne_plot) + geom_point(aes(x=x, y=y, color=Challenge_Group))+theme_bw()+
xlab("t-SNE 1") + ylab("t-SNE 2") + ggtitle("Previous TB: PPD,BCG, Baseline $ Saline lung Challenge")+
theme(axis.text=element_text(size=16),axis.title=element_text(size=16,face="bold"))
|
358db25c1bdd9b30bf71d94ffc8459712e477bba
|
35edf35bebc25df564887a2e107b13f5ba47a605
|
/Homework1/man/dmvnorm.Rd
|
d6ae22e47e521ccc6c5689e3126e863370299df1
|
[] |
no_license
|
dengdetian0603/Biostat778_HW1
|
0c8241bb7f67127271527516a523d43c1c65b3c2
|
9694bc444ffd26567f12cb14977a62ddaab676a9
|
refs/heads/master
| 2021-01-21T03:51:40.111609
| 2013-11-13T16:22:31
| 2013-11-13T16:22:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,864
|
rd
|
dmvnorm.Rd
|
\name{dmvnorm}
\alias{dmvnorm}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Fast Multivariate Normal Density
}
\description{
This function evaluates the k-dimensional multivariate Normal density with mean mu and covariance S.
}
\usage{
dmvnorm(x, mu, S, log = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
a n*k matrix of points to be evaluated.
}
\item{mu}{
a vector of means of length k for the k-dimensional Normal.
}
\item{S}{
a k*k covariance matrix.
}
\item{log}{
If log == TRUE, returns the logged density(by default), otherwise, returns the original density.
}
}
\details{
In this function, Cholesky decomposition was used to compute the Mahalanobis distance term and the determinant of S in the density function. By using the function chol(), the positive definiteness of S is checked. If it is not positive definite, the function will return an error message.
}
\value{
The function returns a vector of length n, containing the values of the multivariate Normal density evaluated at the n points.
}
\references{
Nocedal, Jorge, and S. Wright. Numerical optimization, series in operations research and financial engineering. Springer, New York (2006).
}
\author{
Detian Deng
}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
%}
\examples{
n <- 10
n2 <- n^2
xg <- seq(0, 1, length = n)
yg <- xg
g <- data.matrix(expand.grid(xg, yg))
D <- as.matrix(dist(g))
phi <- 5
S <- exp(-phi * D)
mu <- rep(0, n2)
set.seed(1)
x <- matrix(rnorm(n2), byrow = TRUE, ncol = n2)
mymvpdf<-dmvnorm(x=x, mu=mu, S=S, log = TRUE)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
3be4953efaadb1543cc3bcc3e4fee5b645eae7e3
|
060e6cb5651dea54d531a2bf43790c50f89756f4
|
/man/getSoreDurations.Rd
|
b0d5183c80a1af5aa768e12484560a57079fb888
|
[] |
no_license
|
MikeLydeamore/TMI
|
fdadc800417119fef939cf10938703d9fb015b9d
|
87e2c0db71122f248a8131b5a55e58a8ef810e24
|
refs/heads/master
| 2020-03-06T21:36:28.878958
| 2019-10-20T05:25:26
| 2019-10-20T05:25:26
| 127,082,021
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 676
|
rd
|
getSoreDurations.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tmi-functions.R
\name{getSoreDurations}
\alias{getSoreDurations}
\title{Get the per-individual sore durations}
\usage{
getSoreDurations(panel_data, formula, id = ID)
}
\arguments{
\item{panel_data}{Panel data to fit for}
\item{formula}{A formula of the form state~time, where state is the state variable (1=infectious) and time is the observation time}
\item{id}{The ID variable which identifies each individual (provide unquoted)}
}
\value{
A vector of the empirical durations of each skin sore infection, assuming continuous observation.
}
\description{
Get the per-individual sore durations
}
|
e80628bef4462c6c3733d2850ccec7ab427c72fe
|
a4a658d367ddf2cf2ad2f2c381605573cc0228fb
|
/man/get_version.Rd
|
2fa1586b5c4095dc3acef4dd960e78a5a3200e37
|
[
"MIT"
] |
permissive
|
Dschaykib/newsmd
|
1054d016e48f25490906149a51b756f1b4501ffc
|
1614d02eca9c35af7360de86ca1a5ce85251fd9a
|
refs/heads/master
| 2023-04-19T19:56:02.042189
| 2023-04-19T09:54:56
| 2023-04-19T09:54:56
| 141,821,455
| 7
| 2
|
NOASSERTION
| 2023-04-19T09:55:52
| 2018-07-21T14:44:57
|
R
|
UTF-8
|
R
| false
| true
| 826
|
rd
|
get_version.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_version.R
\name{get_version}
\alias{get_version}
\title{Retrieve the version number of a NEWS.md file}
\usage{
get_version(file, latest = TRUE)
}
\arguments{
\item{file}{a path to a file}
\item{latest}{a Boolean, if TRUE (default) only the latest version is
returned}
}
\value{
either a single string or a vector with version numbers
}
\description{
This function tries to extract the version number from within a file.
There are a few pitfalls:
* If the word "version" appears in the text but denotes a dependency, it is
still detected.
* If the file has a date before the version, it will return the date instead
of the version.
* It is assumed that the NEWS.md file is updated from the top, so the latest
version is the first one.
}
|
0fdacb2abefc75924618b6670d340ff76d8480c2
|
766e8ee63386380e3c02e9254ba05b1a43870bd7
|
/01_LF data wrangle.R
|
ec39f3b6398a1f3db14f7ccaa88be95231138337
|
[] |
no_license
|
stephenresearch/Local_Freezeup_Churchill
|
c97813e45d76823760c7e2761aba48313a92564d
|
a6850d8fdec8c9b3c02985dfe8b59cef63071269
|
refs/heads/master
| 2023-07-13T15:36:33.452676
| 2021-08-20T22:25:54
| 2021-08-20T22:25:54
| 398,411,800
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,545
|
r
|
01_LF data wrangle.R
|
# Local freeze-up analysis (Churchill): merge polar-bear ice-in dates with
# Oct/Nov river flow and Environment Canada weather (via weathercan), then
# write the combined table to LocalFreeze.csv.
library(tidyverse)
library(lubridate)
#library(reshape)
library(weathercan)
# Palette -----------------------------------------------------------------
# just for fun palette
library(wesanderson)
pal <- wes_palette(name = "Zissou1", type = "discrete")
pal2 <- c(pal[1], pal[3])
# NOTE(review): these two calls return scale objects that are discarded —
# they have no effect unless added to a ggplot; likely leftover snippets.
scale_color_manual(values=wes_palette(n=3, name="Zissou1"))
scale_fill_manual(values=wes_palette(n=3, name="Zissou1"))
# Polar bear data ---------------------------------------------------------
PB <- read.csv("PByearice_2019.csv", header=T)
# IceOrd = day-of-year of estimated ice-in date
PB <- mutate(PB, year=Year, LastRelease=ymd(LastRelease), EstIceIn= ymd(EstIceIn), IceOrd=yday(EstIceIn))
ggplot (PB, aes(x=year, y=IceOrd)) + geom_point()
# Hydro data --------------------------------------------------------------
hydro <- read.csv("hydro Readhead Daily__May-12-2021_02_16_04AM.csv", header=T)
hydro <- mutate(hydro, Date=ymd(Date), month=month(Date, label=T),
year=year(Date))
# Mean Oct/Nov discharge (m3/s) per year from 1983 onward.
# NOTE(review): year >= "1983" compares against a character string, forcing
# a string comparison — works for 4-digit years but year >= 1983 is safer.
flow <- hydro %>%
filter(month %in% c("Oct", "Nov"), PARAM=="m3/s", year >="1983")%>%
group_by(year, month)%>%
summarise(meanFlow=mean(Value))
ggplot(flow, aes(x=year,y=meanFlow, col=month))+ geom_point()+
scale_color_manual(values=wes_palette("Zissou1", type="discrete"))
# Weather data ------------------------------------------------------------
## load weather data from 1983 to 2020; can skip down
stations_search("Churchill", interval = "day")
##Station 48969 can provide Oct/Nov 2010 to 2019
##overlaped by 44244
#C48969 <-weather_dl(station_id = 48969, start = "1983-01-01", end = "2020-07-15", interval = "day")
#C48969 <- C48969 %>%
# select(station_id, date, year, month, max_temp, mean_temp, min_temp,
# dir_max_gust, spd_max_gust)%>%
# mutate(date = ymd(date), month=month(date, label=T), year=year(date))%>%
# filter(month %in% c("Oct", "Nov"))
#Station 3871 can provide Oct/Nov from 1983 to 2007
C3871 <-weather_dl(station_id = 3871, start = "1983-01-01", end = "2020-07-15", interval = "day")
C3871 <- C3871 %>%
select(station_id, date, year, month, max_temp, mean_temp, min_temp,
dir_max_gust, spd_max_gust)%>%
mutate(date = ymd(date), month=month(date, label=T))%>%
filter(month %in% c("Oct", "Nov"), year <="2004")
##Station 50148 can provide Oct/Nov from 2018 to 2020
##overlapped by station 44244
#C50148 <-weather_dl(station_id = 50148, start = "1983-01-01", end = "2020-12-15", interval = "day")
#C50148 <- C50148 %>%
# select(station_id, date, year, month, max_temp, mean_temp, min_temp,
# dir_max_gust, spd_max_gust)%>%
# mutate(date = ymd(date), month=month(date, label=T), year=year(date))%>%
# filter(month %in% c("Oct", "Nov"))
#Station 44244 can provide Oct/Nov 2012-21
C44244 <-weather_dl(station_id = 44244, start = "2005-01-01", end = "2021-01-01", interval = "day")
C44244 <- C44244 %>%
select(station_id, date, year, month, max_temp, mean_temp, min_temp,
dir_max_gust, spd_max_gust)%>%
mutate(date = ymd(date), month=month(date, label=T))%>%
filter(month %in% c("Oct", "Nov"))
# Stack the two non-overlapping station ranges into one record
AllWeather <- rbind(C3871,C44244)
write.csv(AllWeather,"AllWeatherChurchill.csv")
Weather <- AllWeather %>%
group_by(year, month)%>%
summarize(avg_temp = mean(mean_temp))
ggplot(Weather, aes(x=year, y=avg_temp, col=month))+ geom_point()+
scale_color_manual(values=wes_palette("Zissou1", type="discrete"))
# Merging data sources ----------------------------------------------------
Environment <- merge(flow, Weather, by=c("year", "month"))
Total <- merge(Environment, PB, by="year")
write.csv(Total,"LocalFreeze.csv")
|
b17e2bce591b183e8bbb1c05aebe79357b3ec7af
|
fc36112ec2687ee3a56086fc121a8e8101c5d62c
|
/man/vocab_lter_scope.Rd
|
a4c09e1b1c3d675adeabd3aad51185ec475715c9
|
[
"MIT"
] |
permissive
|
EDIorg/EMLassemblyline
|
ade696d59147699ffd6c151770943a697056e7c2
|
994f7efdcaacd641bbf626f70f0d7a52477c12ed
|
refs/heads/main
| 2023-05-24T01:52:01.251503
| 2022-11-01T01:20:31
| 2022-11-01T01:20:31
| 84,467,795
| 36
| 17
|
MIT
| 2023-01-10T01:20:56
| 2017-03-09T17:04:28
|
R
|
UTF-8
|
R
| false
| true
| 576
|
rd
|
vocab_lter_scope.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{vocab_lter_scope}
\alias{vocab_lter_scope}
\title{Get the scope of an LTER Controlled Vocabulary term}
\usage{
vocab_lter_scope(id)
}
\arguments{
\item{id}{(numeric) An identification number of a valid term in the LTER
Controlled Vocabulary.}
}
\value{
(character) The scope description for a LTER Controlled Vocabulary
term. Note, not all terms have descriptions.
}
\description{
Get the scope description for a term in the LTER Controlled Vocabulary.
}
\keyword{internal}
|
67d7e921f15c3847383321a59fac25befcda156d
|
8fe55d803ff5b0567cae81c5f5c996b22d0be251
|
/06_Literature Reanalysis (Mouse)/06_Zeisel et al Reanalysis.R
|
fc4f0cf88ec2fed37cfde4a352f4bdbf2b6e2ad8
|
[
"CC-BY-4.0"
] |
permissive
|
samuel-marsh/Marsh_et-al_2022_scRNAseq_Dissociation_Artifacts
|
3493e6f1c54536262178f979f8bdd8909bac7e73
|
aeb696544d8cb2d026ca96899f706537ec296657
|
refs/heads/master
| 2023-04-18T08:27:44.228473
| 2022-03-08T21:43:09
| 2022-03-08T21:43:09
| 449,016,962
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,055
|
r
|
06_Zeisel et al Reanalysis.R
|
# Reanalysis of Zeisel et al. microglia (loom -> Seurat): QC, normalization,
# clustering, removal of the endothelial cluster, reclustering of microglia,
# and module scoring with shared activation / homeostatic signatures.
# Assumes loomR/Seurat/readr etc. are attached and `shared_sig` /
# `homeostatic_mg` gene lists exist in the workspace — TODO confirm.
# Load Data & Create Object -------------------------------------------
zeisel <- connect(filename = "~/Desktop/Literature Reanalysis/Zeisel et al (Papain 10X)/l6_r4_microglia.loom", mode = "r")
zeisel
zeisel_seurat <- as.Seurat(zeisel)
zeisel$close_all()
# save raw seurat object after conversion from loom
write_rds(zeisel_seurat, "Final_RDS_Objects/zeisel_raw_seurat.RDS")
zeisel_seurat <- read_rds("Final_RDS_Objects/zeisel_raw_seurat.RDS")
# Add mito percentage to data
zeisel_seurat <- PercentageFeatureSet(zeisel_seurat, pattern = "^mt-", col.name = "percent_mito")
# QC Filtering (visual inspection of cutoffs)
QC_Plots_Genes(zeisel_seurat, low_cutoff = 400)
QC_Plots_UMI(zeisel_seurat, high_cutoff = 5000)
QC_Plots_Mito(zeisel_seurat)
# QC Filter: <5000 UMIs, <10% mito, >400 genes per cell
zeisel_seurat <- subset(x = zeisel_seurat, subset = nCount_RNA < 5000 & percent_mito < 10 & nFeature_RNA > 400)
# Normalize Data ------------------------------------------------------
zeisel_seurat <- NormalizeData(zeisel_seurat, normalization.method = "LogNormalize", scale.factor = 10000)
zeisel_seurat <- FindVariableFeatures(zeisel_seurat, selection.method = "mean.var.plot", mean.cutoff = c(0.0125, 3), dispersion.cutoff = c(1, Inf))
# Scale the data, regressing out UMI count and mito percentage
all_genes <- rownames(zeisel_seurat)
zeisel_seurat <- ScaleData(zeisel_seurat, features = all_genes, vars.to.regress = c("nCount_RNA", "percent_mito"))
zeisel_seurat <- RunPCA(zeisel_seurat, features = VariableFeatures(object = zeisel_seurat), npcs = 100)
ElbowPlot(zeisel_seurat, ndims = 40)
beep(sound = 2)
zeisel_seurat <- JackStraw(zeisel_seurat)
zeisel_seurat <- ScoreJackStraw(zeisel_seurat, dims = 1:20)
JackStrawPlot(zeisel_seurat, dims = 1:20)
beep(sound = 2)
DimHeatmap(zeisel_seurat, dims = 12)
# Clustering (11 PCs chosen from the elbow/JackStraw plots above)
zeisel_seurat <- FindNeighbors(zeisel_seurat, dims = 1:11)
zeisel_seurat <- FindClusters(zeisel_seurat, resolution = 0.2)
zeisel_seurat <- RunTSNE(zeisel_seurat, dims = 1:11)
DimPlot(zeisel_seurat, label = TRUE, reduction = "tsne", label.size = 5)
# Examine Clusters
markers <- FindAllMarkers(zeisel_seurat)
# Cluster 5 are endothelial cells
# Save Object ---------------------------------------------------------
write_rds(zeisel_seurat, "Final_RDS_Objects/zeisel_reanalyzed_clustered.RDS")
# Subset the endothelial cells out of the object (keep everything but cluster 5)
zeisel_micro <- subset(zeisel_seurat, idents = 5, invert = TRUE)
write_rds(zeisel_micro, "Final_RDS_Objects/zeisel_micro_subset_raw.RDS")
# QC Filtering (re-inspect after subsetting)
QC_Plots_Genes(zeisel_micro, low_cutoff = 400)
QC_Plots_UMI(zeisel_micro, high_cutoff = 5000)
QC_Plots_Mito(zeisel_micro)
# Normalize Data (repeat the pipeline on the microglia-only object)
zeisel_micro <- NormalizeData(zeisel_micro, normalization.method = "LogNormalize", scale.factor = 10000)
zeisel_micro <- FindVariableFeatures(zeisel_micro, selection.method = "mean.var.plot", mean.cutoff = c(0.0125, 3), dispersion.cutoff = c(1, Inf))
# Scale the data
all_genes <- rownames(zeisel_micro)
zeisel_micro <- ScaleData(zeisel_micro, features = all_genes, vars.to.regress = c("nCount_RNA", "percent_mito"))
zeisel_micro <- RunPCA(zeisel_micro, features = VariableFeatures(object = zeisel_micro), npcs = 100)
ElbowPlot(zeisel_micro, ndims = 40)
beep(sound = 2)
zeisel_micro <- JackStraw(zeisel_micro)
zeisel_micro <- ScoreJackStraw(zeisel_micro, dims = 1:20)
JackStrawPlot(zeisel_micro, dims = 1:20)
beep(sound = 2)
DimHeatmap(zeisel_micro, dims = 12)
# Clustering (12 PCs this time)
zeisel_micro <- FindNeighbors(zeisel_micro, dims = 1:12)
zeisel_micro <- FindClusters(zeisel_micro, resolution = 0.2)
zeisel_micro <- RunTSNE(zeisel_micro, dims = 1:12)
DimPlot(zeisel_micro, label = TRUE, reduction = "tsne", label.size = 5)
# Examine Clusters
markers <- FindAllMarkers(zeisel_micro)
# Save Object ---------------------------------------------------------
write_rds(zeisel_micro, "Final_RDS_Objects/zeisel_micro_reanalyzed_clustered_FINAL.RDS")
# Plot Activation Score -----------------------------------------------
zeisel_micro <- read_rds("Final_RDS_Objects/zeisel_micro_reanalyzed_clustered_FINAL.RDS")
zeisel_micro <- AddModuleScore(zeisel_micro, features = shared_sig, name = "sg")
# One gene not found Hist2h2aa1. Old synonym not found. Excluded from score.
zeisel_micro <- AddModuleScore(zeisel_micro, features = homeostatic_mg, name = "mg")
# Plot Scores ("sg1"/"mg1" are the metadata columns AddModuleScore creates)
p <- FeaturePlot(object = zeisel_micro, features = "sg1", cols = c("navy", "gold"), pt.size = 6, reduction = "tsne")
p
ggsave("final_module_plots/zeisel_micro_sg1.pdf", height = 8, width = 9.2)
p <- FeaturePlot(object = zeisel_micro, features = "mg1", cols = c("navy", "gold"), pt.size = 6, reduction = "tsne")
p
ggsave("final_module_plots/zeisel_micro_mg1.pdf", height = 8, width = 9.2)
# Save Module Scored Object
write_rds(zeisel_micro, "Final_RDS_Objects/zeisel_micro_module_scored_FINAL.RDS")
# Check cell number
stats_obj <- read_rds("Final_RDS_Objects/zeisel_micro_module_scored_FINAL.RDS")
stats <- Cluster_Stats_All_Samples(stats_obj)
|
508e31d8cfb5d294a2f4572e84e387e37abbaa28
|
f2cc6cedcabacbd3700bb46ddcefce0bc1215300
|
/one/AnalyzeGDS2771_RollNumber.R
|
5d032a409b9df5a7fe2d67a3d2015befe055f361
|
[] |
no_license
|
hrushikesht/ee622-Assignments
|
e438200cb58207af8d4045531de3b214ba6b6cef
|
8bb53c26178fcd85e4bee4a0d770e999769b4108
|
refs/heads/master
| 2020-12-02T09:28:29.624147
| 2016-08-24T14:56:20
| 2016-08-24T14:56:20
| 66,467,956
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,177
|
r
|
AnalyzeGDS2771_RollNumber.R
|
# Assignment scaffold: load NCBI GEO dataset GDS2771 (airway epithelium /
# lung cancer study) into an ExpressionSet and build a data frame whose
# first column is disease.state and remaining columns are probe values.
# 1. Install packages to read the NCBI's GEO microarray SOFT files in R
# 1.Ref. http://www2.warwick.ac.uk/fac/sci/moac/people/students/peter_cock/r/geo/
# 1.1. Uncomment only once to install stuff
#source("https://bioconductor.org/biocLite.R")
#biocLite("GEOquery")
#biocLite("Affyhgu133aExpr")
# 1.2. Use packages # Comment to save time after first run of the program in an R session
library(Biobase)
library(GEOquery)
# Add other libraries that you might need below this line
# 2. Read data and convert to dataframe. Comment to save time after first run of the program in an R session
# 2.1. Once download data from ftp://ftp.ncbi.nlm.nih.gov/geo/datasets/GDS2nnn/GDS2771/soft/GDS2771.soft.gz
# 2.Ref.1. About data: http://www.ncbi.nlm.nih.gov/sites/GDSbrowser?acc=GDS2771
# 2.Ref.2. Study that uses that data http://www.ncbi.nlm.nih.gov/pmc/articles/PMC3694402/pdf/nihms471724.pdf
# 2.Warning. Note that do not use FULL SOFT, only SOFT, as mentioned in the link above. 2.2.R. http://stackoverflow.com/questions/20174284/error-in-gzfilefname-open-rt-invalid-description-argument
gds2771 <- getGEO(filename='C:/Users/Amit/Downloads/GDS2771.soft.gz') # Make sure path is correct as per your working folder. Could be './GDS2771.soft.gz'
eset2771 <- GDS2eSet(gds2771) # See http://www2.warwick.ac.uk/fac/sci/moac/people/students/peter_cock/r/geo/
# 2.2. View data (optional; can be commented). See http://www2.warwick.ac.uk/fac/sci/moac/people/students/peter_cock/r/geo/
eset2771 # View some meta data
featureNames(eset2771)[1:10] # View first feature names
sampleNames(eset2771) # View patient IDs. Should be 192
pData(eset2771)$disease.state #View disease state of each patient. Should be 192
# 2.3. Convert to data frame by concatenating disease.state with data, using first row as column names, and deleting first row
# (expression table is transposed so rows = patients, columns = probes)
data2771 <- cbind2(c('disease.state',pData(eset2771)$disease.state),t(Table(gds2771)[,2:194]))
colnames(data2771) = data2771[1, ] # the first row will be the header
data2771 = data2771[-1, ]
# 2.4. View data frame (optional; can be commented)
View(data2771)
# WRITE YOUR CODE BELOW THIS LINE
|
f5e45149faf75995d0b82a2d57d47ceaf0641341
|
145d82d84702fc8794f9db674e041c5bc4205a75
|
/1a parte/flex_files/flex_cart.R
|
95dff56c025617e240c35344927e6dbee7477a7f
|
[] |
no_license
|
sanchezvivi/instacart
|
d1ccc2e86a0b456e19f8e11979320e8a6317b594
|
d52cc176aff44f559e29d6808161b0f8a54c4cf2
|
refs/heads/master
| 2022-12-23T00:55:48.132426
| 2020-09-05T21:55:48
| 2020-09-05T21:55:48
| 285,945,756
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 36,120
|
r
|
flex_cart.R
|
# Bibliotecas -------------------------------------------------------------
# biblios <- c('tidyverse','dplyr', 'ggplot2', 'lubridate', 'stringr',
# 'inspectdf', 'skimr', 'naniar', 'visdat', 'tidymodels',
# 'klaR', 'corrplot', 'NetCluster', 'factoextra', 'maptree', 'treemap', 'DT','patchwork')
#biblios <- c('tidyverse', 'stringr', 'janitor', 'inspectdf', 'dplyr', 'skimr', 'plotly', 'RcppRoll', 'lubridate', 'factoextra')
#
#for (i in biblios){
# if(!require(i, character.only = TRUE)){install.packages(paste0(i)); library(i, character.only = TRUE)}
#}
#
# Importando os dados em .csv, usando o read.csv --------------------------
#path <- "data\\"
#path <- "data/"
#file_aisles <- "aisles.csv"
#base_aisles <- read.csv(paste(path,file_aisles,sep = ""))
#
#file_dept <- "departments.csv"
#base_dept <- read.csv(paste(path,file_dept,sep = ""))
#
#file_ord_prior <- "order_products__prior.csv"
#base_ord_prior <- read.csv(paste(path,file_ord_prior,sep = "")) %>% glimpse()
#
#file_ord_train <- "order_products__train.csv"
#base_ord_train <- read.csv(paste(path,file_ord_train,sep = "")) %>% glimpse()
#
#file_orders <- "orders.csv"
#base_orders <- read.csv(paste(path,file_orders,sep = "")) %>% glimpse()
#
#file_products <- "products.csv"
#base_products <- read.csv(paste(path,file_products,sep = "")) %>% glimpse()
#
#x <- tibble(carac = stringr::str_length(base_aisles$aisle))
#
#x %>% ggplot(aes(carac)) +
# geom_histogram(bins = 20)
#
# Local aliases for the raw Instacart tables loaded elsewhere in the
# project (aisles_raw, departments_raw, ...); the rest of this script
# refers to them through these `base_*` names.
base_aisles <- aisles_raw
base_dept <- departments_raw
base_ord_prior <- op_prior_raw
base_ord_train <- op_train_raw
base_orders <- orders_raw
base_products <- products_raw
# Iniciando Pré-Análises --------------------------------------------------
#sum(!(base_orders$order_id %in% base_ord_prior$order_id))
# base_orders_rec <- base_orders %>% filter(!is.na(days_since_prior_order))
#
#
# # Incluindo o produto na base de pedidos anteriores
# #base_ord_prior %>% left_join(products_raw)
# base_ord_prior_prod <- base_ord_prior %>% left_join(base_products)
# # base_ord_prior_prod <- base_ord_prior_prod[,1:5]
#
# #rm(base_ord_)
#
# base_orders_rec_count <- base_orders_rec %>% group_by(user_id) %>% count() %>% transmute(compras = n)
#
# base_orders_rec_count_10 <- base_orders_rec_count %>% filter(compras <= 10)
# base_orders_rec_count_10 %>% nrow()
#
# base_orders_rec_count_10_complete <- base_orders_rec_count_10 %>% left_join(base_orders_rec)
#
# base_orders_rec_count_10_complete_prod <- base_orders_rec_count_10_complete %>% left_join(base_ord_prior_prod)
#
# base_orders_rec_count_10_complete_prod_dept <- base_orders_rec_count_10_complete_prod %>% left_join(base_dept)
#
# base_graf1 <- base_orders_rec_count_10_complete_prod_dept %>% group_by(department) %>% count() %>% transmute(quantidade = n) %>% arrange(desc(quantidade))
##base_graf1[1:10,] %>% ggplot(aes(x = reorder(department, -quantidade), y = quantidade)) +
# # geom_col(na.rm = TRUE)
#
#
#x <- base_orders_rec_count %>% filter(compras > 10)
#
##mean(x$compras)
#
#base_orders_rec_count_10_complete$compras %>% mean()
#
#
#base_orders %>% filter(is.na(days_since_prior_order)) %>% count()
#
# Analise 2020-08-10 ------------------------------------------------------
## Verificações_Iniciais ---------------------------------------------------
# Verificando se na base_train existem elementos que já estão nas outras bases.
#base_orders %>% skim()
#orders_raw %>% group_by(user_id) %>% summarise(n_pedidos = max(order_number))
#
# n_vezes_mais30 <- orders_raw %>% dplyr::filter(days_since_prior_order == 30) %>% group_by(user_id) %>% summarise(n_30 = n())
#
#max(n_vezes_mais30$n_30)
#
#orders_raw$days_since_prior_order[base_orders$days_since_prior_order == -1] <- -1
#
#base_orders %>% summary()
#
#base_ord_train %>% skim()
#
#nrow(base_orders)
#nrow(base_ord_prior)
#nrow(base_ord_train)
#
##sum(base_ord_prior$order_id %in% base_ord_train$order_id)
## Náo há interseção entre os order_id dessas bases
#
##sum(base_ord_prior$order_id %in% base_orders$order_id)
## Há interseção total entre os 'order_id' da tabela 'order_pior' na tabela 'orders'
#
##sum(base_ord_train$order_id %in% base_orders$order_id)
## há interseção total entre os 'order_id' da tabela 'order_train' na tabela 'orders'
#
# orders_in_ord_train <- sum(base_orders$order_id %in% base_ord_train$order_id)
# #
# orders_in_ord_prior <- sum(base_orders$order_id %in% base_ord_prior$order_id)
# Ao se buscar os order_id da tabela 'orders' na tabela 'order_prior', encontram-se 3214874 (93,4%).
# Ao se buscar os order_id da tabela 'orders' na tabela 'order_train', encontram-se 131209 (3,84%).
# ou seja, existem 75000 (2,19%) de pedidos da tabela 'orders' que não estão em nenhuma das bases ('order_prior' nem 'order_train'). Esse 'order_id' faltantes,
# são pertencentes a base 'order_train', que não está disponível.
#orders_in_ord_prior/nrow(base_orders)
#orders_in_ord_train/nrow(base_orders)
#
#1 - ((orders_in_ord_prior+orders_in_ord_train)/nrow(base_orders))
#
#nrow(base_orders)-(orders_in_ord_prior+orders_in_ord_train)
# Inicio Código -----------------------------------------------------------
# CONCLUSÔES DA ANÁLISE PRÉVIA:
# As bases order_train e order_prior são excludentes, ou seja, os order_id não possuem interseção.
# A base 'order_prior' tem todos os seus order_id encontrados na base 'orders', bem como a base 'order_train'.
# Existem 75k 'order_id', que pertencem a base de testes. Contudo, como essa base de teste não está disponível, podemos remover esses 75k registros.
# AÇÕES
# 1 - Remover 75k registros da base 'orders'.
# 2 - As bases order_train e order_prior, poderão ser mescladas, uma vez que não iremos usar a base para predição.
# Removendo os registros da tabela `orders` que estão categorizados como 'test', uma vez que essas 'order_id' não possuem dados correspondentes nas bases de product_order
# Drop the 'test' orders from `orders`: their order_id values have no
# matching rows in either order_products table (see analysis above), so
# they carry no basket information.
base_orders_cl <- orders_raw %>% filter(eval_set != 'test')

# Stack the 'prior' and 'train' order_products tables into one table.
# Their order_id sets are disjoint, so union() is effectively a row bind.
base_ord_geral <- dplyr::union(op_prior_raw, op_train_raw)
#base_ord_geral <- base_ord_prior

# Attach aisle/department labels to each product, then keep only
# product_id, product_name and aisle (columns 1, 2 and 5 of the join).
base_products_names <- products_raw %>%
  left_join(aisles_raw) %>%
  left_join(departments_raw)
base_products_names <- base_products_names[, c(1, 2, 5)]

# Enrich every ordered item with the product labels built above.
base_ord_geral_prod <- base_ord_geral %>% left_join(base_products_names)
# Filtro Média Móvel Vivi -------------------------------------------------
# Rolling-average recency filter: score each user's purchase cadence and
# split orders into "recurrent" (fast repurchasers) vs "non-recurrent".
# Drop each user's first order (days_since_prior_order is undefined there),
# then compute a 5-order rolling mean of the gap between orders, per user.
# NOTE(review): `na.rm = T` should be spelled `na.rm = TRUE` (T is reassignable).
base_orders_cl_mm <- base_orders_cl %>%
filter(order_number != 1) %>%
arrange(user_id, order_number) %>%
mutate(order_hour_of_day = as.numeric(order_hour_of_day)) %>%
group_by(user_id) %>%
mutate(days_ma = roll_mean(days_since_prior_order, 5, fill = NA, na.rm = T)) %>%
ungroup() %>%
glimpse
# Most recent orders first within each user.
base_orders_cl_mm <- base_orders_cl_mm %>% arrange(user_id,-order_number)
# users_last_day_ma <- base_orders_cl_mm %>% dplyr::group_by(user_id) %>% summarise(ult_ordem = first(order_number), days_ma = nth(days_ma,3)) %>% glimpse()
# Split at an 8-day rolling average: < 8 days between orders = recurrent.
base_orders_cl_rec <- base_orders_cl_mm %>% filter(days_ma <8)
base_orders_cl_not_rec <- base_orders_cl_mm %>% filter(days_ma >=8)
# Keep only the basket lines belonging to each group's orders.
base_ord_geral_prod_rec <- base_ord_geral_prod %>% dplyr::filter(order_id %in% base_orders_cl_rec$order_id)
base_ord_geral_prod_not_rec <- base_ord_geral_prod %>% dplyr::filter(order_id %in% base_orders_cl_not_rec$order_id)
# Bring in order metadata (user_id, days_ma, ...) and keep columns by
# position. NOTE(review): c(1:8,10,13) assumes a fixed column order after
# left_join() — fragile; confirm the positions before changing the joins.
base_ord_geral_prod_rec2 <- base_ord_geral_prod_rec %>% left_join(base_orders_cl_rec)
base_ord_geral_prod_rec2 <- base_ord_geral_prod_rec2[,c(1:8,10,13)]
base_ord_geral_prod_not_rec2 <- base_ord_geral_prod_not_rec %>% left_join(base_orders_cl_not_rec)
base_ord_geral_prod_not_rec2 <- base_ord_geral_prod_not_rec2[,c(1:8,10,13)]
# HIPOTESE
# Compras que tem recorrência, provavelmente é feita, repetindo uma cesta anterior.
# Compras com menor recorrência tem maior variaçao na cesta de compras
# Rodando o modelo para os cem principais recorrentes e os 100 piores recorrentes
#base_orders_cl_not_rec %>%
# na.omit() %>%
# ggplot(aes(x = days_ma)) +
# geom_bar(fill = 'darkgreen') +
# geom_vline(xintercept = 8, color = 'orange',
# linetype = 'dashed') +
# theme_minimal()
# Histograma de Produtos comprados por Ordem ------------------------------
# Per-order totals for non-recurrent customers: basket size (quant_prod)
# and how many of those items were reorders (unid_recompra).
order_n_total <- base_ord_geral_prod_not_rec %>% group_by(order_id) %>% summarise(quant_prod = n(), unid_recompra = sum(reordered))
# Largest basket size; used to size the histogram bins below.
bin <- order_n_total$quant_prod %>% max()
#order_n_total %>% ggplot(aes(x = quant_prod)) +
# geom_histogram(bins = bin/10) +
# scale_y_sqrt()
# x4 <- function(x) x^4
#
# x_4<- function(x) sqrt(sqrt(x))
# Funções de Transformação ------------------------------------------------
# Paired power/root helpers used as custom axis transforms in the ggplot
# calls below (scales::trans_new needs a transform and its inverse).
x2 <- function(x) {
  x^2
}
x_2 <- function(x) {
  sqrt(x)
}
x4 <- function(x) {
  x^4
}
x_4 <- function(x) {
  sqrt(sqrt(x))
}
#order_n_total %>% ggplot() +
# geom_histogram(aes(x = quant_prod), bins = bin/10,) +
# scale_y_continuous(trans = scales::trans_new(name = "sqrt_sqrt",transform = x_4, inverse = x4)) +
# labs(title = "Histograma de No Produtos comprados")
#
#order_n_total %>% ggplot(aes(x = quant_prod)) +
# geom_freqpoly(bins = bin/10) +
# scale_y_continuous(trans = scales::trans_new(name = "sqrt_sqrt",transform = x_4, inverse = x4)) +
# labs(title = "Histograma de No Produtos comprados")
# Produtos Mais Recorrentes ----------------------------------------------------
# RECURRENT customers: for each cart position (add_to_cart_order), what
# fraction of the items placed at that position are reorders?
rec_ord_cart <- base_ord_geral_prod_rec2 %>% group_by(add_to_cart_order) %>%
summarise(recorrencias = sum(reordered),
total = n()) %>%
mutate(rec_perc = recorrencias/total) %>%
arrange(add_to_cart_order)
#rec_ord_cart %>% ggplot(aes(add_to_cart_order, rec_perc)) +
# geom_col() +
# labs(title = "Gráfico de ordem_carrinho x percentual de produtos recorrentes")
#
# Produtos Menos Recorrentes ----------------------------------------------------
# NON-recurrent customers: for each cart position, what fraction of the
# items placed at that position are first-time (non-reordered) purchases?
nao_rec_ord_cart <- base_ord_geral_prod_not_rec2 %>% group_by(add_to_cart_order) %>%
summarise(total = n(),
nao_recorrencia = total - sum(reordered)) %>%
mutate(nao_rec_perc = nao_recorrencia/total) %>%
arrange(add_to_cart_order)
#nao_rec_ord_cart %>% ggplot(aes(add_to_cart_order, nao_rec_perc)) +
# geom_col() +
# labs(title = "Gráfico de ordem_carrinho x percentual de produtos nao_recorrentes")
#
# Product-level view for LOW-recurrence customers: at which cart position
# does each product tend to be added, and how often is it a reorder there?
prod_ord_cart <- base_ord_geral_prod_not_rec2 %>% dplyr::group_by(product_name, add_to_cart_order) %>%
summarise(quantidade = n(),
recorrencias = sum(reordered)) %>%
mutate(rec_perc = recorrencias/quantidade) %>%
arrange(-quantidade)
# Average number of reordered products per order (plot annotation below).
a <- base_ord_geral_prod_not_rec2$order_id %>% n_distinct() # number of orders
b <- base_ord_geral_prod_not_rec2$reordered %>% sum() # number of reordered items
n_prod1 <- b/a
# "Média" (accented) keeps this label consistent with texto2 further down;
# paste0() replaces the paste(..., sep = "") idiom with identical output.
(texto1 <- paste0("Média Produtos/Ordem = ", round(n_prod1,2)))
# Per product: how its reorders distribute across cart positions.
prod_ord_cart2 <- prod_ord_cart %>% dplyr::group_by(product_name) %>% mutate(perc = recorrencias/sum(recorrencias))
# Top-50 products by total reorders; the heatmap is restricted to these.
prod_ord_cart2_list <- prod_ord_cart2 %>% group_by(product_name) %>% summarise(recorrencias_total = sum(recorrencias)) %>% arrange(-recorrencias_total)
prod_ord_cart2_list <- prod_ord_cart2_list[1:50,1]
prod_100_n_rec <- prod_ord_cart2 %>% right_join(prod_ord_cart2_list)
# prod_100_n_rec %>% ggplot() +
# geom_tile(aes(product_name,add_to_cart_order, fill = perc*100)) +
# scale_fill_gradient2(low = "white", high = ic_cols("green"), limits = c(0,40),trans = scales::trans_new(name = "quad",transform = x2, inverse = x_2))+
# theme(axis.text.x = element_text(angle = 90, size = 8, hjust = 1)) +
# labs(title = "Heatmap de Produtos x Cart_Order para clientes Não-Recorrentes", fill = "%", x = "Produto", y="Ordem_Cart") +
# theme(axis.text.x = element_text(hjust = 1.0, vjust = 0.3)) +
# geom_hline(yintercept = n_prod1, color = "orange") +
# scale_y_continuous(limits = c(0,20),expand = c(0,0)) +
# geom_text(aes(x = 5, y = n_prod1+0.1, label = texto1 ), size = 3, color = 'orange', hjust = 0, vjust = 0)
# Heatmap for LOW-recurrence customers: top-50 products (x) vs cart
# position (y); fill = % of the product's reorders at that position, on a
# quadratic colour scale (x2/x_2 transform pair). The orange line marks
# the mean number of reordered products per order (n_prod1/texto1).
# `ic_cols()` is a project palette helper defined elsewhere.
# NOTE(review): `size.title` is not a labs() argument — likely ignored.
# NOTE(review): the second theme(axis.text.x = ...) merges with the first;
# verify the angle = 90 setting survives as intended.
hm_n_rec <- prod_100_n_rec %>% ggplot() +
geom_tile(aes(product_name,add_to_cart_order, fill = perc*100)) +
scale_fill_gradient2(low = "white", high = ic_cols("green"), limits = c(0,40),trans = scales::trans_new(name = "quad",transform = x2, inverse = x_2))+
theme(axis.text.x = element_text(angle = 90, size = 8, hjust = 1)) +
labs(title = "Heatmap de Produtos x Cart_Order para clientes Pouco Recorrentes", size.title = 2, fill = "%", x = "Produto", y="Ordem_Cart") +
theme(axis.text.x = element_text(hjust = 1.0, vjust = 0.3)) +
geom_hline(yintercept = n_prod1, color = "orange") +
scale_y_continuous(limits = c(0,20),expand = c(0,0)) +
geom_text(aes(x = 5, y = n_prod1+0.1, label = texto1 ), size = 3, color = 'orange', hjust = 0, vjust = 0)
# ggplotly(hm_n_rec, tooltip = "perc")
# Product-level view for HIGHLY recurrent customers: at which cart position
# does each product tend to be added, and how often is it a reorder there?
prod_ord_cart_rec <- base_ord_geral_prod_rec2 %>% dplyr::group_by(product_name, add_to_cart_order) %>%
summarise(quantidade = n(),
recorrencias = sum(reordered)) %>%
mutate(rec_perc = recorrencias/quantidade) %>%
arrange(-quantidade)
# Average number of reordered products per order (plot annotation below).
a <- base_ord_geral_prod_rec2$order_id %>% n_distinct() # number of orders
b <- base_ord_geral_prod_rec2$reordered %>% sum() # number of reordered items
n_prod2 <- b/a
# paste0() replaces paste(..., sep = "") with identical output.
(texto2 <- paste0("Média Produtos/Ordem = ", round(n_prod2,2)))
# Per product: how its reorders distribute across cart positions.
prod_ord_cart_rec2 <- prod_ord_cart_rec %>% dplyr::group_by(product_name) %>% mutate(perc = recorrencias/sum(recorrencias))
# Top-50 products by total reorders; the heatmap is restricted to these.
prod_ord_cart_rec2_list <- prod_ord_cart_rec2 %>% group_by(product_name) %>% summarise(recorrencias_total = sum(recorrencias)) %>% arrange(-recorrencias_total)
prod_ord_cart_rec2_list <- prod_ord_cart_rec2_list[1:50,1]
prod_100_rec <- prod_ord_cart_rec2 %>% right_join(prod_ord_cart_rec2_list)
# prod_100_rec %>% ggplot() +
# geom_tile(aes(product_name,add_to_cart_order, fill = perc*100)) +
# scale_fill_gradient2(low = "white", high = ic_cols("green"), limits = c(0,40), trans = scales::trans_new(name = "quad",transform = x2, inverse = x_2))+
# theme(axis.text.x = element_text(angle = 90, size = 8, hjust = 1)) +
# labs(title = "Heatmap de Produtos x Cart_Order para clientes Recorrentes") +
# theme(axis.text.x = element_text(hjust = 1.0, vjust = 0.3)) +
# geom_hline(yintercept = n_prod2, color = "orange") +
# geom_text(aes(x = 5, y = n_prod2+0.1, label = texto2 ), size = 3, color = 'orange', hjust = 0, vjust = 0) +
# scale_y_continuous(limits = c(0,20),expand = c(0,0))
# Heatmap for RECURRENT customers: top-50 products (x) vs cart position
# (y); fill = % of the product's reorders at that position, quadratic
# colour scale (x2/x_2 transform pair). The orange line marks the mean
# number of reordered products per order (n_prod2/texto2).
# `ic_cols()` is a project palette helper defined elsewhere.
# NOTE(review): the second theme(axis.text.x = ...) merges with the first;
# verify the angle = 90 setting survives as intended.
hm_rec <- prod_100_rec %>% ggplot() +
geom_tile(aes(product_name,add_to_cart_order, fill = perc*100)) +
scale_fill_gradient2(low = "white", high = ic_cols("green"), limits = c(0,40), trans = scales::trans_new(name = "quad",transform = x2, inverse = x_2))+
theme(axis.text.x = element_text(angle = 90, size = 8, hjust = 1)) +
labs(title = "Heatmap de Produtos x Cart_Order para clientes Recorrentes", fill = "%", x = "Produto", y="Ordem_Cart") +
theme(axis.text.x = element_text(hjust = 1.0, vjust = 0.3)) +
geom_hline(yintercept = n_prod2, color = "orange") +
geom_text(aes(x = 5, y = n_prod2+0.1, label = texto2 ), size = 3, color = 'orange', hjust = 0, vjust = 0) +
scale_y_continuous(limits = c(0,20),expand = c(0,0))
#
# # Análise de HClust -------------------------------------------------------
# # Nova Análise de HClust, onde agora o percentual é feito de maneira diferente. Será feita a contabilização dos percentuais por
# # order de inclusão no carrinho. Ou seja, cada ordem_cart terá um total de produtos que somará 100% e cada produtos terá seu percentual
# # na posição do carrinho.
#
# # Buscando os 100 principais produtos da base geral
# library(tidymodels)
#
#
# base_ord_geral_prod_total <- base_ord_geral_prod %>% left_join(base_orders_cl_mm)
# # base_ord_geral_prod_total2 <- base_ord_geral_prod_total[,c(1:8,10,14)]
# base_ord_geral_prod_total2 <- base_ord_geral_prod_total
#
# prod_ord_cart_geral <- base_ord_geral_prod_total2 %>% dplyr::group_by(product_name, add_to_cart_order) %>%
# summarise(quantidade = n(),
# recorrencias = sum(reordered)) %>%
# mutate(rec_perc = recorrencias/quantidade) %>%
# arrange(-quantidade)
#
# prod_ord_cart_geral2 <- prod_ord_cart_geral %>% dplyr::group_by(product_name) %>% mutate(perc = recorrencias/sum(recorrencias))
#
# prod_ord_cart_geral2_list <- prod_ord_cart_geral2 %>% group_by(product_name) %>% summarise(recorrencias_total = sum(recorrencias)) %>% arrange(-recorrencias_total)
#
# prod_ord_cart_geral2_list <- prod_ord_cart_geral2_list[1:50,1]
#
# prod_100_geral <- prod_ord_cart_geral2 %>% right_join(prod_ord_cart_geral2_list)
#
#
#
# ord_cart_prod2 <- prod_ord_cart %>% right_join(prod_ord_cart_geral2_list) %>% dplyr::group_by(add_to_cart_order) %>% mutate(perc = recorrencias/sum(recorrencias))
# mat_similarity_ord <- ord_cart_prod2 %>% dplyr::select(product_name,add_to_cart_order, perc) %>% pivot_wider(names_from = add_to_cart_order, values_from = perc)
# # mat_similarity_ord <- mat_similarity_ord[1:100,]/
#
#
# # Removendo os NAs
# mat_similarity_ord <- mat_similarity_ord %>% replace(is.na(.),0)
#
# # Normalizando os dados
# receita <- mat_similarity_ord %>% recipe(product_name ~ .) %>%
# step_normalize(all_numeric(), -all_outcomes())
#
# prep_receita <- prep(receita)
#
# mat_similarity_ord_norm <- juice(prep_receita)[[ncol(juice(prep_receita))]] %>% cbind(juice(prep_receita)[,-ncol(juice(prep_receita))])
#
# # Coletando as colunas com NA ou NaN
# x <- inspect_na(mat_similarity_ord_norm)
# col_remove <- x$col_name[x$pcnt == 100]
#
# # Removendo as colunas com NA ou NaN
# mat_similarity_ord_norm <- mat_similarity_ord_norm %>% select(-c(col_remove))
#
# class(mat_similarity_ord_norm$.)
#
# # dist_mat <- get_dist(mat_similarity_ord_norm, upper = TRUE, diag = TRUE)
# #
# # n <- 5
# # vet_clust <- c(2:((nrow(mat_similarity_ord_norm)-1)/n))
# # vet_clust <- vet_clust * n
# # vet_clust2 <- c(c(2:9),vet_clust)
# # silho <- tibble(k = numeric(), silho_avg = numeric(), negatives = numeric(), singulares = numeric())
# #
# # for (i in vet_clust2){
# # cutted <- hcut(mat_similarity_ord_norm, hc_func = "hclust", hc_method = "ward.D2", k=i, graph = TRUE)
# # negativos <- sum(cutted$silinfo$widths$sil_width < 0) / length(cutted$silinfo$widths$sil_width)
# # sing <- nrow(as_tibble(cutted$cluster) %>% group_by(value) %>% count() %>% filter(n == 1))
# # silho <- silho %>% bind_rows(c(k = i, silho_avg = cutted$silinfo$avg.width, negatives = negativos, singulares = sing))
# # print(i)
# # }
# #
# # best_k <- silho$k[silho$silho_avg == max(silho$silho_avg)]
# # best_k_neg <- silho$k[silho$negatives == min(silho$negatives)]
# #
# # p1 <- silho %>% ggplot(aes(x = k)) +
# # geom_line(aes(y = silho_avg), color = "blue") +
# # # geom_rect(aes(xmin = 35, xmax = 53, ymin = 0.33, ymax = 0.35), alpha = 1/500, color = "red", fill = "green") +
# # # geom_vline(xintercept = c(35, 53), show.legend = TRUE) +
# # geom_line(aes(y = singulares/40), color = "red") +
# # scale_y_continuous(
# # name = "Avg_Silh",
# # sec.axis = sec_axis(trans =~.*40, name = "n_Sing_Clust")
# # ) +
# # geom_vline(xintercept = 6)
# # p1
#
# k_select <- 4
# cutted_ord_not_rec <- hcut(mat_similarity_ord_norm, hc_func = "hclust", hc_method = "ward.D2", k=k_select, graph = TRUE)
#
# cutted_ord_not_rec$labels <- as.character(mat_similarity_ord_norm$.)
#
# # fviz_dend(cutted_ord_not_rec, k = k_select,
# # cex = 0.6,
# # type = "rectangle",
# # k_colors = c("darkgreen","orange"),
# # labels_track_height = 0.8,
# # # k_colors = c(1:4,6),
# # ggtheme = theme_light(),
# # main = "Dendrograma de Produtos - Clientes Não Recorrentes",
# # ylim = c(-30,60),
# # )
#
#
#
#
#
#
#
#
# # Hcluster com clientes recorrentes
# ord_cart_prod_rec2 <- prod_ord_cart_rec %>% right_join(prod_ord_cart_geral2_list) %>% dplyr::group_by(add_to_cart_order) %>% mutate(perc = recorrencias/sum(recorrencias))
# mat_similarity_ord_rec <- ord_cart_prod_rec2 %>% dplyr::select(product_name,add_to_cart_order, perc) %>% pivot_wider(names_from = add_to_cart_order, values_from = perc)
# # mat_similarity_ord_rec <- mat_similarity_ord_rec[1:100,]
#
#
# # Removendo os NAs
# mat_similarity_ord_rec <- mat_similarity_ord_rec %>% replace(is.na(.),0)
#
# # Normalizando os dados
# receita <- mat_similarity_ord_rec %>% recipe(product_name ~ .) %>%
# step_normalize(all_numeric(), -all_outcomes())
#
# prep_receita <- prep(receita)
#
# mat_similarity_ord_rec_norm <- juice(prep_receita)[[ncol(juice(prep_receita))]] %>% cbind(juice(prep_receita)[,-ncol(juice(prep_receita))])
#
# # Coletando as colunas com NA ou NaN
# x <- inspect_na(mat_similarity_ord_rec_norm)
# col_remove <- x$col_name[x$pcnt == 100]
#
# # Removendo as colunas com NA ou NaN
# mat_similarity_ord_rec_norm <- mat_similarity_ord_rec_norm %>% select(-c(col_remove))
#
#
#
#
# # dist_mat <- get_dist(mat_similarity_ord_rec_norm, upper = TRUE, diag = TRUE)
#
# # n <- 5
# # vet_clust <- c(2:((nrow(mat_similarity_ord_rec_norm)-1)/n))
# # vet_clust <- vet_clust * n
# # vet_clust2 <- c(c(2:9),vet_clust)
# # silho <- tibble(k = numeric(), silho_avg = numeric(), negatives = numeric(), singulares = numeric())
# #
# # for (i in vet_clust2){
# # cutted <- hcut(mat_similarity_ord_rec_norm, hc_func = "hclust", hc_method = "ward.D2", k=i, graph = TRUE)
# # negativos <- sum(cutted$silinfo$widths$sil_width < 0) / length(cutted$silinfo$widths$sil_width)
# # sing <- nrow(as_tibble(cutted$cluster) %>% group_by(value) %>% count() %>% filter(n == 1))
# # silho <- silho %>% bind_rows(c(k = i, silho_avg = cutted$silinfo$avg.width, negatives = negativos, singulares = sing))
# # print(i)
# # }
# #
# # best_k <- silho$k[silho$silho_avg == max(silho$silho_avg)]
# # best_k_neg <- silho$k[silho$negatives == min(silho$negatives)]
# #
# # p1 <- silho %>% ggplot(aes(x = k)) +
# # geom_line(aes(y = silho_avg), color = "blue") +
# # # geom_rect(aes(xmin = 35, xmax = 53, ymin = 0.33, ymax = 0.35), alpha = 1/500, color = "red", fill = "green") +
# # # geom_vline(xintercept = c(35, 53), show.legend = TRUE) +
# # geom_line(aes(y = singulares/40), color = "red") +
# # scale_y_continuous(
# # name = "Avg_Silh",
# # sec.axis = sec_axis(trans =~.*40, name = "n_Sing_Clust")
# # ) +
# # geom_vline(xintercept = 4)
# # p1
#
# k_select <- 4
# cutted_ord_rec <- hcut(mat_similarity_ord_rec_norm, hc_func = "hclust", hc_method = "ward.D2", k=k_select, graph = TRUE)
#
# cutted_ord_rec$labels <- as.character(mat_similarity_ord_rec_norm$.)
#
# # fviz_dend(cutted_ord_rec, k = k_select,
# # cex = 0.6,
# # type = "rectangle",
# # k_colors = c("darkgreen","orange"),
# # labels_track_height = 0.8,
# # # k_colors = c(1:4,6),
# # ggtheme = theme_light(),
# # main = "Dendrograma de Produtos - Clientes Recorrentes",
# # ylim = c(-30,60),
# # )
#
#
#
#
#
# library(dendextend)
#
# dend_not_rec <- as.dendrogram(cutted_ord_not_rec)
# dend_rec <- as.dendrogram(cutted_ord_rec)
# tang <- dendlist(dend_not_rec, dend_rec)
# dendlist(dend_not_rec, dend_rec) %>%
# untangle(method = "step1side") %>% # Find the best alignment layout
# tanglegram(labels_cex = 0.6,
# margin_inner = 15,
# k_labels = 4,
# k_branches = 4,
# axes = FALSE,
# lwd = 2,
# main_left = "Produtos - Clientes Pouco Recorrentes",
# cex_main_left = 1,
# main_right = "Produtos - Clientes Recorrentes",
# cex_main_right = 1,
# dLeaf = 0.1
# )
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
# # Montando um hclust de produto por order de carrinho para produtos de clientes pouco recorrentes
# mat_similarity <- prod_ord_cart2 %>% dplyr::select(product_name,add_to_cart_order, perc) %>%
# pivot_wider(names_from = add_to_cart_order, values_from = perc)
#
# mat_similarity2 <- mat_similarity[1:200,]
#
# # Montando um hclust de produto por order de carrinho para produtos de clientes MAIS recorrentes
# mat_similarity_rec <- prod_ord_cart_rec2 %>% dplyr::select(product_name,add_to_cart_order, perc) %>%
# pivot_wider(names_from = add_to_cart_order, values_from = perc)
#
# mat_similarity_rec2 <- mat_similarity_rec[1:200,]
#
# # vet_clust <- c(1:((nrow(mat_similarity2)-2)/20))
# # vet_clust <- c(1:9)
# # vet_clust <- vet_clust * 20
# # vet_clust2 <- c(c(2:10),vet_clust)
# # silho <- tibble(k = numeric(), silho_avg = numeric(), negatives = numeric(), singulares = numeric())
# #
# # for (i in vet_clust2){
# # cutted <- hcut(mat_similarity2, hc_func = "hclust", hc_method = "complete", k=i, graph = TRUE)
# # negativos <- sum(cutted$silinfo$widths$sil_width < 0) / length(cutted$silinfo$widths$sil_width)
# # sing <- nrow(as_tibble(cutted$cluster) %>% group_by(value) %>% count() %>% filter(n == 1))
# # silho <- silho %>% bind_rows(c(k = i, silho_avg = cutted$silinfo$avg.width, negatives = negativos, singulares = sing))
# # print(i)
# # }
# #
# # best_k <- silho$k[silho$silho_avg == max(silho$silho_avg)]
# # best_k_neg <- silho$k[silho$negatives == min(silho$negatives)]
#
# #p1 <- silho %>% ggplot(aes(x = k)) +
# # geom_line(aes(y = silho_avg), color = "blue") +
# # # geom_rect(aes(xmin = 35, xmax = 53, ymin = 0.33, ymax = 0.35), alpha = 1/500, color = "red", fill = "green") +
# # # geom_vline(xintercept = c(35, 53), show.legend = TRUE) +
# # geom_line(aes(y = singulares/40), color = "red") +
# # scale_y_continuous(
# # name = "Avg_Silh",
# # sec.axis = sec_axis(trans =~.*40, name = "n_Sing_Clust")
# # )
# #p1
#
# #?dist(mat_similarity2, method = )
#
# cutted_not_rec <- hcut(mat_similarity2, hc_func = "hclust", hc_method = "complete", k=20, graph = TRUE)
# cutted_not_rec$labels <- mat_similarity2$product_name
#
#
# cutted_rec <- hcut(mat_similarity_rec2, hc_func = "hclust", hc_method = "complete", k=20, graph = TRUE)
# cutted_rec$labels <- mat_similarity_rec2$product_name
#
# #cutted_not_rec$size
#
# #fviz_dend(cutted_not_rec, k = 20,
# # cex = 0.7,
# # type = "rectangle",
# # k_colors = c("darkgreen","orange"),
# # labels_track_height = 0.8,
# # # k_colors = c(1:4,6),
# # ggtheme = theme_light(),
# # main = "Dendrograma de Produtos - Clientes Não Recorrentes")
# #
# # fviz_dend(cutted_rec, k = 20,
# # cex = 0.7,
# # type = "rectangle",
# # k_colors = c("darkgreen","orange"),
# # labels_track_height = 0.8,
# # # k_colors = c(1:4,6),
# # ggtheme = theme_light(),
# # main = "Dendrograma de Produtos - Clientes Recorrentes")
# #
# #
# #
# #
# #cutted_not_rec$size
# #cutted_rec$size
#
#
# base_ord_geral_prod_total <- base_ord_geral_prod %>% left_join(base_orders_cl_mm)
# base_ord_geral_prod_total2 <- base_ord_geral_prod_total[,c(1:8,10,14)]
#
# prod_ord_cart_geral <- base_ord_geral_prod_total2 %>% dplyr::group_by(product_name, add_to_cart_order) %>%
# summarise(quantidade = n(),
# recorrencias = sum(reordered)) %>%
# mutate(rec_perc = recorrencias/quantidade) %>%
# arrange(-quantidade)
#
# prod_ord_cart_geral2 <- prod_ord_cart_geral %>% dplyr::group_by(product_name) %>% mutate(perc = recorrencias/sum(recorrencias))
#
# prod_ord_cart_geral2_list <- prod_ord_cart_geral2 %>% group_by(product_name) %>% summarise(recorrencias_total = sum(recorrencias)) %>% arrange(-recorrencias_total)
#
# prod_ord_cart_geral2_list <- prod_ord_cart_geral2_list[1:50,1]
#
# prod_100_geral <- prod_ord_cart_geral2 %>% right_join(prod_ord_cart_geral2_list)
#
#
#
# ord_cart_prod2 <- prod_ord_cart %>% right_join(prod_ord_cart_geral2_list) %>% dplyr::group_by(add_to_cart_order) %>% mutate(perc = recorrencias/sum(recorrencias))
# mat_similarity_ord <- ord_cart_prod2 %>% dplyr::select(product_name,add_to_cart_order, perc) %>% pivot_wider(names_from = add_to_cart_order, values_from = perc)
# # mat_similarity_ord <- mat_similarity_ord[1:100,]/
#
#
# # Removendo os NAs
# mat_similarity_ord <- mat_similarity_ord %>% replace(is.na(.),0)
#
# # Normalizando os dados
# receita <- mat_similarity_ord %>% recipe(product_name ~ .) %>%
# step_normalize(all_numeric(), -all_outcomes())
#
# prep_receita <- prep(receita)
#
# mat_similarity_ord_norm <- juice(prep_receita)[[ncol(juice(prep_receita))]] %>% cbind(juice(prep_receita)[,-ncol(juice(prep_receita))])
#
# # Coletando as colunas com NA ou NaN
# x <- inspect_na(mat_similarity_ord_norm)
# col_remove <- x$col_name[x$pcnt == 100]
#
# # Removendo as colunas com NA ou NaN
# mat_similarity_ord_norm <- mat_similarity_ord_norm %>% select(-c(col_remove))
#
# class(mat_similarity_ord_norm$.)
#
# # dist_mat <- get_dist(mat_similarity_ord_norm, upper = TRUE, diag = TRUE)
# #
# # n <- 5
# # vet_clust <- c(2:((nrow(mat_similarity_ord_norm)-1)/n))
# # vet_clust <- vet_clust * n
# # vet_clust2 <- c(c(2:9),vet_clust)
# # silho <- tibble(k = numeric(), silho_avg = numeric(), negatives = numeric(), singulares = numeric())
# #
# # for (i in vet_clust2){
# # cutted <- hcut(mat_similarity_ord_norm, hc_func = "hclust", hc_method = "ward.D2", k=i, graph = TRUE)
# # negativos <- sum(cutted$silinfo$widths$sil_width < 0) / length(cutted$silinfo$widths$sil_width)
# # sing <- nrow(as_tibble(cutted$cluster) %>% group_by(value) %>% count() %>% filter(n == 1))
# # silho <- silho %>% bind_rows(c(k = i, silho_avg = cutted$silinfo$avg.width, negatives = negativos, singulares = sing))
# # print(i)
# # }
# #
# # best_k <- silho$k[silho$silho_avg == max(silho$silho_avg)]
# # best_k_neg <- silho$k[silho$negatives == min(silho$negatives)]
# #
# # p1 <- silho %>% ggplot(aes(x = k)) +
# # geom_line(aes(y = silho_avg), color = "blue") +
# # # geom_rect(aes(xmin = 35, xmax = 53, ymin = 0.33, ymax = 0.35), alpha = 1/500, color = "red", fill = "green") +
# # # geom_vline(xintercept = c(35, 53), show.legend = TRUE) +
# # geom_line(aes(y = singulares/40), color = "red") +
# # scale_y_continuous(
# # name = "Avg_Silh",
# # sec.axis = sec_axis(trans =~.*40, name = "n_Sing_Clust")
# # ) +
# # geom_vline(xintercept = 6)
# # p1
#
# k_select <- 4
# cutted_ord_not_rec <- hcut(mat_similarity_ord_norm, hc_func = "hclust", hc_method = "ward.D2", k=k_select, graph = TRUE)
#
# cutted_ord_not_rec$labels <- as.character(mat_similarity_ord_norm$.)
#
# fviz_dend(cutted_ord_not_rec, k = k_select,
# cex = 0.6,
# type = "rectangle",
# k_colors = c("darkgreen","orange"),
# labels_track_height = 0.8,
# # k_colors = c(1:4,6),
# ggtheme = theme_light(),
# main = "Dendrograma de Produtos - Clientes Não Recorrentes",
# ylim = c(-30,60),
# )
#
#
#
#
#
#
#
#
# # Hcluster com clientes recorrentes
# ord_cart_prod_rec2 <- prod_ord_cart_rec %>% right_join(prod_ord_cart_geral2_list) %>% dplyr::group_by(add_to_cart_order) %>% mutate(perc = recorrencias/sum(recorrencias))
# mat_similarity_ord_rec <- ord_cart_prod_rec2 %>% dplyr::select(product_name,add_to_cart_order, perc) %>% pivot_wider(names_from = add_to_cart_order, values_from = perc)
# # mat_similarity_ord_rec <- mat_similarity_ord_rec[1:100,]
#
# mat_similarity_ord_rec$product_name == mat_similarity_ord$product_name
#
# juice(prep_receita)[[ncol(juice(prep_receita))]] == mat_similarity_ord_rec$product_name
#
# mat_similarity_ord_rec_norm$. == mat_similarity_ord$product_name
#
# # Removendo os NAs
# mat_similarity_ord_rec <- mat_similarity_ord_rec %>% replace(is.na(.),0)
#
# # Normalizando os dados
# receita <- mat_similarity_ord_rec %>% recipe(product_name ~ .) %>%
# step_normalize(all_numeric(), -all_outcomes())
#
# prep_receita <- prep(receita)
#
# mat_similarity_ord_rec_norm <- juice(prep_receita)[[ncol(juice(prep_receita))]] %>% cbind(juice(prep_receita)[,-ncol(juice(prep_receita))])
#
# # Coletando as colunas com NA ou NaN
# x <- inspect_na(mat_similarity_ord_rec_norm)
# col_remove <- x$col_name[x$pcnt == 100]
#
# # Removendo as colunas com NA ou NaN
# mat_similarity_ord_rec_norm <- mat_similarity_ord_rec_norm %>% select(-c(col_remove))
#
#
#
#
# # dist_mat <- get_dist(mat_similarity_ord_rec_norm, upper = TRUE, diag = TRUE)
#
# # n <- 5
# # vet_clust <- c(2:((nrow(mat_similarity_ord_rec_norm)-1)/n))
# # vet_clust <- vet_clust * n
# # vet_clust2 <- c(c(2:9),vet_clust)
# # silho <- tibble(k = numeric(), silho_avg = numeric(), negatives = numeric(), singulares = numeric())
# #
# # for (i in vet_clust2){
# # cutted <- hcut(mat_similarity_ord_rec_norm, hc_func = "hclust", hc_method = "ward.D2", k=i, graph = TRUE)
# # negativos <- sum(cutted$silinfo$widths$sil_width < 0) / length(cutted$silinfo$widths$sil_width)
# # sing <- nrow(as_tibble(cutted$cluster) %>% group_by(value) %>% count() %>% filter(n == 1))
# # silho <- silho %>% bind_rows(c(k = i, silho_avg = cutted$silinfo$avg.width, negatives = negativos, singulares = sing))
# # print(i)
# # }
# #
# # best_k <- silho$k[silho$silho_avg == max(silho$silho_avg)]
# # best_k_neg <- silho$k[silho$negatives == min(silho$negatives)]
# #
# # p1 <- silho %>% ggplot(aes(x = k)) +
# # geom_line(aes(y = silho_avg), color = "blue") +
# # # geom_rect(aes(xmin = 35, xmax = 53, ymin = 0.33, ymax = 0.35), alpha = 1/500, color = "red", fill = "green") +
# # # geom_vline(xintercept = c(35, 53), show.legend = TRUE) +
# # geom_line(aes(y = singulares/40), color = "red") +
# # scale_y_continuous(
# # name = "Avg_Silh",
# # sec.axis = sec_axis(trans =~.*40, name = "n_Sing_Clust")
# # ) +
# # geom_vline(xintercept = 4)
# # p1
#
# k_select <- 4
# cutted_ord_rec <- hcut(mat_similarity_ord_rec_norm, hc_func = "hclust", hc_method = "ward.D2", k=k_select, graph = TRUE)
#
# cutted_ord_rec$labels <- as.character(mat_similarity_ord_rec_norm$.)
#
# fviz_dend(cutted_ord_rec, k = k_select,
# cex = 0.6,
# type = "rectangle",
# k_colors = c("darkgreen","orange"),
# labels_track_height = 0.8,
# # k_colors = c(1:4,6),
# ggtheme = theme_light(),
# main = "Dendrograma de Produtos - Clientes Recorrentes",
# ylim = c(-30,60),
# )
#
#
#
#
# cutted_ord_rec$cluster
#
#
# library(dendextend)
#
# dend_not_rec <- as.dendrogram(cutted_ord_not_rec)
# dend_rec <- as.dendrogram(cutted_ord_rec)
#
# tang <- dendlist(dend_not_rec, dend_rec)
#
# dendlist(dend_not_rec, dend_rec) %>%
# untangle(method = "step1side") %>% # Find the best alignment layout
# tanglegram(labels_cex = 0.6,
# margin_inner = 15,
# k_labels = 4,
# k_branches = 4,
# axes = FALSE,
# lwd = 2,
# main_left = "Produtos - Clientes Pouco Recorrentes",
# cex_main_left = 1,
# main_right = "Produtos - Clientes Recorrentes",
# cex_main_right = 1,
# dLeaf = 0.1
# )
#
#
|
0eaacb4a208ff2fd53ddac28cece45ef90a2ddd6
|
5f93a26137b2e5f09eb970f2ea610963eec8d642
|
/R/gradientPickerD3_example.R
|
5e8c056e976252a8f3e8a070e7c9ae9cbadb231d
|
[] |
no_license
|
peikert/gradientPickerD3
|
82e91a11dc083107426db0ba755838ef07ce7365
|
23cc649bc03bc34bf55838b90f0f03215580b7cf
|
refs/heads/master
| 2021-03-27T09:56:13.250000
| 2017-10-11T13:49:46
| 2017-10-11T13:49:46
| 95,441,859
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,370
|
r
|
gradientPickerD3_example.R
|
#' gradientPickerD3_example
#'
#' Creates an example shiny app which includes the gradientPickerD3 widget and
#' a rendered table showing the widget's return value. Clicking the reload
#' button generates new random tick positions.
#'
#' @return A shiny app object (running it blocks until the app is closed).
#' @import shiny stats
#' @export
gradientPickerD3_example <- function() {
  # Fixes vs. the original: top-level `=` assignment replaced with `<-`;
  # `shinyApp(ui <- ..., shinyServer(...))` used assignment-in-call instead of
  # named arguments (leaking `ui` into the calling frame) and the deprecated
  # shinyServer() wrapper.
  ui <- fluidPage(
    div(
      style = "width: 300px;",
      actionButton("btreload", "reload"),
      gradientPickerD3Output("gpD3"),
      br(),
      tableOutput("text")
    )
  )

  server <- function(input, output) {
    # Initial payload: five fixed colour stops at symmetric tick positions.
    payload <- list(
      colors = c("purple", "blue", "green", "yellow", "red"),
      ticks = c(-5, -2, 0, 2, 5)
    )
    output$gpD3 <- renderGradientPickerD3(gradientPickerD3(payload))

    # On reload: keep the outer/centre ticks fixed, randomize the two inner ones.
    observeEvent(input$btreload, {
      payload <- list(
        colors = c("purple", "blue", "green", "yellow", "red"),
        ticks = c(-5, stats::runif(1, -4, -1), 0, stats::runif(1, 1, 4), 5)
      )
      output$gpD3 <- renderGradientPickerD3(gradientPickerD3(payload))
    })

    # Render the widget's table payload as a 3-column data frame
    # (position, color, tick) whenever the widget reports an update.
    observeEvent(input$gpD3_table, {
      output$text <- renderTable({
        df <- as.data.frame(matrix(
          unlist(input$gpD3_table),
          ncol = 3,
          byrow = TRUE
        ))
        colnames(df) <- c("position", "color", "tick")
        df
      })
    })
  }

  shinyApp(ui = ui, server = server)
}
|
f1145e3ec730070a3ec48d40ac39f1b1cd94cb5f
|
b406befbab9bcf0ea7c0eac21d6e163c3888ef9a
|
/example/ex.chisqstretch.pow.R
|
9de0f545599679953d33c581f67f894528bbeb0d
|
[] |
no_license
|
olli0601/abc.star
|
0f5fc8a3d1d4ba7edb3719dc46b688454bab9cfa
|
dbda96d2e52b096e74a2fbdef32f3443b45da7a7
|
refs/heads/master
| 2016-09-15T03:35:43.924846
| 2016-04-14T20:11:38
| 2016-04-14T20:11:38
| 8,214,145
| 0
| 1
| null | 2016-04-03T13:43:53
| 2013-02-15T07:11:58
|
R
|
UTF-8
|
R
| false
| false
| 1,104
|
r
|
ex.chisqstretch.pow.R
|
# Example script: power curves of the ABC variance test.
# NOTE(review): assumes vartest.calibrate()/vartest.pow() from the enclosing
# package and ggplot2/data.table are attached by the caller — confirm when
# running standalone.
n.of.x <- 60
n.of.y <- 60
# compute ABC tolerances
cali <- vartest.calibrate(n.of.x=n.of.x, n.of.y=n.of.y, tau.l=1/2, tau.u=2, what='CR', alpha=0.01)
# problematic ABC tolerances for ABC inference:
# although power is not zero, does not plateau around 1, and still high around rho=1 (desirable),
# the power is not maximised at the point of equality (rho=1).
rho <- seq(0.1, 3, len=1024)
tmp <- data.frame(rho=rho, power=vartest.pow(rho, n.of.x, n.of.y-1, cali['c.l'], cali['c.u']))
p <- ggplot(tmp,aes(x=rho,y=power)) + geom_line() + labs(y='Power\n(ABC acceptance probability)')
print(p)
# power increases with tau.u and becomes flat
# One power curve per tolerance stretch tau.u, calibrated symmetrically
# (tau.l = 1/tau.u); rows are stacked for a single coloured plot.
tmp <- lapply(seq(1.2,2.8,0.4),function(tau.u)
{
cali <- vartest.calibrate(n.of.x=n.of.x, n.of.y=n.of.y, tau.l=1/tau.u, tau.u=tau.u, what='CR', alpha=0.01)
data.table(rho=rho, power=vartest.pow(rho, n.of.x, n.of.y-1, cali['c.l'], cali['c.u']), tau.u=tau.u)
})
tmp <- do.call('rbind', tmp)
p <- ggplot(tmp,aes(x=rho,y=power,colour=tau.u, group=tau.u)) + geom_line() + labs(x=expression(rho), y='Power\n(ABC acceptance probability)')
print(p)
|
4b428f84fc830f3b8bf8d690da33017918177369
|
d6c0595084b6f9f3a541df39d7e54ad2cdd29d8e
|
/man/resizeImage.Rd
|
7a8032b4a912bf1057f3367a43e6d14f14881991
|
[] |
no_license
|
cran/phenopix
|
2b6e5b2ea601de51c312e692e04ec050529bf5e8
|
9220b65ba06c6e08e1df76a365db0b78364ed684
|
refs/heads/master
| 2023-08-19T07:31:53.401802
| 2023-08-09T13:50:02
| 2023-08-09T15:30:47
| 94,452,244
| 7
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,731
|
rd
|
resizeImage.Rd
|
\name{resizeImage}
\alias{resizeImage}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Resize an image (and the corresponding ROI) to a given pixel resolution
%% ~~function to do ... ~~
}
\description{
This function resizes a sample image and the corresponding ROI to a given pixel resolution, to be used as a background for spatial analysis plots.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
resizeImage(image.in, image.out, roi.in, roi.out, npixels)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{image.in}{
The complete path to your original image
}
\item{image.out}{
The complete path (filename with extension included) where the new, resized image will be saved. %% ~~Describe \code{path_ROIs} here~~
}
\item{roi.in}{
The complete path to your original roi.data
}
\item{roi.out}{
The complete path (filename with extension included) where the new, resized roi.data.RData will be saved
%% ~~Describe \code{roi.names} here~~
}
\item{npixels}{
As in \code{extractVIs} to aggregate more than one pixel
%% ~~Describe \code{roi.names} here~~
}
}
\details{
Coupled with spatial analysis and image resize (see \code{extractVIs()} and specifically argument \code{npixels} for details), this function allows to update a selected image and the correspondent ROI to a smaller size. This is exactly what is done internally when \code{extractVIs()} is called with npixels different from 1. The resized image can be used (together with the roi.data object) to plot results from spatially explicit phase extraction. See the vignette `spatial' for illustrated examples.
}
\author{
Gianluca Filippa <gian.filippa@gmail.com>
}
|
17b595c09a90ad2012e7edc9659e19dc4c2c1c38
|
623b144df283c68b8fe833f975a4068a98fed59e
|
/run_analysis.R
|
9f66e375996c1018059f023fb18cbdaf11e3fb2e
|
[] |
no_license
|
sougatabh/getting_and_cleaning_data
|
e463f74baa79cd7e37b8b6c8101f33fafc4bb57d
|
8e20ced9f8b7976e99388410fb86d1fd7cf2e188
|
refs/heads/master
| 2020-05-04T23:54:17.116325
| 2019-04-05T02:57:44
| 2019-04-05T02:57:44
| 179,558,847
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,738
|
r
|
run_analysis.R
|
# run_analysis.R
# Downloads the UCI HAR dataset (if needed) and builds a tidy data set with
# the average of each mean/std measurement for each activity and subject.
library(data.table)
library(reshape2)

#### Download the Data ###
fileName <- "UCIdata.zip"
url <- "http://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
dir <- "UCI HAR Dataset"
# File download verification. If file does not exist, download to working directory.
if (!file.exists(fileName)) {
  download.file(url, fileName, mode = "wb")
}
# Unzip the file if the extracted directory does not already exist.
if (!file.exists(dir)) {
  unzip(fileName, files = NULL, exdir = ".")
}

# Read the raw data files.
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt")
X_test <- read.table("UCI HAR Dataset/test/X_test.txt")
X_train <- read.table("UCI HAR Dataset/train/X_train.txt")
y_test <- read.table("UCI HAR Dataset/test/y_test.txt")
y_train <- read.table("UCI HAR Dataset/train/y_train.txt")
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt")
features <- read.table("UCI HAR Dataset/features.txt")

# 1. Merges the training and the test sets to create one data set.
merged_ds <- rbind(X_train, X_test)

# 2. Extracts only the measurements on the mean and standard deviation.
# NOTE(review): this regex also matches meanFreq() columns, as in the original
# — kept for output compatibility.
mean_std <- grep("mean()|std()", features[, 2])
merged_ds <- merged_ds[, mean_std]

# 4. Appropriately labels the data set with descriptive variable names
# (strip parentheses from the feature names).
clean_feature_name <- sapply(features[, 2], function(x) gsub("[()]", "", x))
names(merged_ds) <- clean_feature_name[mean_std]

# Merge train and test subject / activity columns and attach them.
merge_subject <- rbind(subject_train, subject_test)
names(merge_subject) <- "subject"
merge_activity <- rbind(y_train, y_test)
names(merge_activity) <- "activity"
merged_ds <- cbind(merge_subject, merge_activity, merged_ds)

# 3. Uses descriptive activity names to name the activities in the data set.
# Bug fix: the original referenced an undefined object `ds` here instead of
# `merged_ds`, which made the script fail at this point.
act_group <- factor(merged_ds$activity)
levels(act_group) <- activity_labels[, 2]
merged_ds$activity <- act_group

# 5. From the data set in step 4, creates a second, independent tidy data set
# with the average of each variable for each activity and each subject.
# (The original wrapped id.vars in stray parentheses, turning it into a
# positional assignment argument; passing it as a named argument is equivalent.)
base_ds <- melt(merged_ds, id.vars = c("subject", "activity"))
second_independent_ds <- dcast(base_ds, subject + activity ~ variable, mean)
names(second_independent_ds)[-c(1:2)] <- paste("[mean of]", names(second_independent_ds)[-c(1:2)])

# The original wrote tidy_data.txt twice (second write overwrote the first);
# a single write without row names produces the same final file.
write.table(second_independent_ds, "tidy_data.txt", sep = ",", row.names = FALSE)
|
08e0ba37245d80d4ed3b3526b4714d35f698aaaf
|
dcaef87c4a61606e623560a924fba0b97bb50f20
|
/decimals.R
|
d131e66fb56c1a7ceee57b1d515477ae2031a34e
|
[] |
no_license
|
nmolanog/refous
|
bcbb9b3b9029bea1a1546de6f228275afe4f0860
|
6c519c068f53bf2b2cf5d1884d44461dd86655ac
|
refs/heads/master
| 2020-03-22T22:03:15.367335
| 2018-10-29T23:53:00
| 2018-10-29T23:54:41
| 140,730,505
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,034
|
r
|
decimals.R
|
rm(list=ls())
library(tidyverse)
div_int <- function(bse, x) {
  # Integer-divide x by the base bse.
  # Returns a length-2 vector: c(quotient, remainder).
  quotient <- x %/% bse
  remainder <- x - quotient * bse
  c(quotient, remainder)
}
fromx_to_10_v2 <- function(bse, x) {
  # Interpret x (written with base-10 digit characters) as a number in base
  # `bse` and return its base-10 value. Handles an optional fractional part
  # after a decimal point.
  # Bug fix: the original only validated digits (< base) in the integer-only
  # branch; decimal inputs with invalid digits were silently accepted. The
  # check now applies to all digits.
  x_chr <- as.character(x)
  if (grepl("\\.", x_chr)) {
    parts <- strsplit(x_chr, "\\.")[[1]]
    int_digits <- as.numeric(strsplit(parts[1], "")[[1]])
    frac_digits <- as.numeric(strsplit(parts[2], "")[[1]])
    sep_digits <- c(int_digits, frac_digits)
    # Positive exponents for integer digits, negative for fractional digits.
    base_exp <- c((length(int_digits) - 1):0, -seq_along(frac_digits))
  } else {
    sep_digits <- as.numeric(strsplit(x_chr, "")[[1]])
    base_exp <- (length(sep_digits) - 1):0
  }
  if (any(sep_digits >= bse)) {
    stop(paste("error:", paste(sep_digits[sep_digits >= bse], collapse = " "),
               "are not digits for the given base"))
  }
  sum((bse^base_exp) * sep_digits)
}
fromx_to_10_v2(3,112)
fromx_to_10_v2(3,112.12121121212)
# Convert a base-10 number x to its representation in base bse, returned as a
# numeric. The fractional part (if any) is expanded digit-by-digit up to
# `dgts` digits via repeated multiplication; the integer part via repeated
# integer division (using div_int()).
# NOTE(review): the result is coerced back to numeric, so long digit strings
# may lose precision — presumably callers only need short representations;
# confirm.
from10_to_x_v2<-function(bse,x,dgts=15){
x0<-x
dec_part<-NULL
# Fractional part: multiply by the base repeatedly; the integer part of each
# product is the next digit after the radix point.
if(x%>%str_detect("\\.")){
x%>%as.character()%>%strsplit("\\.")%>%unlist->break_x
break_x[2]%>%{paste0(".",.)}%>%as.numeric()->temp_dec
for(i in 1:dgts){
(temp_dec*bse)%>%as.character()%>%strsplit("\\.")%>%unlist->temp_split
dec_part<-c(dec_part,temp_split[1])
# When the product is an exact integer there is no fractional remainder;
# continue the expansion with 0.
if(is.na(temp_split[2])){temp_split[2]<-0}
temp_dec<-temp_split[2]%>%{paste0(".",.)}%>%as.numeric()
}
# Keep only the integer part for the division loop below.
x0<-as.numeric(break_x[1])
}
# Integer part: repeated division by the base; remainders are the digits in
# reverse order, and the final quotient (< bse) is the leading digit.
newnumber<-NULL
res_vect<-NULL
flag<-T
while(flag){
temp<-div_int(bse,x0)
newnumber<-c(newnumber,temp[2])
x0<-temp[1]
res_vect<-c(res_vect,x0)
if(x0<bse){flag<-F}
}
newnumber<-c(newnumber,res_vect[length(res_vect)])
# Assemble: reverse the remainders, append the radix point and the fractional
# digits, then collapse into a single numeric.
newnumber%>%rev %>%c(".",dec_part)->newnumber
return(newnumber%>%paste(collapse="")%>%as.numeric())
}
# Demonstration: base conversion round trips and arithmetic in base 5.
from10_to_x_v2(5,54.124)%>%print(digits=20)
from10_to_x_v2(5,7.11)
# Addition of two base-5 numbers performed via base 10, result shown in base 5.
fromx_to_10_v2(5,2.11)+fromx_to_10_v2(5,3.11)->sum_N
from10_to_x_v2(5,sum_N)
# Division of base-5 numbers via base 10.
fromx_to_10_v2(5,13)/fromx_to_10_v2(5,10)->sum_N
from10_to_x_v2(5,sum_N)
# Multiplication of base-5 numbers via base 10.
fromx_to_10_v2(5,2.13)*fromx_to_10_v2(5,4)->sum_N
from10_to_x_v2(5,sum_N)
|
aa7fadfd6c5043411333b512c12fd3e4f6538026
|
d0717c9f78e4da0be6e8150640082a92c8bd10ae
|
/RegressionTests/move_files.R
|
96acc617a62a5d157c741ed243b9ec005a5b5a7e
|
[] |
no_license
|
bleutner/caret
|
b230f73042744b9ce22340b5e21bc74e9f6a2242
|
b317b14d8f47d0995b5c09a3ec7f96297d7ff07e
|
refs/heads/master
| 2021-01-18T11:42:32.834582
| 2015-06-15T15:56:33
| 2015-06-15T15:56:33
| 37,474,793
| 1
| 0
| null | 2015-06-15T15:46:38
| 2015-06-15T15:46:38
| null |
UTF-8
|
R
| false
| false
| 2,275
|
r
|
move_files.R
|
# Regression-test housekeeping script for caret: copies the regression test
# files into a timestamped+versioned temp directory and generates a makefile
# that runs each test via R CMD BATCH.
# NOTE(review): hard-coded, machine-specific path — only runs on the author's
# machine as-is.
setwd("/Users/kuhna03/Code/github/caret/RegressionTests")
#############################################################################
# Destination directory name: <timestamp>__<caret version>.
newPath <- paste(format(Sys.time(), "%Y_%m_%d_%H"),
                 packageDescription("caret")$Version,
                 sep = "__")
dir.create(paste0("~/tmp/", newPath))
testFiles <- list.files(file.path(getwd(), "Code"),
                        full.names = TRUE)
## package archived:
testFiles <- testFiles[!grepl("(Mlda)|(RFlda)", testFiles)]
newFiles <- paste0("~/tmp/", newPath, "/", basename(testFiles))
file.copy(testFiles, newFiles)
#############################################################################
## write out makefile here and save to code directory prior to copy
rFiles <- list.files(file.path(getwd(), "Code"), pattern = "R$")
## package archived:
rFiles <- rFiles[!grepl("(Mlda)|(RFlda)", rFiles)]
# Makefile recipe pieces, one per test file: target line, start timestamp,
# R CMD BATCH invocation, finish timestamp.
header <- paste(sort(rFiles), "Data: ", sort(rFiles), "\n", sep = "")
strt <- paste("\t@date '+ %Y-%m-%d %H:%M:%S: Starting ",
              gsub(".R$", "", sort(rFiles)), "'\n", sep = "")
batch <- paste("\t@R CMD BATCH --vanilla ", sort(rFiles), "\n", sep = "")
fini <- paste("\t@date '+ %Y-%m-%d %H:%M:%S: Finished ",
              gsub(".R$", "", sort(rFiles)), "'\n\n", sep = "")
# The `all:` dependency list, wrapped three targets per line (padded with
# empty strings so the matrix reshape works).
rdata <- paste(sort(rFiles), "Data", sep = "")
over <- length(rdata) %% 3
if(over > 0) rdata <- c(rdata, rep("", 3 - over))
deps <- matrix(rdata, nrow = 3)
deps <- apply(deps, 2, function(x) paste("\t", paste(x, collapse = " ")))
deps[1] <- gsub("\t", "all: ", deps[1])
deps[1:(length(deps) - 1)] <- paste(deps[1:(length(deps) - 1)], " \\\n", sep = "")
deps[length(deps)] <- paste(deps[length(deps)], "\n\n")
cat(deps, sep = "")
# Assemble and write the makefile into the destination directory.
mf <- c("SHELL = /bin/bash\n\n", deps,
        paste(header, strt, batch, fini, sep = ""))
cat(mf, sep = "", file = file.path("~/tmp", newPath, "makefile"))
#############################################################################
## missing tests
# Interactive helper (never executed): lists model files without a test file.
if(FALSE){
  library(caret)
  mods <- names(getModelInfo())
  testfiles <- gsub(".R", "", rFiles, fixed = TRUE)
  testFiles <- list.files(file.path(getwd(), "Code"))
  modelFiles <- list.files("/Users/kuhna03/Code/github/caret/pkg/caret/inst/models")
  modelFiles[!(modelFiles %in% testFiles)]
}
|
1d011a576f381537c75c57ac3b8b160b3c570d76
|
a8f01b9017964981d97f3720e50908c11ca0f99b
|
/R/03_formalize.R
|
8b415d7c1d50be52d10dfa9a77780ad38a81d5d8
|
[] |
no_license
|
jeffnorville/map
|
eda5f12ffe952054cffee896aa4d28f9ddbdf6df
|
75eb41e501b3a9b7f3dcb8b5742ebe68a00ed7ba
|
refs/heads/master
| 2021-07-04T08:27:41.832563
| 2020-09-02T08:35:00
| 2020-09-02T08:35:00
| 167,517,511
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,401
|
r
|
03_formalize.R
|
# Spatialisation script: reads CLC and RPG mask layers from a PostGIS data
# base and inspects them. (Exploratory / work-in-progress script.)
#spatialisation only to DB
# format of form commun
# working directory
wd <- list()
# commonly used paths in my working directory
wd$data <- "C:/Users/Jeff Norville/Documents/R/map/data/"
wd$output <- "C:/Users/Jeff Norville/Documents/R/map/output/"
#rm(list=ls())
# load packages
require(foreign)
require(dplyr)
require(sf)
require(rpostgis)
library(dplyr)
library(tidyr)
# Data-base credentials come from environment variables (not hard-coded).
gethost <- Sys.getenv("dbhost")
getdbname <- Sys.getenv("dbname")
getusername <- Sys.getenv("user")
getpassword <- Sys.getenv("passwd")
# database
con <- dbConnect("PostgreSQL",
                 dbname = getdbname,
                 host = gethost,
                 user = getusername,
                 password = getpassword)
# isPostgresqlIdCurrent(con) #boolean, checks if postgres instance is alive
# pgPostGIS(con) #check that postgis is installed in db
# define queries
# select_gtlist <- "SELECT * FROM information_schema.tables WHERE table_schema = 'aropaj' AND table_name like 'gt%'"
# tbls_aropaj <- dbGetQuery(con, select_gtlist)
# liste_db_GT <- tbls_aropaj$table_name
# for each region
# sel_area_clc <- "select * from clc_mask_pra02336"
# sel_area_rpg <- "select * from rpg_mask_pra02336"
# tbls_rpgpra02336 <- dbGetQuery(con, sel_area_rpg)
# tbls_clcpra02336 <- dbGetQuery(con, sel_area_clc)
# tbls_rpgpra02336 <- st_read(con, sel_area_rpg)
# tbls_clcpra02336 <- st_read(con, sel_area_clc)
# ^^^^^^^^^^^^^^^^^^^all broken
# class(tbls_clcpra02336)
# Read the two mask layers as sf objects.
tbls_clcpra02336 <- st_read(con, layer = "clc_mask_pra02336")
tbls_rpgpra02336 <- st_read(con, layer = "rpg_mask_pra02336")
# Interactive inspection of the RPG layer.
class(tbls_rpgpra02336)
plot(tbls_rpgpra02336$geom)
plot(tbls_clcpra02336$geom)
head(tbls_rpgpra02336)
summary(tbls_rpgpra02336)
# this takes some time !!!!
# tbl_form1fromR <- st_union(tbls_clcpra02336, tbls_rpgpra02336, )
# plot(tbls_clcpra02336)
# Interactive inspection of the CLC layer.
head(tbls_clcpra02336)
summary(tbls_clcpra02336)
plot(tbls_clcpra02336$geom)
# Example query string (built but never executed in this script).
query <- paste(
  'SELECT "name", "name_long", "geometry"',
  'FROM "ne_world"',
  'WHERE ("continent" = \'Africa\');'
)
class(tbls_clcpra02336)
plot(tbls_clcpra02336$geom)
summary(tbls_rpgpra02336)
#assign landuse values
# library(dplyr)
# library(tidyr)
# NOTE(review): the block below looks like an unfinished copy-paste from the
# linked SO answer — `table` here is the base function and `lookup` is never
# defined, so this would error if run; presumably a placeholder to adapt.
table %>%
  gather(key = "pet") %>%
  left_join(lookup, by = "pet") %>%
  spread(key = pet, value = class)
# https://stackoverflow.com/questions/35636315/replace-values-in-a-dataframe-based-on-lookup-table
|
6f869cf9fc15516d7453596ea43ab9e453e35a10
|
ef9ba3e4ece365ddbc6c67fcf69d5d6a3457de1e
|
/R/plot functions.R
|
b2a394308fbf0205dbd7fbc63c8a7d718d117e2b
|
[] |
no_license
|
gheger11/eigenangles
|
573616bf40cffc3a58beea0570796bf1386c45d7
|
17248d7601e57f52318ab383b4fd9478dd6d58db
|
refs/heads/master
| 2022-05-25T23:02:54.988694
| 2020-04-18T17:56:19
| 2020-04-18T17:56:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,414
|
r
|
plot functions.R
|
plot.eigenangles <- function(tbl, component = 1, colour = 'blue') {
  # Polar "gauge" of one eigenangle component per batch: integration angles on
  # the positive half, conservation angles (negated) on the negative half,
  # with a black reference line at zero and a text label on each half.
  angles <- extract_component(tbl, component)
  p <- ggplot(angles) +
    geom_hline(aes(yintercept = integration_angles), colour = colour) +
    geom_hline(aes(yintercept = -conservation_angles), colour = colour) +
    geom_hline(yintercept = 0, colour = 'black')
  p <- p +
    coord_polar(theta = 'y', start = pi, direction = -1) +
    ylim(c(-1, 1)) +
    xlim(c(0, 1))
  p +
    annotate(label = 'integration', y = 2/3, x = 1/2, geom = 'text', size = 2) +
    annotate(label = 'conservation', y = -2/3, x = 1/2, geom = 'text', size = 2) +
    facet_wrap(~batch_)
}
# Polar plot of one eigenangle component per batch, against the parameter k.
# Rows with k present are drawn as points; for rows without k the product
# `angle * is.na(k)` yields the angle (is.na(k) == 1), so they appear as full
# horizontal reference lines, while rows with k collapse onto the zero line.
plot.eigenangles.parametric<-function(tbl,component=1, colour='blue'){
ggplot(tbl %>% extract_component(component))+
geom_point(aes(x=k, y=integration_angles), colour=colour)+
geom_point(aes(x=k, y=-conservation_angles), colour=colour)+
geom_hline(aes(yintercept = integration_angles*is.na(k)), colour=colour)+
geom_hline(aes(yintercept = -conservation_angles*is.na(k)), colour=colour)+
geom_hline(yintercept=0, colour='black')+
coord_polar(theta='y',start=pi,direction=-1)+ylim(c(-1,1))+xlim(c(0,max(tbl$k,na.rm=TRUE)))+
annotate(label='integration',y=2/3,x=max(tbl$k,na.rm=TRUE)/2,geom='text',size=2)+
annotate(label='conservation',y=-2/3,x=max(tbl$k,na.rm=TRUE)/2,geom='text',size=2)+
facet_wrap(~batch_)
}
# Same layout as plot.eigenangles.parametric, but colours encode the
# algorithm (one series per algorithm) instead of a fixed colour argument.
# Rows without a k value are drawn as algorithm-coloured reference lines via
# the `angle * is.na(k)` trick (rows with k collapse onto the zero line).
plot.eigenangles.benchmark<-function(tbl,component=1){
ggplot(tbl %>% extract_component(component))+
geom_point(aes(x=k, y=integration_angles, colour=algorithm))+
geom_point(aes(x=k, y=-conservation_angles, colour=algorithm))+
geom_hline(aes(yintercept = integration_angles*is.na(k), colour=algorithm))+
geom_hline(aes(yintercept = -conservation_angles*is.na(k), colour=algorithm))+
geom_hline(yintercept=0, colour='black')+
coord_polar(theta='y',start=pi,direction=-1)+ylim(c(-1,1))+xlim(c(0,max(tbl$k,na.rm=TRUE)))+
annotate(label='integration',y=2/3,x=max(tbl$k,na.rm=TRUE)/2,geom='text',size=2)+
annotate(label='conservation',y=-2/3,x=max(tbl$k,na.rm=TRUE)/2,geom='text',size=2)+
facet_wrap(~batch_)
}
#
# plot.eigenangles<-function(tbl,ref='none',dim=1){
# ggplot(tbl %>% extract_dim(dim))+
# aes(x=k_, y=angles_, colour=algorithm_)+
# geom_point()+
# geom_hline(aes(yintercept = angles_*is.na(k_), colour=algorithm_))+
# geom_hline(yintercept=0, colour='black')+
# coord_polar(theta='y')+ylim(c(0,2))+
# facet_wrap(~batch_)
# }
# Generic: tangent-based circular mean of the selected angle component.
# Angles are in units of pi (tanpi/atan work on half-turns); the mean is taken
# on the tangent scale, then mapped back with atan and rescaled by 1/pi.
tanmean<-function(tbl, component=1) UseMethod("tanmean")
# Method for plain eigenangle tables: a single summary row.
# The final structure() call appends the original classes via magrittr's `.`
# placeholder (the summarised tibble) so the result prints/plots via the
# matching plot.eigenangles.tanmean method.
tanmean.eigenangles<-function(tbl, component=1){
tbl %>% extract_component(component) %>%
dplyr::summarise(
integration_angles=atan(mean(tanpi(integration_angles)))/pi,
conservation_angles=atan(mean(tanpi(conservation_angles)))/pi
) %>% structure(class=c('eigenangles.tanmean',class(.)))
}
# Method for parametric tables: one summary row per parameter value k.
tanmean.eigenangles.parametric<-function(tbl, component=1){
tbl %>% extract_component(component) %>%
group_by(k) %>%
dplyr::summarise(
integration_angles=atan(mean(tanpi(integration_angles)))/pi,
conservation_angles=atan(mean(tanpi(conservation_angles)))/pi
) %>% structure(class=c('eigenangles.tanmean.parametric',class(.)))
}
# Method for benchmark tables: one summary row per (algorithm, k) pair.
tanmean.eigenangles.benchmark<-function(tbl, component=1){
tbl %>% extract_component(component) %>%
group_by(algorithm,k) %>%
dplyr::summarise(
integration_angles=atan(mean(tanpi(integration_angles)))/pi,
conservation_angles=atan(mean(tanpi(conservation_angles)))/pi
) %>% structure(class=c('eigenangles.benchmark.tanmean',class(.)))
}
plot.eigenangles.tanmean <- function(tbl, colour = 'blue') {
  # Polar gauge of the tangent-averaged angles: integration on the positive
  # half, conservation (negated) on the negative half, zero line in black.
  p <- ggplot(tbl) +
    geom_hline(aes(yintercept = integration_angles), colour = colour) +
    geom_hline(aes(yintercept = -conservation_angles), colour = colour) +
    geom_hline(yintercept = 0, colour = 'black')
  p <- p +
    coord_polar(theta = 'y', start = pi, direction = -1) +
    ylim(c(-1, 1)) +
    xlim(c(0, 1))
  p +
    annotate(label = 'integration', y = 2/3, x = 1/2, geom = 'text') +
    annotate(label = 'conservation', y = -2/3, x = 1/2, geom = 'text')
}
# Polar plot of tangent-averaged angles against k. As in the non-averaged
# parametric plot, rows without k are drawn as horizontal reference lines via
# `angle * is.na(k)` (rows with k collapse onto the zero line).
plot.eigenangles.tanmean.parametric<-function(tbl, colour='blue'){
ggplot(tbl)+
geom_point(aes(x=k, y=integration_angles), colour=colour)+
geom_point(aes(x=k, y=-conservation_angles), colour=colour)+
geom_hline(aes(yintercept = integration_angles*is.na(k)), colour=colour)+
geom_hline(aes(yintercept = -conservation_angles*is.na(k)), colour=colour)+
geom_hline(yintercept=0, colour='black')+
coord_polar(theta='y',start=pi,direction=-1)+ylim(c(-1,1))+xlim(c(0,max(tbl$k,na.rm=TRUE)))+
annotate(label='integration',y=2/3,x=max(tbl$k,na.rm=TRUE)/2,geom='text')+
annotate(label='conservation',y=-2/3,x=max(tbl$k,na.rm=TRUE)/2,geom='text')
}
# Benchmark variant of the tangent-averaged polar plot: colours encode the
# algorithm, and rows without k become algorithm-coloured reference lines via
# `angle * is.na(k)`.
plot.eigenangles.benchmark.tanmean<-function(tbl){
ggplot(tbl)+
geom_point(aes(x=k, y=integration_angles, colour=algorithm))+
geom_point(aes(x=k, y=-conservation_angles, colour=algorithm))+
geom_hline(aes(yintercept = integration_angles*is.na(k), colour=algorithm))+
geom_hline(aes(yintercept = -conservation_angles*is.na(k), colour=algorithm))+
geom_hline(yintercept=0, colour='black')+
coord_polar(theta='y',start=pi,direction=-1)+ylim(c(-1,1))+xlim(c(0,max(tbl$k,na.rm=TRUE)))+
annotate(label='integration',y=2/3,x=max(tbl$k,na.rm=TRUE)/2,geom='text')+
annotate(label='conservation',y=-2/3,x=max(tbl$k,na.rm=TRUE)/2,geom='text')
}
extract_component <- function(tbl, component) {
  # Collapse the list-columns of angle vectors to scalars by picking the
  # requested component, clamped to each vector's length, then unlisting.
  pick <- function(angles) angles[min(component, length(angles))]
  tbl$integration_angles <- unlist(lapply(tbl$integration_angles, pick))
  tbl$conservation_angles <- unlist(lapply(tbl$conservation_angles, pick))
  tbl
}
#
# plot.eigenangles.tanmean<-function(tbl,ref='none',dim=1){
# ggplot(tbl)+
# aes(x=k_, y=angles_, colour=algorithm_)+
# geom_point()+
# geom_hline(aes(yintercept = angles_*is.na(k_), colour=algorithm_))+
# geom_hline(yintercept=0, colour='black')+
# coord_polar(theta='y')+ylim(c(0,2))+xlim(c(min(tbl$k_,na.rm=TRUE)-1,max(tbl$k_,na.rm=TRUE)))
# }
# viz_meantan<-function(tbl,ref='none',dim=1){
# ggplot(tbl %>% extract_dim(dim) %>%
# group_by(algorithm_,k_) %>%
# dplyr::summarise(angles_=atan(mean(tanpi(angles_)))/pi))+
# aes(x=k_, y=angles_, colour=algorithm_)+
# geom_point()+
# geom_hline(aes(yintercept = angles_*is.na(k_), colour=algorithm_))+
# geom_hline(yintercept=0, colour='black')+
# coord_polar(theta='y')+ylim(c(0,2))+xlim(c(min(tbl$k_,na.rm=TRUE)-1,max(tbl$k_,na.rm=TRUE)))
# }
#
# viz_meansincos<-function(tbl,ref='none',dim=1){
# ggplot(tbl %>% extract_dim(dim) %>%
# group_by(algorithm_,k_) %>%
# dplyr::summarise(angles_=atan(mean(sinpi(angles_))/mean(cospi(angles_)))/pi))+
# aes(x=k_, y=angles_, colour=algorithm_)+
# geom_point()+
# geom_hline(aes(yintercept = angles_*is.na(k_), colour=algorithm_))+
# geom_hline(yintercept=0, colour='black')+
# coord_polar(theta='y')+ylim(c(0,2))+xlim(c(min(tbl$k_,na.rm=TRUE)-1,max(tbl$k_,na.rm=TRUE)))
# }
#
# viz_angles_corrections<-function(ref, ...){
# list(...) -> corrections
# corrections %>% map(~class(.x[[1]])) %>% equals("list") -> parametric
# ggplot(data.frame(
# k = corrections[parametric] %>% map(seq_along) %>% unlist,#à changer
# correction = corrections[parametric] %>% imap(~.y %>% rep(length(.x))) %>% unlist,#à changer
# angle = corrections[parametric] %>% map(~.x %>% map(~.x %>% map(~.x[[1]]))) %>% unlist,
# batch = corrections[parametric] %>% map(~.x %>% map(names)) %>% unlist
# ))
# }
|
a3c6bc4ae40048a06d2f5c1286a8f19e5d9b11c2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/cyphr/examples/key_sodium.Rd.R
|
1fc4ba0cac261d97025713c22a32c984ad46501a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 343
|
r
|
key_sodium.Rd.R
|
# Generated example for cyphr::key_sodium (symmetric encryption via sodium).
library(cyphr)
### Name: key_sodium
### Title: Symmetric encryption with sodium
### Aliases: key_sodium
### ** Examples
# Create a new key (wraps a freshly generated sodium key)
key <- cyphr::key_sodium(sodium::keygen())
key
# With this key encrypt a string
secret <- cyphr::encrypt_string("my secret string", key)
# And decrypt it again:
cyphr::decrypt_string(secret, key)
|
88fc5dcaf09323ddaa32b1ce8271f192dbd16026
|
a226f4b4cf54dd0e8164a727d24dca99e79e1354
|
/tests/testthat/test_createGADS.R
|
7b6c690b7eecd28fe63fae6a7b5f1a4c2e637ee8
|
[] |
no_license
|
beckerbenj/eatGADS
|
5ef0bdc3ce52b1895aaaf40349cbac4adcaa293a
|
e16b423bd085f703f5a548c5252da61703bfc9bb
|
refs/heads/master
| 2023-09-04T07:06:12.720324
| 2023-08-25T11:08:48
| 2023-08-25T11:08:48
| 150,725,511
| 0
| 1
| null | 2023-09-12T06:44:54
| 2018-09-28T10:41:21
|
R
|
UTF-8
|
R
| false
| false
| 806
|
r
|
test_createGADS.R
|
# testthat tests for createGADS(): data-base creation, in-memory-path
# messaging, and input validation.
# load test data
# load(file = "tests/testthat/helper_data.rda")
load(file = "helper_data.rda")
allList <- mergeLabels(df1 = df1, df2 = df2)
### create
test_that("GADS DB creation", {
# Using ":memory:" should warn (via message) that the DB lives in RAM only.
expect_message(createGADS(allList = allList, pkList = pkList, fkList = fkList, filePath = ":memory:"),
"filePath points to work memory")
expect_message(createGADS(allList = df1, pkList = list(df1 = "ID1"), filePath = ":memory:"),
"filePath points to work memory")
# Invalid inputs: unnamed pkList and a bare (classless) list must error.
expect_error(createGADS(allList = df1, pkList = "ID1", filePath = ":memory:"), "All input lists have to be named")
expect_error(createGADS(allList = list(df1), pkList = list(df1 = "ID1"), filePath = ":memory:"), "no applicable method for 'createGADS' applied to an object of class \"list\"")
})
|
672bf1eaf50ecb998847a1e751aea4ba70e213af
|
299585457e6f3fd9c3e82769db4d82becc67d05a
|
/man/hyperg.Rd
|
37595a6c0d6a354c2303582eb149aeabcb66a1e3
|
[] |
no_license
|
jcfisher/backbone
|
39ca2d01f5e99cab28fefd456a69b02a5a6c9fa8
|
89fd0f4b0d786534d398559d8cf72db1abb65b16
|
refs/heads/master
| 2020-08-05T06:28:38.456269
| 2019-10-03T15:42:41
| 2019-10-03T15:42:41
| 212,430,280
| 0
| 0
| null | 2019-10-03T15:42:45
| 2019-10-02T20:01:09
| null |
UTF-8
|
R
| false
| true
| 955
|
rd
|
hyperg.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hyperg.R
\name{hyperg}
\alias{hyperg}
\title{Compute hypergeometric backbone}
\usage{
hyperg(B)
}
\arguments{
\item{B}{Matrix: Bipartite network}
}
\value{
list(positive, negative).
positive gives matrix of probability of ties above the observed value.
negative gives matrix of probability of ties below the observed value.
}
\description{
`hyperg` computes the probability of observing
a higher or lower edge weight using the hypergeometric distribution.
Once computed, use \code{\link{backbone.extract}} to return
the backbone matrix for a given alpha value.
}
\examples{
hypergeometric_bb <- hyperg(davis)
}
\references{
\href{https://doi.org/10.1007/s13278-013-0107-y}{Neal, Zachary. 2013. “Identifying Statistically Significant Edges in One-Mode Projections.” Social Network Analysis and Mining 3 (4). Springer: 915–24. DOI:10.1007/s13278-013-0107-y.}
}
|
dd11c840e59209f033d571ecef0ed59672fad233
|
fe4cd16ffb13b2f29c12ffd520c81cee0c23f7f0
|
/man/import_qiime_otu_tax.Rd
|
8d1175208db3e40fd285c808363cc4f65c4a2e36
|
[] |
no_license
|
xinchoubiology/phyloseq
|
64648ee089fe42bb94a934bb559ce1c307e371b0
|
6eeb569025d330c5b1b709c103075b37b2feeaff
|
refs/heads/master
| 2020-05-20T19:24:22.015998
| 2015-06-18T17:24:38
| 2015-06-18T17:24:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,136
|
rd
|
import_qiime_otu_tax.Rd
|
\name{import_qiime_otu_tax}
\alias{import_qiime_otu_tax}
\title{Import now legacy-format QIIME OTU table as a list of two matrices.}
\usage{
import_qiime_otu_tax(file, parseFunction =
parse_taxonomy_qiime, verbose = TRUE, parallel = FALSE)
}
\arguments{
\item{file}{(Required). The path to the qiime-formatted
file you want to import into R. Can be compressed (e.g.
\code{.gz}, etc.), though the details may be OS-specific.
That is, Windows-beware.}
\item{parseFunction}{(Optional). An optional custom
function for parsing the character string that contains
the taxonomic assignment of each OTU. The default parsing
function is \code{\link{parse_taxonomy_qiime}},
specialized for splitting the \code{";"}-delimited
strings and also attempting to interpret greengenes
prefixes, if any, as that is a common format of the
taxonomy string produced by QIIME.}
\item{verbose}{(Optional). A \code{\link{logical}}.
Default is \code{TRUE}. Should progresss messages be
\code{\link{cat}}ted to standard out?}
\item{parallel}{(Optional). Logical. Should the parsing
be performed in parallel?. Default is \code{FALSE}. Only
a few steps are actually parallelized, and for most
datasets it will actually be faster and more efficient to
keep this set to \code{FALSE}. Also, to get any benefit
at all, you will need to register a parallel ``backend''
through one of the backend packages supported by the
\code{\link{foreach-package}}.}
}
\value{
A list of two matrices. \code{$otutab} contains the OTU
Table as a numeric matrix, while \code{$taxtab} contains
a character matrix of the taxonomy assignments.
}
\description{
Now a legacy-format, older versions of QIIME produced an
OTU file that typically contains both OTU-abundance and
taxonomic identity information in a tab-delimted table.
If your file ends with the extension \code{.biom}, or if
you happen to know that it is a biom-format file, or if
you used default settings in a version of QIIME of
\code{1.7} or greater, then YOU SHOULD USE THE
BIOM-IMPORT FUNCTION instead, \code{\link{import_biom}}.
}
\details{
This function uses chunking to perform both the reading
and parsing in blocks of optional size, thus constrain
the peak memory usage. feature should make this importer
accessible to machines with modest memory, but with the
caveat that the full numeric matrix must be a manageable
size at the end, too. In principle, the final tables will
be large, but much more efficiently represented than the
character-stored numbers. If total memory for storing the
numeric matrix becomes problematic, a switch to a sparse
matrix representation of the abundance -- which is
typically well-suited to this data -- might provide a
solution.
}
\examples{
otufile <- system.file("extdata", "GP_otu_table_rand_short.txt.gz", package="phyloseq")
import_qiime_otu_tax(otufile)
}
\seealso{
\code{\link{import}}
\code{\link{merge_phyloseq}}
\code{\link{phyloseq}}
\code{\link{import_qiime}}
\code{\link{read_tree}}
\code{\link{read_tree_greengenes}}
\code{\link{import_env_file}}
}
|
65cdf893c8cfd9f84088fcbbd94533178ad6a38d
|
e6d57bdc1566ccf94d64a19c9efb0e4f00b51190
|
/app_server.R
|
1edba24e10f29d9c0ee2d7afc8b979eee1eb4981
|
[
"MIT"
] |
permissive
|
tomgerber/Interactive-Midwest-Data-Report
|
642abfe643c18ab149ee202191af0479c56e8760
|
b7b4b7f4a921f2ccee0862a8a511b889bb1ae4ea
|
refs/heads/master
| 2023-01-13T04:54:38.512807
| 2020-11-11T05:58:56
| 2020-11-11T05:58:56
| 311,876,451
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 405
|
r
|
app_server.R
|
midwest_data <- midwest
library("dplyr")
library("plotly")
source("app_ui.R")
source("./scripts/bar_chart.R")
source("./scripts/scatter_plot.R")
server <- function(input, output) {
output$chart <- renderPlotly({
return(bar_chart(midwest_data, input$analysis_var, input$color))
})
output$scatter <- renderPlot({
return(scatter_plot(midwest_data, input$which_state, input$point_type))
})
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.