blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
735112502aea41d9730601ef0956b640c8d4b245
|
e034ce0925fd1d88d51b01ed49b3ba5fbbbdf58a
|
/class_scripts/probset2.R
|
56addef63c397f0da55e94b3722b5435bb3a15c6
|
[] |
no_license
|
datafordemocracy/lppp5540_sld
|
87f9269c070ebf6e5a00b01fc9ff490add40cb1f
|
f1206cee70725787c6e9c36bf05baf5c2108966d
|
refs/heads/master
| 2021-01-06T20:30:05.486236
| 2020-04-24T15:53:21
| 2020-04-24T15:53:21
| 241,480,658
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,756
|
r
|
probset2.R
|
# .........................
# Saving Lives with Data II
# Problem Set 1
# Wrangling and Regression in R
# Exploring SCI Data
# .........................
# Working further with Save the Children International's current data, in this problem set
# you'll make some additional changes to the data, generate visualizations, and extend the
# modeling we began in class. Be sure to save this R file with your initials, write (and
# execute) code to generate changes/figures/models, and add some verbal explanation using comments.
# .........................
# 0. Read in the SCI data, load necessary libraries ----
# Use the the revised rds data frame we created during class.
# .........................
# 1. Additional wrangling ----
# a. mutate ethnic_comp, country_income, and ext_supp into factors;
# set the sequences of the resulting levels with intention.
# b. locate the remaining 2-category variables that are currently formatted as characters
# (e.g., containing yes, no responses) and reformat them all as factors -- ideally all together.
# c. As you proceed to the next parts of the problem set, you might find you want to use another
# variable that first needs to be reformatted (or recoded, or releveled);
# if so, add those changes here.
# .........................
# 2. Visualization ----
# Focusing on peak size of a displacement event as the outcome of interest, visualize the
# relation of peak size and FIVE different potential predictor variables (these should be
# variables we could plausibly use to predict size, so should not be a function of size or
# a characteristic we would not know ahead of time (e.g.,duration, conflict end)).
# Make sure at least one of the potential variables is numeric and at least one is a
# factor/categorical (as these should be visualized with different kinds of figure types).
# For at least one figure (more if you like), add in additional information by mapping
# color, size, or shape.
# After each figure, add some explanation of what you are seeing.
# .........................
# 3. Linear modeling ----
# a. Continuing to focus on peak size as the outcome of interest, generate THREE additional
# linear regression models that expand on what we worked on in class. These might include
# the incorporation of additional and different predictors, additional or different
# transformations of the predictors or the outcome, additional interactions, and the like.
# For each model you submit, verbally indicate which variables appear to be most important
# in the model; and use the model to simulate outcomes and consider how well the model
# does in replicating the outcomes we observe in this data set. Which of your three models
# do you prefer
|
da02eb87bcd9e8c2eb2191e8c37bb1a6c6ed9dae
|
05a62c2797d2ab194e82498122e855c9b1537559
|
/Cicero links separated.r
|
3f275470cd457db80450afb21ce8598e29c52707
|
[] |
no_license
|
jdavisucd/Single-cell-multiomics-reveals-the-complexity-of-TGF-signalling-to-chromatin-in-iPSC-derived-kidney
|
e7a81a4b96680b11b2fca38090e05f0f085798d4
|
b38f29307cd6b6a0d9af1c6c76319f638eaf10ed
|
refs/heads/main
| 2023-04-15T22:02:18.736365
| 2022-11-03T10:50:47
| 2022-11-03T10:50:47
| 564,415,654
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,003
|
r
|
Cicero links separated.r
|
# This script splits a Signac object by sample and calculates cis-co-accessibility
# networks through Cicero, then adds them to the individual objects.
library(Signac)
library(monocle3)
library(cicero)
library(Seurat)
library(EnsDb.Hsapiens.v86)
library(BSgenome.Hsapiens.UCSC.hg38)
library(GenomicRanges)
library(SeuratWrappers)
library(future)

# The sample index (1-4, see `samples` below) is supplied on the command line.
args <- commandArgs(trailingOnly = TRUE)

save.dir <- "/scratch/10309667/SeuratOutputs/Kidney_Organoids"
combined <- readRDS("/scratch/10309667/SeuratOutputs/Kidney_Organoids/combined_Relabelled.rds")

# NOTE(review): "multiprocess" is deprecated in recent versions of the future
# package; prefer "multisession" (or "multicore" on Unix) when upgrading.
plan("multiprocess", workers = 4)
options(future.globals.maxSize = 30000 * 1024^2) # for 30 Gb RAM

# Gene annotation for hg38 in UCSC seqlevel style.
annotation <- GetGRangesFromEnsDb(ensdb = EnsDb.Hsapiens.v86)
seqlevelsStyle(annotation) <- "UCSC"
genome(annotation) <- "hg38"

samples <- c("Control", "ControlTGFB", "GSK", "GSKTGFB")
combined_split <- SplitObject(combined, split.by = "orig.ident")

# commandArgs() returns character strings; coerce to integer so that
# `samples[[i]]` and `combined_split[[i]]` index by position. With the raw
# character value (e.g. "1"), [[ ]] would look up a *name* and fail.
i <- as.integer(args[1])
sample <- samples[[i]]
split <- combined_split[[i]]

# Convert to a cell_data_set and aggregate cells for Cicero.
combined.cds <- as.cell_data_set(x = split, assay = "peaks")
combined.cicero <- make_cicero_cds(combined.cds, reduced_coordinates = reducedDims(combined.cds)$UMAP)

# get the chromosome sizes from the Seurat object
genome <- seqlengths(annotation)
# use chromosome 1 to save some time
# omit this step to run on the whole genome
#genome <- genome[1]
# convert chromosome sizes to a dataframe
genome.df <- data.frame("chr" = names(genome), "length" = genome)

# run cicero to obtain pairwise co-accessibility connections
conns <- run_cicero(combined.cicero, genomic_coords = genome.df, sample_num = 100)
# Save the connections themselves. (The original saved `split` here, which
# duplicated the Signac_Links save below and never persisted `conns`.)
saveRDS(conns, file = file.path(save.dir, paste(sample, "cicero_conns.rds", sep = "_")))

# Convert pairwise co-accessibility links to co-accessibility networks
ccans <- generate_ccans(conns)
# Convert connections to links and add to the Seurat object
links <- ConnectionsToLinks(conns = conns, ccans = ccans)
Links(split) <- links
saveRDS(split, file = file.path(save.dir, paste(sample, "Signac_Links.rds", sep = "_")))
|
511ec96913bf11b6b291cee11b975dd9083643fd
|
eb8ac840ab9fae607855149c23cd2960e108cd7d
|
/ciangene/CNV/ExomeDepth/bin/exomeDepth_optparse.R
|
89b1fb61452bf7e787d851a664a218d8c8e617d3
|
[] |
no_license
|
UCLGeneticsInstitute/DNASeq_pipeline
|
29426e33f12b024afd76a32bba5a4836d60d2888
|
36b627f3ac26e6f060f7e2a612344a46e2af015c
|
refs/heads/master
| 2020-12-26T09:27:55.693600
| 2018-12-11T12:14:23
| 2018-12-11T12:14:23
| 63,901,985
| 1
| 2
| null | 2016-07-21T20:58:42
| 2016-07-21T20:58:41
| null |
UTF-8
|
R
| false
| false
| 5,786
|
r
|
exomeDepth_optparse.R
|
# exomeDepth_optparse.R
# Calls CNVs from exome BAM files using the ExomeDepth package.
# Input: either --SampleList (sample IDs whose BAMs are located under
# /SAN/vyplab/UCLex_raw) or --BamList (explicit BAM paths), plus --oDir.
# Output (per sample, in oDir): <sample>_CNV_calls.csv / .RData, plus a
# cached counts.RData shared by all samples.
suppressPackageStartupMessages(library(S4Vectors) )
suppressPackageStartupMessages(library(IRanges) )
suppressPackageStartupMessages(library(Rsamtools))
suppressPackageStartupMessages(library(Biostrings))
suppressPackageStartupMessages(library(XVector) )
suppressPackageStartupMessages(library(GenomicRanges))
suppressPackageStartupMessages(library(ExomeDepth) )
suppressPackageStartupMessages(library(optparse) )
# Command-line interface.
option_list <- list(
make_option(c("--chrom"), default=NULL,help="Chromosome"),
make_option(c("-v", "--verbose"), action="store_true", default=TRUE,help="Print extra output [default]"),
make_option(c("--SampleList"), help="one column file containing list of samples",type='character',default=NULL),
make_option(c("--BamList"), help="one column file containing list of samples",type='character',default=NULL),
make_option(c("--oDir"), help="oDir",type='character')
)
opt <- parse_args(OptionParser(option_list=option_list))
if ( opt$verbose ) {
write("Starting Argument checks...\n", stderr())
}
######################
SampleList<-opt$SampleList
BamList<-opt$BamList
# Reference exon positions and Conrad common CNVs shipped with ExomeDepth.
data(exons.hg19)
data(Conrad.hg19)
fasta<-"/cluster/scratch3/vyp-scratch2/reference_datasets/human_reference_sequence/human_g1k_v37.fasta"
# Batch-size sanity bounds: ExomeDepth needs enough reference samples to
# build a baseline, but very large batches should be split.
min.nb.per.batch<-5
max.nb.per.batch<-15
# The two input modes are mutually exclusive (scalar condition, so &&).
if(!is.null(SampleList) && !is.null(BamList) ) stop("Specify either list of samples to be found or BAM file list directly. Not both.")
if(!is.null(SampleList))
{
if(!file.exists(SampleList))stop("SampleList file doesn't exist. ")
message(paste('Reading samples from',paste0('--',SampleList,'--')))
SampleList<-read.table(SampleList,header=FALSE)
message(paste('Provided',nrow(SampleList),'samples'))
SampleList<-SampleList[,1]
## Find bams for the samples
bamList<-list.files('/SAN/vyplab/UCLex_raw',pattern='bam$',full.names=T)
bamList<-bamList[grep('sorted_unique',bamList)]
sample.bams<-vector()
sample.names<-vector()
for(i in 1:length(SampleList))
{
hit<-grep(SampleList[i],bamList)
if(length(hit)>0)sample.bams<-c(sample.bams,bamList[hit])
if(length(hit)>0)sample.names<-c(sample.names,SampleList[i])
if(length(hit)==0)print(paste('No BAM file found for',SampleList[i]))
}
message(paste('Found BAM files for',length(sample.bams),'samples'))
if(length(SampleList)>max.nb.per.batch) message('There are too many samples, consider splitting?')
if(length(SampleList)<min.nb.per.batch) message('There are too few samples, results may be unreliable')
if(length(sample.bams)>length(SampleList))
{
message("Multiple BAMs exist for >=1 samples. Will use the larger one")
#Find the duplicate files and remove the smaller one.
# NOTE(review): this handles a single duplicated sample; confirm behaviour
# when several samples each match multiple BAMs.
dups<-sample.bams[grep(sample.names[which( lapply(lapply(sample.names,function(x) grep(x,sample.bams)),function(x) length(x)) >1) ],sample.bams)]
file.sizes<-file.size(dups)
sample.bams<-sample.bams[sample.bams!=dups[file.sizes==min(file.sizes)]]
}
}
if(!is.null(BamList))
{
sample.bams<-read.table(BamList,header=FALSE)[,1]
message(paste('Read file containing',length(sample.bams),'sample BAMs'))
}
outDir<-paste0(opt$oDir,'/')
if(!file.exists(outDir))dir.create(outDir)
message(paste('Results will be placed in --',outDir))
write.table(sample.bams,paste0(outDir,'SampleList'),col.names=F,row.names=F,quote=F,sep='\t')
# Read counting is expensive, so results are cached in counts.RData and
# reloaded on subsequent runs.
countsFile<-paste0(outDir,"/counts.RData")
if(!file.exists(countsFile))
{
print("Getting counts per region")
# Fixed a stray double comma here: the original passed an extra empty
# (missing) positional argument into getBamCounts().
my.counts <- getBamCounts(bed.frame = exons.hg19,
                          bam.files = sample.bams,
                          include.chr = FALSE,
                          referenceFasta = fasta)
# turn counts into dataframe
my.counts.dat <- as(my.counts[, colnames(my.counts)], 'data.frame')
print(head(my.counts.dat))
my.counts.dat$chromosome <- gsub(as.character(my.counts.dat$space),pattern = 'chr',replacement = '')
save.image(file=countsFile )
} else load(countsFile)
message('Finished getting counts. now going to call CNVs')
# Columns holding per-BAM counts (identified by 'bam' in the column name).
sample.cols<-grep('bam',colnames(my.counts.dat))
genes<-read.table('/SAN/vyplab/UCLex/support/genes.bed',header=TRUE,sep='\t')
# Call CNVs for each sample in turn, using all remaining samples as the
# candidate reference set.
for(case in 1:length(sample.cols))
{
print(paste('Current case is:',colnames(my.counts.dat)[sample.cols][case]))
output.file<-paste0(outDir,colnames(my.counts.dat)[sample.cols[case]], '_CNV_calls.csv')
callFile<-paste0(outDir,colnames(my.counts.dat)[sample.cols[case]], '_CNV_calls.RData')
pdfFile<-paste0(outDir,colnames(my.counts.dat)[sample.cols[case]], '_CNV_calls.pdf')
#pdf(pdfFile)
my.test <- my.counts.dat[,sample.cols[case] ]
my.reference.set <- as.matrix(my.counts.dat[,sample.cols[-case] ])
# Pick the best-correlated subset of reference samples for this case.
my.choice <- select.reference.set (test.counts = my.test,
reference.counts = my.reference.set,
bin.length = (my.counts.dat$end - my.counts.dat$start)/1000,
n.bins.reduced = 10000)
my.matrix <- as.matrix( my.counts.dat[, my.choice$reference.choice, drop = FALSE])
# Aggregate the chosen reference samples into a single counts vector.
my.reference.selected <- apply(X = my.matrix,
MAR = 1,
FUN = sum)
all.exons <- new('ExomeDepth',
test =my.test,
reference = my.reference.selected,
formula = 'cbind(test, reference) ~ 1')
# Viterbi-style CNV calling over the exon bins.
all.exons <- CallCNVs(x = all.exons,
transition.probability = 10^-4,
chromosome = my.counts.dat$space,
start = my.counts.dat$start,
end = my.counts.dat$end,
name = my.counts.dat$names)
if(nrow(all.exons@CNV.calls)>0)
{
# Annotate calls with overlapping Conrad common CNVs and exon names.
all.exons <- AnnotateExtra(x = all.exons,
reference.annotation = Conrad.hg19.common.CNVs,
min.overlap = 0.5,
column.name = 'Conrad.hg19')
exons.hg19.GRanges <- GenomicRanges::GRanges(seqnames = exons.hg19$chromosome,
IRanges::IRanges(start=exons.hg19$start,end=exons.hg19$end),
names = exons.hg19$name)
all.exons <- AnnotateExtra(x = all.exons,
reference.annotation = exons.hg19.GRanges,
min.overlap = 0.0001,
column.name = 'exons.hg19')
save(all.exons,file=callFile)
write.csv(file = output.file, x = all.exons@CNV.calls,row.names = FALSE)
}
}
|
6087dd85870cf5f3ddd12cee92b3987e430bc099
|
8efa4abbf80541dee202d9211bec2d71991519da
|
/ch_03/ch_03_2.R
|
6c7259c234ce1f2847479e1212f3387d6eb80f87
|
[] |
no_license
|
kimjunho12/R_BigData
|
ad07009c7e9b919f0321b84655758791004cb3ab
|
fdff2da689a31a6bbe38d448c52f7decc0730fee
|
refs/heads/master
| 2023-06-09T10:42:01.070830
| 2021-06-30T01:53:43
| 2021-06-30T01:53:43
| 361,130,167
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 640
|
r
|
ch_03_2.R
|
# Load data from external files (Excel, CSV, RData, SPSS).
library(readxl)
# Read the whole first sheet (result is printed, not stored).
read_excel('../data/customer_profile.xlsx')
# Read a specific cell range; col_names = T treats the first row as headers.
read_excel(
"../data/customer_profile.xlsx",
sheet = NULL,
range = 'B3:E13',
col_names = T
)
# Read the CSV version; stringsAsFactors = F keeps character columns as characters.
read.csv(file = '../data/customer_profile.csv',
header = T,
stringsAsFactors = F)
# Round-trip a simple vector through CSV and RData formats.
test1 = c(1:10)
write.csv(test1, file = '../data/test1.csv')
save(test1, file = '../data/test1.rda')
rm(test1)
load('../data/test1.rda')
# Read an SPSS file with the foreign package.
library(foreign)
iris_spss = read.spss('../data/iris.sav',
to.data.frame = T,
use.value.labels = F) # use.value.labels = F keeps coded values rather than converting value labels to factors
head(iris_spss)
summary(iris_spss)
|
f6d0f81f8e8b1993442cb0da1d16e73c190b3a0b
|
2616a72d7029dd695fc133af709bb9f1a32dc2c1
|
/man/stop.identify.Rd
|
3f76c064fd7df848947c3f19166ad95b9184807b
|
[] |
no_license
|
cran/kineticF
|
0c1e1489738b0bf404a158886db37f2496aecf25
|
06431726251f35b969262e3e119da14dba711fc4
|
refs/heads/master
| 2016-08-11T15:21:04.538141
| 2015-06-04T00:00:00
| 2015-06-04T00:00:00
| 36,883,150
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 613
|
rd
|
stop.identify.Rd
|
\name{stop.identify}
\alias{stop.identify}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Stops the process of re-ordering a matrix of coordinates
}
\description{
Changes the order in which a matrix of coordinates is plotted to allow closure on a polygon.
This function is for internal use and is not meant to be called by the user.
}
\usage{
stop.identify(xy)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{xy}{matrix of coordinates}
}
\value{
A re-ordered matrix of coordinates
}
\author{
Dipesh E Patel & Mario Cortina-Borja
}
|
9a0d437a09bbbcce467135b858bf85f60ead300d
|
ac31cc01afb79292cd70046661bfb70a8fa195fc
|
/source/plot4.R
|
c6d808b84195a6b0e540835e13d729d0ac9f81f4
|
[] |
no_license
|
Coursera00/ExData_Plotting1
|
5cb5d6146cd6bfe5c7aeb46f796eeabd4b489217
|
5c035db03424dedff9dc18f60ed02b9bb20a4e7d
|
refs/heads/master
| 2021-01-18T18:57:00.645147
| 2014-07-12T18:02:17
| 2014-07-12T18:02:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,585
|
r
|
plot4.R
|
# plot4.R — draws four base-graphics panels of the household power consumption
# data for 2007-02-01 and 2007-02-02 into plot4.png.
library(dplyr)
# NOTE(review): hard-coded absolute setwd() makes this script machine-specific;
# consider relative paths instead.
setwd('C:/Users/Jul/Box Sync/_PhD/_Big Data/Coursera/Exploratory Data Analysis/')
#dat <- read.table(pipe('grep "^[1-2]/2/2007" "household_power_consumption.txt"'), header=TRUE, sep=';')
# "?" encodes missing values in this data set.
newFile <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings ="?")
#create new column - DateTime
newFile$DateTime <- paste(newFile$Date, newFile$Time)
# NOTE(review): as.Date() discards the time-of-day part, so DateTime holds dates
# only; the full timestamp is rebuilt below as DateTime2 via strptime().
newFile$DateTime <- as.Date(newFile$DateTime, format = "%d/%m/%Y %H:%M:%S")
#filter values for only two dates and store them in df = subsetted
subsetted <- filter(newFile, DateTime >= as.Date("2007-02-01 00:00:00"), DateTime < as.Date("2007-02-03 00:00:00"))
# Full timestamps used for the x-axis of every panel.
subsetted$DateTime2 <- strptime(paste(subsetted$Date,subsetted$Time), format="%d/%m/%Y %H:%M:%S")
#Plot 4 - Multiple Plots in one
png('plot4.png')
# 2 rows and 2 columns; mfcol fills column-first (top-left, bottom-left,
# top-right, bottom-right).
par(mfcol = c(2,2))
plot( subsetted$DateTime2, subsetted$Global_active_power, type = "l",xlab = "", ylab = "Global Active Power")
plot(subsetted$DateTime2,subsetted$Sub_metering_1,type = "l", xlab = "", ylab = "Energy sub metering", col = "black")
lines(subsetted$DateTime2,subsetted$Sub_metering_2,type = "l", col = "red")
lines(subsetted$DateTime2,subsetted$Sub_metering_3,type = "l", col = "blue")
legend("topright", col = c("black", "red", "blue"), lty= "solid", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3") )
plot( subsetted$DateTime2, subsetted$Voltage, type = "l",xlab = "datetime", ylab = "voltage")
plot(subsetted$DateTime2,subsetted$Global_reactive_power, type ='l', xlab ="datetime", ylab = "Global reactive power")
dev.off()
|
9677c7926043dad01f68bec6898a3f8ad2055453
|
424b0e776929dda8a77732288170eb8b68f5d9c8
|
/plot4.R
|
4e1e15252c7c3d856fd6bdabac8072ba97b1f5aa
|
[] |
no_license
|
ankurkhaitan/ExData_Course_Project2
|
b21ba28c0be904633a2c4289e9d5ead8d5f05108
|
0cc6d06044b79b5edf743b06032275dca7eeb945
|
refs/heads/master
| 2022-04-18T23:47:09.724723
| 2020-04-19T15:33:31
| 2020-04-19T15:33:31
| 257,034,966
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,952
|
r
|
plot4.R
|
# plot4.R — plots total US PM2.5 emissions from coal-combustion-related sources
# per year (1999-2008) into plot4.png.
# ========================================================================================================================================
# Load Libraries
# ========================================================================================================================================
library('dplyr')
library('plyr')
library('ggplot2')
# ========================================================================================================================================
# Download and extract Data and load file
# ========================================================================================================================================
zipFile <- "exdata%2Fdata%2FNEI_data.zip"
if (!file.exists("Data/Source_Classification_Code.rds") && !file.exists("Data/summarySCC_PM25.rds")) {
dataURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(dataURL, zipFile, mode = "wb")
unzip(zipFile, files = NULL, list = FALSE, overwrite = TRUE, junkpaths = FALSE, exdir = "Data", unzip = "internal", setTimes = FALSE)
file.remove(zipFile)
}
# Define Directory where File is located
dirName <- 'Data'
# load classification code data
fileNameClass = "Source_Classification_Code.rds"
fileNameClass <- file.path(dirName, fileNameClass)
# load Summary CSS PM25 data
fileNameSummary = "summarySCC_PM25.rds"
fileNameSummary <- file.path(dirName, fileNameSummary)
# data <- read.table(file = fileNamePower, header = TRUE, sep = ';')
# NOTE(review): the variable names are swapped relative to the usual course
# convention — NEI here holds the Source Classification Code table (SCC,
# EI.Sector columns) and SCC holds the PM2.5 emissions summary (year, SCC,
# Emissions columns). The code below uses them consistently with this swap,
# so renaming would require touching every use; confirm before changing.
NEI <- readRDS(file = fileNameClass)
SCC <- readRDS(file = fileNameSummary)
# ========================================================================================================================================
# Data preparation
# ========================================================================================================================================
# calculate total amount of emissions per year and SCC
dataSCC <- ddply(SCC, .(year, SCC), summarise, Emissions = sum(Emissions))
# remove not required columns
NEI <- NEI[, c('SCC', 'EI.Sector')]
# convert all fields to lower case
NEI$EI.Sector <- lapply(NEI$EI.Sector, function(x) tolower(as.character(x)))
# define regex pattern
pattern <- 'fuel comb - [a-z ,/]* - coal'
# subset and retrieve only comb / coal data based on regex pattern
comb_coal <- subset(NEI, grepl(pattern, NEI$EI.Sector))
# evaluate which code exists in both data sets
exist_in_both <- ifelse(dataSCC$SCC %in% comb_coal$SCC, TRUE, FALSE)
dataSCC_ <- subset(dataSCC, exist_in_both)
# calculate total amount of emissions per year and SCC
dataSCC_ <- ddply(dataSCC_, .(year), summarise, Emissions = sum(Emissions))
# divide total amount of emissions by 1'000 (thousand tons)
# NOTE(review): lapply() makes Emissions a *list* column; base plot/text below
# appear to tolerate this via coercion, but sapply-style numeric output would
# be cleaner — verify before refactoring.
dataSCC_$Emissions <- lapply(dataSCC_$Emissions, function(x) round(x / 1e3, 2))
# ========================================================================================================================================
# Create and plot graph
# ========================================================================================================================================
png(filename = "plot4.png", width = 600, height = 600, units = "px", bg = "white")
# define margins
par(mfrow = c(1, 1), mar = c(5, 5, 3, 1))
# NOTE(review): "thousend" in the y-axis label is a typo in a runtime string;
# left unchanged here to keep this a documentation-only edit.
with(dataSCC_, plot(year, Emissions, pch = 20, col = "red", xlim = c(1998, 2009), xaxt = "n", cex = 2.5, panel.first = grid(),
main = expression("US Annual PM"[2.5] * " Emissions from coal combustion-related sources"),
xlab = "Year", ylab = expression("PM"[2.5] * " Emissions (thousend tonnes)")))
# add a line between points
lines(dataSCC_$year, dataSCC_$Emissions, type = "l", lwd = 2)
axis(1, c(1999, 2002, 2005, 2008))
# print values for each point in graph
text(dataSCC_$year, dataSCC_$Emissions, dataSCC_$Emissions, cex = 1.0, pos = 4, col = "black")
dev.off()
|
1cb8db771b18515ce7b843ddcb801858330a2186
|
a0585ca647461121f67f91069809cecf7f5a7e5f
|
/app/server.R
|
f78741b4ffc591d624c73048329ca9ffad1b675f
|
[] |
no_license
|
tyz910/dsscapstone
|
9e8f1ec60d83cb97a0bab6282d07fc62d741d905
|
07a33d1c16eaaf39ad169781ad6820a396b52db4
|
refs/heads/master
| 2020-05-21T12:50:37.492601
| 2015-08-15T05:55:34
| 2015-08-15T05:55:34
| 39,577,614
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 255
|
r
|
server.R
|
# Shiny server: renders the predicted next-word candidates for the typed
# sentence as an HTML bullet list.
source("prediction.R")
library(shiny)

function(input, output) {
  output$prediction <- renderUI({
    # Wrap each predicted word in a list item, then join into one <ul>.
    words <- predict(input$sentence)
    items <- lapply(words, function(w) paste('<li>', w, '</li>'))
    HTML(paste("<ul>", paste(items, collapse = " "), "</ul>"))
  })
}
|
d828fc418efbad64747ba421ecde80be8c95456f
|
62954b5457c4ff4392645d54e7c214a73fb440ad
|
/r_work/cacheSolve.R
|
76a85d1bb798e8c633d4227966639b88c608afeb
|
[] |
no_license
|
huskertc/datasciencecoursera
|
4ee9c44b949d6219bf021a34ff3a9390070aa771
|
8696b99a246d39dd8e04ff82e758687a6e6ac478
|
refs/heads/master
| 2020-04-05T23:40:56.434012
| 2015-02-22T23:24:23
| 2015-02-22T23:24:23
| 29,822,882
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 670
|
r
|
cacheSolve.R
|
## cacheSolve: return the inverse of the cache-matrix object `x`.
## If a cached inverse is already stored it is returned directly (with a
## message); otherwise the underlying matrix is fetched and handed to
## x$set_inverse(), whose return value (the computed inverse) is returned.
cacheSolve <- function(x, ...) {
  cached <- x$get_inverse()              # previously computed inverse, if any
  if (is.null(cached)) {
    # Cache miss: fetch the raw matrix and let the cache object compute
    # and store its inverse.
    raw <- x$get()
    cached <- x$set_inverse(raw)
  } else {
    message("getting cached inverse matrix")
  }
  cached
}
|
04f3a5de6243150fd07731c88c5b5e2d04ff2ef0
|
958ebca1fab699ba605b7a38f05ff47dab9b4d69
|
/global.R
|
276fd1e9aad2237af9e1d72fc3007d095045ba80
|
[] |
no_license
|
BeachA89/Canoe-Sprint-Stroke-Analysis
|
78ec431da8da74ea9267562f65d9d836374c000e
|
81e02b189c0fd7da6858746c1d2fd078d4bb43ab
|
refs/heads/main
| 2023-02-11T19:01:52.748274
| 2021-01-09T22:25:24
| 2021-01-09T22:25:24
| 328,259,850
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 298
|
r
|
global.R
|
# global.R — shared setup for the Shiny app: helper functions and packages.
source("peakdet.R")

# Attach each package exactly once (the original repeated five library()
# calls: quantmod, zoo, R.utils, svDialogs, ggplot2). Re-attaching an
# already-attached package is a no-op, so this first-occurrence order
# preserves the original search-path masking exactly.
library(quantmod)
library(zoo)
library(R.utils)
library(svDialogs)
library(car)
library(ggplot2)
library(ggpmisc)
library(signal)
library(shiny)
library(shinythemes)
library(dplyr)
library(DT)
|
33afc2c4c3459901903e56cc9acb032e1d356859
|
6dbac72ced093929c84431726d2a6df3b90fd9f6
|
/lib/make_cluster_diagram.r
|
65814175d92fee18dbd43f8333b0c4e1afef4928
|
[] |
no_license
|
serina-robinson/wastewater_isolates
|
64ded6155129765d7d896508e6708b03ca1071b0
|
ee208aea9d5f4a986300faa6d1001c59b7b7ddd4
|
refs/heads/master
| 2020-08-01T05:15:31.874845
| 2019-10-20T05:46:28
| 2019-10-20T05:46:28
| 210,876,407
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,424
|
r
|
make_cluster_diagram.r
|
# make_cluster: build a sequence-similarity network from an all-vs-all BLAST
# table and plot a colour legend for the protein families.
#
# Arguments:
#   shrt    - all-vs-all comparison table with columns prot1, prot2, eval
#   thresh1 - first e-value exponent threshold to try (edges kept if eval < 1e-thresh)
#   thresh2 - last e-value exponent threshold
#   bynum   - step between thresholds
#   namvec  - names passed to color_mibig() for node colouring
#   pal2    - palette passed to color_mibig()
#
# Returns a list with: net (edge table at the final threshold), g (igraph
# graph at the final threshold), legend (data frame of colours/protein families).
#
# Side effects: sources src/color_mibig_ANL.r and writes a legend PDF to output/.
make_cluster<-function(shrt, thresh1, thresh2, bynum, namvec, pal2) { # input is an all vs. all table
# #Pull out columns to make a sequence similarity network
#First remove all comparisons between identical proteins (closed loop nodes)
noprs<-shrt[!shrt$prot1==shrt$prot2,]
noprs<-noprs[order(noprs$eval),]
source("src/color_mibig_ANL.r")
meta<-color_mibig(noprs, namvec, pal2)
#Make a unique data frame
metu<-unique(meta)
# NOTE(review): net, g and l are overwritten on every loop iteration, so only
# the graph for the FINAL threshold (thresh2) survives to the return value;
# the per-threshold plotting that presumably consumed the intermediate graphs
# is commented out below. Confirm this is the intended behaviour.
for(i in seq(from = thresh1, to = thresh2, by = bynum)) {
# Set a similarity threshold
thresh<-i
net <- noprs[noprs$eval<(as.numeric(paste0("1.00e-",thresh))),]
metu2 <- metu[order(metu$size),]
# write_csv(net, "output/graph_net_test.csv")
#Simplify the graph
#gr<-simplify(graph.data.frame(net,vertices=metu2,directed = FALSE))
g <- simplify(graph.data.frame(net, vertices=metu2, directed = FALSE))
# Drop isolated nodes (degree < 1).
g <- delete.vertices((g), degree(g)<1)
#Append metadata
# NOTE(review): these two assignments are no-ops (V(g)$color <- V(g)$color);
# presumably they once copied attributes from metu2 — verify before removing.
V(g)$color <- V(g)$color
V(g)$size <- V(g)$size
l <- layout_components(g)
}
##Make the graph
# tiff(file = paste0("output/",nrow(net),"_OleC_only_network_e-",thresh,"_300dpi.tiff"), units = "in", width = 10, height = 10, res = 300)
# jpeg(file = paste0("output/", nrow(net),"_antismashv2_CreM_predictions_network_try3_e-",thresh,"_1000px_labeled.jpg"), width = 2000, height = 2000)
# # pdf(file = paste0("output/",nrow(net),"_OleC_only_network_e-",thresh,".pdf"),width = 20, height = 20)
# par(mar=c(1,1,1,1))
# pl<-plot(g, vertex.label = NA,
# # vertex.label=ifelse(grepl("_MACS",V(g)$name), V(g)$name, NA),
# layout=l, edge.color="gray40", edge.width=0.3)
# title(main = paste0("BLAST e-value cut-off: 1e-",thresh))
# dev.off()
# }
#data<-toVisNetworkData(g)
#visNetwork(nodes=data$nodes,edges=data$edges,height="500px") %>%
#visEdges(smooth = FALSE) %>%
#visPhysics(stabilization = FALSE) %>%
#visSave(file = paste0("Biuret_hydrolase_network_e-",thresh,".html"), background = "white")
# Make a legend
colors<-unique(metu$color)
proteins<-unique(metu$fam)
clegend <- data.frame(colors, proteins)
# Plot the legend
pdf(file=paste0("output/", nrow(net), "_legend_functional_class_final.pdf"), width = 5, height = 10)
plot.new()
legend("bottomright",legend=proteins, fill = colors, bty="n") #horiz = T, nrow = 2)
dev.off()
return(list(net=net,g=g, legend = clegend))
}
|
91a231c026be0a457f1acb55291fc1d0fb7d9857
|
b8f69e2a1d3d706f2d9b767b99c0df95b23ad56f
|
/man/pcaGuide.Rd
|
ed188f3bf1e7d85916ac1955d563983de15c732a
|
[
"MIT"
] |
permissive
|
cran/wilson
|
b03932a828d284a6b8b8b29411721727c6268ec0
|
e2dec1181e01d212b545a6ebfb53beee6320cf2f
|
refs/heads/master
| 2021-06-08T21:43:25.793829
| 2021-04-19T08:40:02
| 2021-04-19T08:40:02
| 145,903,340
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 340
|
rd
|
pcaGuide.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pca.R
\name{pcaGuide}
\alias{pcaGuide}
\title{pca module guide}
\usage{
pcaGuide(session)
}
\arguments{
\item{session}{The shiny session}
}
\value{
A shiny reactive that contains the texts for the Guide steps.
}
\description{
pca module guide
}
|
f331986a8d73b85ea412ffeca98524fd4e6978ea
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MazamaSpatialUtils/examples/dissolve.Rd.R
|
e9cf917b8e541e023ab65fa1187cdf86928744cf
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 256
|
r
|
dissolve.Rd.R
|
# Extracted example from the MazamaSpatialUtils::dissolve() help page.
library(MazamaSpatialUtils)
### Name: dissolve
### Title: Aggregate shapes in a SpatialPolygonsDataFrame
### Aliases: dissolve
### ** Examples
# Merge country polygons by UN region, summing their "area" field.
regions <- dissolve(SimpleCountries, field = "UN_region", sum_fields = "area")
plot(regions)
# Inspect the aggregated attribute table.
regions@data
|
aec915421746f538009085dbad4bba5920736e5c
|
83f461519bff4467a1a175ca686ad06a2a7e257b
|
/R/kfn.R
|
004785feacde1665dd15c1f1ef5a75adbc9387c7
|
[] |
no_license
|
Yashwants19/RcppMLPACK
|
3af64c6b1327e895b99637649591d1671adf53a5
|
2d256c02058aa7a183d182079acff9037a80b662
|
refs/heads/master
| 2022-12-04T05:06:17.578747
| 2020-07-22T12:45:42
| 2020-07-22T12:45:42
| 252,217,735
| 9
| 0
| null | 2020-08-18T06:15:14
| 2020-04-01T15:41:38
|
C++
|
UTF-8
|
R
| false
| false
| 5,961
|
r
|
kfn.R
|
#' @title k-Furthest-Neighbors Search
#'
#' @description
#' An implementation of k-furthest-neighbor search using single-tree and
#' dual-tree algorithms. Given a set of reference points and query points, this
#' can find the k furthest neighbors in the reference set of each query point
#' using trees; trees that are built can be saved for future use.
#'
#' @param algorithm Type of neighbor search: 'naive', 'single_tree', 'dual_tree',
#' 'greedy'. Default value "dual_tree" (character).
#' @param epsilon If specified, will do approximate furthest neighbor search with
#' given relative error. Must be in the range [0,1). Default value "0"
#' (numeric).
#' @param input_model Pre-trained kFN model (KFNModel).
#' @param k Number of furthest neighbors to find. Default value "0" (integer).
#' @param leaf_size Leaf size for tree building (used for kd-trees, vp trees, random
#' projection trees, UB trees, R trees, R* trees, X trees, Hilbert R trees, R+
#' trees, R++ trees, and octrees). Default value "20" (integer).
#' @param percentage If specified, will do approximate furthest neighbor search. Must
#' be in the range (0,1] (decimal form). Resultant neighbors will be at least
#' (p*100) % of the distance as the true furthest neighbor. Default value "1"
#' (numeric).
#' @param query Matrix containing query points (optional) (numeric matrix).
#' @param random_basis Before tree-building, project the data onto a random
#' orthogonal basis. Default value "FALSE" (logical).
#' @param reference Matrix containing the reference dataset (numeric matrix).
#' @param seed Random seed (if 0, std::time(NULL) is used). Default value "0"
#' (integer).
#' @param tree_type Type of tree to use: 'kd', 'vp', 'rp', 'max-rp', 'ub', 'cover',
#' 'r', 'r-star', 'x', 'ball', 'hilbert-r', 'r-plus', 'r-plus-plus', 'oct'.
#' Default value "kd" (character).
#' @param true_distances Matrix of true distances to compute the effective error
#' (average relative error) (it is printed when -v is specified) (numeric
#' matrix).
#' @param true_neighbors Matrix of true neighbors to compute the recall (it is
#' printed when -v is specified) (integer matrix).
#' @param verbose Display informational messages and the full list of parameters and
#' timers at the end of execution. Default value "FALSE" (logical).
#'
#' @return A list with several components:
#' \item{distances}{Matrix to output distances into (numeric matrix).}
#' \item{neighbors}{Matrix to output neighbors into (integer matrix).}
#' \item{output_model}{If specified, the kFN model will be output here (KFNModel).}
#'
#' @details
#' This program will calculate the k-furthest-neighbors of a set of points. You
#' may specify a separate set of reference points and query points, or just a
#' reference set which will be used as both the reference and query set.
#'
#' @author
#' mlpack developers
#'
#' @export
#' @examples
#' # For example, the following will calculate the 5 furthest neighbors of
#' # eachpoint in "input" and store the distances in "distances" and the
#' # neighbors in "neighbors":
#'
#' \donttest{
#' output <- kfn(k=5, reference=input)
#' distances <- output$distances
#' neighbors <- output$neighbors
#' }
#'
#' # The output files are organized such that row i and column j in the
#' # neighbors output matrix corresponds to the index of the point in the
#' # reference set which is the j'th furthest neighbor from the point in the
#' # query set with index i. Row i and column j in the distances output file
#' # corresponds to the distance between those two points.
kfn <- function(algorithm=NA,
                epsilon=NA,
                input_model=NA,
                k=NA,
                leaf_size=NA,
                percentage=NA,
                query=NA,
                random_basis=FALSE,
                reference=NA,
                seed=NA,
                tree_type=NA,
                true_distances=NA,
                true_neighbors=NA,
                verbose=FALSE) {
  # Register this binding with mlpack's IO layer; the string is the program
  # name used in mlpack's parameter and timer output.
  IO_RestoreSettings("k-Furthest-Neighbors Search")
  # Process each input argument before calling mlpackMain(). NA acts as the
  # "not supplied" sentinel: a parameter is forwarded to mlpack only when the
  # caller provided a value (FALSE plays the same role for the logical flags).
  if (!identical(algorithm, NA)) {
    IO_SetParamString("algorithm", algorithm)
  }
  if (!identical(epsilon, NA)) {
    IO_SetParamDouble("epsilon", epsilon)
  }
  if (!identical(input_model, NA)) {
    IO_SetParamKFNModelPtr("input_model", input_model)
  }
  if (!identical(k, NA)) {
    IO_SetParamInt("k", k)
  }
  if (!identical(leaf_size, NA)) {
    IO_SetParamInt("leaf_size", leaf_size)
  }
  if (!identical(percentage, NA)) {
    IO_SetParamDouble("percentage", percentage)
  }
  # Matrix-valued inputs are coerced with to_matrix() so data frames and
  # plain vectors are accepted as well as matrices.
  if (!identical(query, NA)) {
    IO_SetParamMat("query", to_matrix(query))
  }
  if (!identical(random_basis, FALSE)) {
    IO_SetParamBool("random_basis", random_basis)
  }
  if (!identical(reference, NA)) {
    IO_SetParamMat("reference", to_matrix(reference))
  }
  if (!identical(seed, NA)) {
    IO_SetParamInt("seed", seed)
  }
  if (!identical(tree_type, NA)) {
    IO_SetParamString("tree_type", tree_type)
  }
  if (!identical(true_distances, NA)) {
    IO_SetParamMat("true_distances", to_matrix(true_distances))
  }
  if (!identical(true_neighbors, NA)) {
    IO_SetParamUMat("true_neighbors", to_matrix(true_neighbors))
  }
  if (verbose) {
    IO_EnableVerbose()
  } else {
    IO_DisableVerbose()
  }
  # Mark all output options as passed so mlpack populates them.
  IO_SetPassed("distances")
  IO_SetPassed("neighbors")
  IO_SetPassed("output_model")
  # Call the underlying compiled mlpack program.
  kfn_mlpackMain()
  # Tag the returned model pointer with its type so other bindings can
  # validate it when it is passed back in as input_model.
  output_model <- IO_GetParamKFNModelPtr("output_model")
  attr(output_model, "type") <- "KFNModel"
  # Extract the results in the order documented in @return.
  out <- list(
      "distances" = IO_GetParamMat("distances"),
      "neighbors" = IO_GetParamUMat("neighbors"),
      "output_model" = output_model
  )
  # Clear the IO parameters so the next binding call starts from a clean slate.
  IO_ClearSettings()
  return(out)
}
|
7dd728304e2f2581cc9d7c56471070af91657c82
|
aa11979c1b0293a817175def253b107e444e759c
|
/plots/plotv2.R
|
000cc5d5082fd6d34c36799dde0de59f6315eb80
|
[] |
no_license
|
labrax/arduino2pi
|
969c7a5ebcd48b34c05e8342cadd26c5cc373c4e
|
1f15719210a659afedc347be55372527a15575ce
|
refs/heads/master
| 2020-04-15T00:20:01.642403
| 2019-09-22T11:02:51
| 2019-09-22T11:02:51
| 164,236,226
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,763
|
r
|
plotv2.R
|
library(readr)
library(ggplot2)
# Read the v2 Arduino sensor logs and return one data frame with a single
# POSIXct `time` column (converted from German recording time to UK time).
#
# @param files Character vector of CSV file names to read. Defaults to the
#   two original log files so existing callers are unaffected.
# @param dir Directory containing the files. Default "data".
# @return A data frame with the sensor columns plus a `time` column; the raw
#   year/month/day/hour/minute/second columns are dropped.
read_v2 <- function(files = c("20190107_v2.csv", "20190108_v2.csv"),
                    dir = "data") {
  # Column layout of the v2 logger CSV files (no header row in the files).
  cols <- c("year", "month", "day", "hour", "minute", "second",
            "amount_samples", "pir_min", "pir_max", "pir_sum",
            "mv_min", "mv_max", "mv_sum", "phr_min", "phr_max",
            "bme_temp_avg", "bme_pressure_avg")
  # Read every file and bind once at the end, instead of growing the data
  # frame with rbind() inside a loop (the original O(n^2) pattern).
  df <- do.call(rbind, lapply(files, function(file) {
    read_csv(file.path(dir, file), col_names = cols)
  }))
  # Reassemble the timestamp from its components.
  timestr <- paste(paste(df$year, df$month, df$day, sep = "-"),
                   paste(df$hour, df$minute, df$second, sep = ":"))
  times <- as.POSIXct(timestr, tz = "Europe/Berlin") # recorded in German time
  attributes(times)$tzone <- "Europe/London"         # we need it in UK time
  # Drop the raw component columns now that `time` carries the information.
  df$year <- NULL
  df$month <- NULL
  df$day <- NULL
  df$hour <- NULL
  df$minute <- NULL
  df$second <- NULL
  df$time <- times
  return(df)
}
df <- read_v2();
# Optional time-window filters, kept for interactive use:
# df <- df[df$time > as.POSIXct("2019/01/08 08:10:00", tz="Europe/London"),]
# df <- df[df$time < as.POSIXct("2019/01/08 08:30:00", tz="Europe/London"),]
# ggplot(df, aes(x=time, y=bme_temp_avg, col=4)) + geom_line() + theme(legend.position="none")
# ggplot(df, aes(x=time, y=bme_pressure_avg, col=5)) + geom_line() + theme(legend.position="none")
head(df)
# Normalize each sensor series to [0, 1] by its maximum so all series can
# share one y-axis.
# NOTE(review): max() returns NA if a column contains any NA, which would
# blank the whole series -- TODO confirm the logger never emits missing values.
df$phr_max <- df$phr_max/max(df$phr_max)
df$mv_max <- df$mv_max/max(df$mv_max)
df$bme_temp_avg <- df$bme_temp_avg/max(df$bme_temp_avg)
df$bme_pressure_avg <- df$bme_pressure_avg/max(df$bme_pressure_avg)
# Overlay all normalized series against time: PIR events as points, the rest
# as lines. Colors are fixed integer codes and the legend is suppressed.
ggplot(df, aes(x=time)) +
  geom_point(aes(y=pir_max, color=1)) +
  geom_line(aes(y=mv_max), color=2) +
  geom_line(aes(y=phr_max), color=3) +
  geom_line(aes(y=bme_temp_avg), color=4) +
  geom_line(aes(y=bme_pressure_avg), color=5) +
  ylab('') +
  theme(legend.position="none")
|
065a01687af0c807b66ed23754bc5d3038faf5c2
|
1fd8b00e9265e4998e5b76ea020f2420853b5875
|
/ggplot_bar_status_count.R
|
6ba98db0d8bc7be9bee7dfeeeab70fa27e7add43
|
[] |
no_license
|
avrao/log_analysis_r
|
b705f09936c1af1f34e6e9f6802edf786b8e4ca4
|
950f15aebf63e66e5c99cd5806c52eae67743f04
|
refs/heads/master
| 2020-03-28T12:08:09.745261
| 2018-09-12T16:24:35
| 2018-09-12T16:24:35
| 148,271,996
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 780
|
r
|
ggplot_bar_status_count.R
|
library(ggplot2)
#access.log is required to be included int eh same directory
# Bar chart of HTTP status-code counts parsed from an Apache access log.
# access.log is required to be in the same directory.
df <- read.table("access.log")
colnames(df) <- c("ip_address", "nameless_v2", "nameless_v3", "date",
                  "nameless_v5", "request", "status", "bytes", "url",
                  "browser_specs")
# The raw date field looks like "[10/Oct/2000:13:55:36"; keep the date part.
df$date <- as.Date(df$date, "[%d/%b/%Y")
str(df)
# Requests per day (printed for inspection; not used in the plot below).
print(table(df$date))
reqs <- as.data.frame(table(df$date))
print(reqs)
# Refer to columns by bare name inside aes(); using df$status inside aes()
# is an anti-pattern that breaks grouping/faceting. format() keeps the
# original behavior of treating the status code as a discrete label.
ggplot(data = df, aes(x = format(status))) +
  geom_bar() +
  xlab("Status") +
  ylab("Count") +
  theme(axis.text.x = element_text(color = "black", size = 8, angle = 45),
        axis.text.y = element_text(color = "black", size = 8, angle = 45))
ggsave("plot_bar_status_cnt.png", width = 15, height = 15)
|
d21277db8ff5deaab27d26e5e47cc148a43bcb00
|
af9fab9a0a1d0a37009c24fc2f57a9b5adae945a
|
/StockSplits.R
|
934fe4b3e9be28ac0fb68cd61833acfca322a858
|
[] |
no_license
|
antonnemes/WRDS-CRSP-StockSplits
|
fbc1a41a5e3521cd543aecd04972efeaaf751319
|
60a8774c58280ad3882f5dd5d246dde44a109f0f
|
refs/heads/master
| 2022-11-24T21:22:41.164120
| 2020-07-18T23:06:50
| 2020-07-18T23:06:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,810
|
r
|
StockSplits.R
|
####################################
# This code produces Stock Splits
# From the CRSP DSE files
# Note specific treatment and
# selection of share codes
# adjust to your own needs
# Use of this script contains no
# warrants.
####################################
# J.T. Fluharty-Jaidee
# last edit: 07/13/2020
####################################
library(tidyverse)
library(readr)
library(stringr)
library(withr)
library(RPostgres)
library(sqldf)
library(lubridate)
library(MASS)
library(fractional)
####### Log into WRDS and set up the direct connection
closeAllConnections()
# Open a direct PostgreSQL connection to WRDS (Wharton Research Data
# Services). Fill in your own WRDS username and password before running;
# sslmode='require' is mandated by the WRDS server.
wrds <- dbConnect(Postgres(),
                  host='wrds-pgdata.wharton.upenn.edu',
                  port=9737,
                  dbname='wrds',
                  sslmode='require',
                  user='',
                  password='') ### note update User and password for WRDS
############### Not required: Pull schemas if you need to find variable names
##### CRSP DSF schema
# res <- dbSendQuery(wrds, "select column_name
# from information_schema.columns
# where table_schema='crsp'
# and table_name='dsf'
# order by column_name")
# data <- dbFetch(res, n=-1)
# dbClearResult(res)
# data
#
###### COMPUSTAT Funda schema
# res <- dbSendQuery(wrds, "select column_name
# from information_schema.columns
# where table_schema='comp'
# and table_name='funda'
# order by column_name")
# data <- dbFetch(res, n=-1)
# dbClearResult(res)
# data
### Note: DISTCD is the distribution code first digit of 5 indicates splits or stock dividends, second digit
####### represents a secondary payment method
####### 0 - unknown
####### Code Meaning
####### 0 unknown, not yet coded
####### 1 unspecified or not applicable
####### 2 cash, United States dollars
####### 3 cash, foreign currency converted to US dollars
####### 4 cash, Canadian dollars (now obsolete, converted to US dollars)
####### 5 same issue of common stock
####### 6 units including same issue of common stock
####### 7 an issue of a different common stock which is on the file
####### 8 other property
##############
####### Remaining digits 3 and 4 represent tax treatment of the distribution, CRSP notes that they do not
####### validate that these are correct so all with in the range are collected.
##############
####### SHRCD represents the type of company shares, 10 to 18 captures all equities and funds, true single-stock equity is 10 and 11 only,
####### filtered later in case others are needed.
##############
# Pull distribution events (crsp.dse) joined to daily stock data (crsp.dsf).
# Distribution codes 5000-5199 and 5500-5699 capture splits / stock
# dividends (first digit 5) across the tax-treatment sub-codes (see notes
# above); share codes 10-18 cover equities and funds, filtered further later.
#
# Bug fix: the original WHERE clause ended with
#   ... AND a.distcd between 5000 AND 5199 OR a.distcd between 5500 AND 5699
# In SQL, AND binds tighter than OR, so rows with distcd in 5500-5699 were
# returned regardless of the date-range and share-code filters.
# Parenthesizing the OR restores the intended filter.
crsp.down.call <- dbSendQuery(wrds, "SELECT a.permco, a.permno, a.date, a.cusip, a.dclrdt, a.event, a.paydt, a.rcrddt,
                              a.distcd, a.divamt, a.facpr, a.facshr, b.cfacpr, b.cfacshr,b.shrout
                              FROM crsp.dse a join crsp.dsf b
                              ON a.permno=b.permno
                              AND a.date=b.date
                              WHERE a.date between '2000-01-01'
                              AND '2020-12-31'
                              AND a.shrcd between 10 AND 18
                              AND (a.distcd between 5000 AND 5199 OR
                                   a.distcd between 5500 AND 5699)
                              ")
crsp.down <- dbFetch(crsp.down.call, n=-1)
dbClearResult(crsp.down.call)
head(crsp.down)
##### The above does not include the names or tickers, match back to the DSE.names set for those.
# The event query above does not include company names or tickers; pull the
# DSE names history separately. Each row is valid from `date` to `nameendt`
# (a condensed from/to structure that is expanded to daily rows below).
crsp.down.call.names <- dbSendQuery(wrds, "SELECT a.permco, a.permno, a.comnam, a.date, a.shrcd,
                                    a.ticker, a.tsymbol, a.nameendt
                                    FROM crsp.dse a
                                    WHERE a.nameendt between '1995-01-01'
                                    AND '2020-12-31'
                                    AND a.shrcd between 10 AND 18
                                    ")
crsp.down.names <- dbFetch(crsp.down.call.names, n=-1)
dbClearResult(crsp.down.call.names)
head(crsp.down.names)
#### Get the list of all names in the sequence, since the names file is constructed as a "from-to" condensed list, you need to expand it
########## to match to the split-event date.
# Expand each condensed name record (date .. nameendt) into one row per
# calendar day, forward-filling the identifying fields, so names can be
# joined to split events by exact date. Row-wise split -> per-row date
# sequence -> fill -> re-bind; duplicates on (permco, date) are dropped
# keeping the first occurrence.
# NOTE(review): this expansion is memory-heavy for long name histories --
# presumably acceptable for this sample window.
namesmaster<-crsp.down.names %>% split(., sort(as.numeric(rownames(.)))) %>%
  map(~complete(data=.x, date = seq.Date(as.Date(.x$date, format="%Y-%m-%d"),as.Date(.x$nameendt, format="%Y-%m-%d"),by="day"))) %>%
  map(~fill(data=.x,permco,permno,comnam,shrcd,ticker,tsymbol)) %>% bind_rows() %>% distinct(permco,date,.keep_all = TRUE) %>% dplyr::select(-nameendt)
# Attach names/tickers to each split event by (permno, event date);
# left join keeps events even when no name record matches.
totalSplit<-merge(crsp.down,namesmaster,by.x=c("permno","date"),by.y=c("permno","date"),all.x=TRUE)
# Checkpoint the merged events before filtering.
saveRDS(totalSplit,"/scratch/wvu/totalSplit1.rds")
##### following research to the right, splits are distributions which have facpr==facshr, a facpr of 0 would likely be cash distribution, so for splits/reverses select not 0.)
##### Mergers are negative facpr so restrict to >0.
# Keep true single-stock common equity (share codes 10 and 11) and true
# splits: facpr > 0 (mergers carry negative facpr, cash distributions zero)
# with facpr == facshr. See Lin, Singh, Yu (2009, JFE) p.477 and
# Minnick and Raman (2013, FM) for this treatment.
#
# Bug fix: in R, & binds tighter than |, so the original
#   filter(shrcd==10 | shrcd==11 & facpr>0 & facpr==facshr)
# applied the facpr conditions only to shrcd==11 rows and let every
# shrcd==10 row through unfiltered. Parentheses restore the intent.
totalSplit <- totalSplit %>%
  filter((shrcd == 10 | shrcd == 11) & facpr > 0 & facpr == facshr)
totalSplit$facpr2<-totalSplit$facpr+1 ### add one to facpr to get the split factor
# Express the factor as an integer ratio (note: Lin et al. and Minnick have
# larger samples because they match to Compustat).
totalSplit$SplitRatioTop<-totalSplit$facpr2 %>% fractional %>% numerators()
totalSplit$SplitRatioBottom<-totalSplit$facpr2 %>% fractional %>% denominators()
###### requested ratio style; however, use facpr for calculations.
totalSplit$RatioReport<-paste0(totalSplit$SplitRatioBottom,":",totalSplit$SplitRatioTop)
print(paste0("There are: ", n_distinct(totalSplit$permco.x)," unique firms."))
print(paste0("There are: ", n_distinct(totalSplit)," unique split events."))
saveRDS(totalSplit,"/scratch/wvu/totalSplit2.rds")
###### Compustat CIK Name Pull, if you need to match to the CIKs from COMPUSTAT, note this may
########## reduce the sample significantly as CRSP-COMPUSTAT do not link perfectly.
########## see the CCM linking below.
# Pull Compustat annual fundamentals identifiers (cusip, cik, gvkey) for the
# CCM link below. The four filters select the standard consolidated
# industrial-format, domestic-population records (the conventional
# Compustat screen to avoid duplicate firm-years).
comp.cik.merge.call <- dbSendQuery(wrds, "SELECT cusip, cik, datadate, fyear, gvkey
                                   FROM comp.funda
                                   WHERE datadate between '1995-01-01'
                                   AND '2020-12-31'
                                   AND datafmt = 'STD'
                                   AND consol = 'C'
                                   AND indfmt ='INDL'
                                   AND popsrc = 'D'
                                   ")
comp.cik.merge <- dbFetch(comp.cik.merge.call, n=-1)
dbClearResult(comp.cik.merge.call)
head(comp.cik.merge)
# ####### Collect the CCM_LINKTABLE, schema
# res <- dbSendQuery(wrds, "select column_name
# from information_schema.columns
# where table_schema='crsp'
# and table_name='ccmxpf_linktable'
# order by column_name")
# data <- dbFetch(res, n=-1)
# dbClearResult(res)
# head(data)
#### Create the CCM_LINKTABLE WITH A SQL MERGE
# Pull the CRSP/Compustat Merged (CCM) link history, which maps gvkey <->
# permno with validity windows (linkdt .. linkenddt).
res <- dbSendQuery(wrds,"select GVKEY, LPERMNO, LINKDT, LINKENDDT, LINKTYPE, LINKPRIM
                   from crsp.ccmxpf_lnkhist")
data.ccmlink <- dbFetch(res, n = -1)
dbClearResult(res)
head(data.ccmlink)
data.ccm <- data.ccmlink %>%
  # use only primary links (from WRDS Merged Compustat/CRSP examples)
  filter(linktype %in% c("LU", "LC", "LS")) %>%
  filter(linkprim %in% c("P", "C", "J")) %>%
  merge(comp.cik.merge, by="gvkey") %>% # inner join, keep only if permno exists
  mutate(datadate = as.Date(datadate),
         permno = as.factor(lpermno),
         linkdt = as.Date(linkdt),
         linkenddt = as.Date(linkenddt),
         # factor levels encode the order of preference used by arrange() below
         linktype = factor(linktype, levels=c("LC", "LU", "LS")),
         linkprim = factor(linkprim, levels=c("P", "C", "J"))) %>%
  # remove compustat fiscal ends that do not fall within linked period; linkenddt=NA (from .E) means ongoing
  filter(datadate >= linkdt & (datadate <= linkenddt | is.na(linkenddt))) %>%
  # prioritize linktype, linkprim based on order of preference/primary if duplicate
  arrange(datadate, permno, linktype, linkprim) %>%
  distinct(datadate, permno, .keep_all = TRUE)
# Keep only the identifier columns needed for the final merge.
data.ccm.clean<-data.ccm %>% dplyr::select(gvkey, linkdt, linkenddt,cusip,cik,permno)
# Ongoing links have linkenddt = NA; close them at today's date so the
# from/to expansion below has a finite end point.
data.ccm.clean$linkenddt<-as.Date(ifelse(is.na(data.ccm.clean$linkenddt),as.Date(Sys.Date(),"%Y-%m-%d"),as.Date(data.ccm.clean$linkenddt,"%Y-%m-%d")),origin='1970-01-01')
# Expand each link window to one row per day (same technique as the names
# file above) so links can be joined to events by exact date.
link.file.master<-data.ccm.clean %>% split(., sort(as.numeric(rownames(.)))) %>%
  map(~complete(data=.x, linkdt = seq.Date(as.Date(.x$linkdt, format="%Y-%m-%d"),as.Date(.x$linkenddt, format="%Y-%m-%d"),by="day"))) %>%
  map(~fill(data=.x,gvkey,cusip,cik,permno)) %>% bind_rows() %>% distinct(permno,gvkey,linkdt,.keep_all = TRUE) %>% dplyr::select(-linkenddt)
######### Merge the split events to the daily CCM link file by permno and
######### event date; left join keeps events without a Compustat link
######### (so gvkey/cik may be NA -- CRSP and Compustat do not link perfectly).
head(totalSplit)
head(link.file.master)
finalOut<-merge(totalSplit,link.file.master,
                by.x=c("permno","date"),
                by.y=c("permno","linkdt"),
                all.x = TRUE)
head(finalOut)
##### Write the final split-event file in both RDS and CSV form.
saveRDS(finalOut,"/scratch/wvu/SplitFinal.rds")
write.csv(finalOut,"/scratch/wvu/SplitFinal.csv",row.names = FALSE)
|
ddd1d33fad76b65508f51dae417e7b4e40578d33
|
aac2208b8358941b1655e58a2eaddf62a81e06d6
|
/code/week22_quarantinis.R
|
8e6f3200f35f50ebde03e77543e0909dc8256f06
|
[] |
no_license
|
VeeBurton/tidee-tuesday
|
dd44b5a4460462fe128b6cd45c0ba8b759fa7e49
|
46bfc896e5b023e6ddfdc331e80e10931052a2f5
|
refs/heads/master
| 2023-05-14T01:41:26.492916
| 2021-06-02T14:11:05
| 2021-06-02T14:11:05
| 259,920,171
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,368
|
r
|
week22_quarantinis.R
|
library(tidyverse)
# data please
# Load the TidyTuesday 2020 week 22 cocktails data (network download).
tuesdata <- tidytuesdayR::tt_load(2020, week = 22)
cocktails <- tuesdata$cocktails
# Dataset notes:
# measure column intentionally left as a string with a number + volume/unit
# so that you can try out potential strategies to cleaning it up.
# Potential tools: tidyr::separate(), stringr::str_split().
glimpse(cocktails)
cocktails$measure
# The measure column is ignored in this analysis.
# Question: how faffy (fiddly) are cocktails to make? Can they take up
# maximum time in quarantine?
# Faff level = sum of ingredient numbers per drink (each row carries its
# ingredient's position, so summing weights drinks with many ingredients).
candidates <- cocktails %>%
  group_by(drink) %>%
  summarise(faff_level=sum(ingredient_number)) %>%
  arrange(desc(faff_level))
(average_faff <- mean(candidates$faff_level))
summary(candidates$faff_level)
# Keep only drinks with faff level >= 15 ("very faffy" candidates).
very_faffy <- candidates %>%
  filter(faff_level>=15)
very_faffy <- unique(very_faffy$drink)
# Subset the ingredient-level rows to those faffy drinks.
quarantinis <- cocktails[cocktails$drink %in% very_faffy, ]
summary(quarantinis)
# Re-attach the per-drink faff level to every ingredient row.
quarantinis2 <- quarantinis %>%
  group_by(drink) %>%
  mutate(faff_level=sum(ingredient_number))
summary(quarantinis2$faff_level)
# Split the drinks into two faffiness tiers at a faff level of 21.
quarantinis2 <- quarantinis2 %>%
  mutate(faffiness=ifelse(faff_level<=21, "very faffy", "ridiculously faffy"))
# Polar bar chart of faff level per drink, faceted by faffiness tier.
ggplot(quarantinis2)+
  theme_minimal()+
  ggtitle("The faffiest quarantinis available")+
  geom_col(aes(x=drink,y=faff_level))+
  coord_polar()+
  facet_wrap(~faffiness)
|
db400f3c98dd55a7526351f608c8736d23bec67a
|
a218c73b9c34d9140be85bfc8755344554c88a5e
|
/man/euc.Rd
|
0b2dcfddcad458b32a84bb7ed635bd156f05db40
|
[
"Apache-2.0"
] |
permissive
|
EvanYathon/distrrr-1
|
1b9f11db5c0e524216be0fbca6e14b69b5c71c6f
|
b3b466ac230031104865666e49c1a4129f59f529
|
refs/heads/master
| 2020-04-29T05:57:25.916933
| 2019-03-09T00:11:22
| 2019-03-09T00:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 395
|
rd
|
euc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_distance.R
\name{euc}
\alias{euc}
\title{euc}
\usage{
euc(point1, point2)
}
\arguments{
\item{point1}{vector with numeric values}
\item{point2}{vector with numeric values}
}
\value{
A numeric scalar: the Euclidean distance between point1 and point2.
}
\description{
euc
}
\examples{
euc(c(0,0,0), c(1,0,0))
}
|
058be1373e2eff944cc6171d77e458cf287ddd6c
|
e6dcecf42fd5ab6bd47aaf9c9e91d05143983f14
|
/R/cgibbs.R
|
aa62309da04e434b4d4a2abe33c5ad0db2724fea
|
[
"Artistic-2.0"
] |
permissive
|
hillarykoch/CLIMB
|
73c0c544443df894c473eb8d139745a4e8c103b4
|
4d28091eb1b0447907e6fe7563edfc096c0eddb4
|
refs/heads/master
| 2022-11-05T14:58:50.302252
| 2022-10-18T12:24:24
| 2022-10-18T12:24:24
| 197,435,666
| 7
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,533
|
r
|
cgibbs.R
|
# Functions to call anything from cgibbs.jl
# Initialize the embedded Julia runtime, verify the major version is at
# least 1, and load the cgibbs module together with its Julia dependencies.
prepare_julia <- function() {
  jl <- JuliaCall::julia_setup()
  # Major version is the first dot-separated component of e.g. "1.0.2".
  major_version <- as.numeric(strsplit(jl$VERSION, ".", fixed = TRUE)[[1]][1])
  if (major_version < 1) {
    stop("Julia version > 1.0 required for this package to run.")
  }
  # Bring the sampler module and its dependencies into the Julia session.
  for (module in c("cgibbs", "StatsBase", "DataFrames",
                   "LinearAlgebra", "Distributions")) {
    JuliaCall::julia_command(paste0("using ", module))
  }
}
# Run the constrained Gibbs sampler implemented in the Julia module cgibbs.
#
# Arguments:
#   dat              - data (coerced to data.frame), one row per observation.
#   hyp              - list of hyperparameters: kappa0, mu0, Psi0, and alpha
#                      (mixing weights; its length is the cluster count).
#   nstep            - number of MCMC steps to run.
#   retained_classes - matrix of retained latent classes; one row per cluster.
# Returns a list with elements "chain", "acceptance_rate_chain",
# and "tune_df_chain" (as produced by cgibbs.run_mcmc in Julia).
run_mcmc <- function(dat, hyp, nstep, retained_classes) {
  if (length(hyp$alpha) != nrow(retained_classes)) {
    stop(
      "length(hyp$alpha) must be the same as nrow(retained_classes). These both
      correspond to the cluster number."
    )
  }
  dat <- data.frame(dat)
  prepare_julia()
  # Compute some values from the inputs:
  # dm = dimension, n = observations, nm = clusters, nw = walkers (fixed at 1).
  dm <- ncol(dat)
  n <- nrow(dat)
  nm <- length(hyp$alpha)
  nw <- 1
  # Assign inputs names in the Julia session so they can be referenced from
  # julia_eval/julia_command strings below.
  JuliaCall::julia_assign("dat", dat)
  JuliaCall::julia_assign("kappa0", hyp$kappa0)
  JuliaCall::julia_assign("mu0", hyp$mu0)
  JuliaCall::julia_assign("Psi0", hyp$Psi0)
  JuliaCall::julia_command("hyp = (kappa0, mu0, Psi0);")
  JuliaCall::julia_assign("alph", hyp$alpha)
  JuliaCall::julia_assign("reduced_classes", retained_classes)
  # Convert floats to integers where Julia expects Int64 (R numerics arrive
  # as Float64 by default).
  JuliaCall::julia_assign("nw", 1)
  JuliaCall::julia_assign("nw", JuliaCall::julia_eval("Int64(nw)"))
  JuliaCall::julia_assign("dm", dm)
  JuliaCall::julia_assign("dm", JuliaCall::julia_eval("Int64(dm)"))
  JuliaCall::julia_assign("n", n)
  JuliaCall::julia_assign("n", JuliaCall::julia_eval("Int64(n)"))
  JuliaCall::julia_assign("nm", nm)
  JuliaCall::julia_assign("nm", JuliaCall::julia_eval("Int64(nm)"))
  JuliaCall::julia_assign("nstep", nstep)
  JuliaCall::julia_assign("nstep", JuliaCall::julia_eval("Int64(nstep)"))
  # Initial proposal degrees of freedom, one per cluster.
  JuliaCall::julia_assign("tune_df", rep(1000.0, nm))
  # Generate starting values: `param` holds, per walker, a tuple of
  # (per-cluster {mu, Sigma} dicts, mixing weights, initial assignments z).
  JuliaCall::julia_assign(
    "param",
    JuliaCall::julia_eval(
      "Array{Tuple{Array{Dict{String,Array{Float64,N} where N},1},Array{Float64,1},Array{Int64,1}}}(undef, (nw, 1));"
    )
  )
  for (i in 1:nw) {
    JuliaCall::julia_assign(
      "dictionary",
      JuliaCall::julia_eval("Dict{String,Array{Float64,N} where N}[];")
    )
    # Seed each cluster's mu/Sigma from the prior (mu0 row, Hermitian Psi0 slice).
    for (m in 1:nm) {
      JuliaCall::julia_assign("m", m)
      JuliaCall::julia_assign("m", JuliaCall::julia_eval("Int64(m)"))
      JuliaCall::julia_assign("Sigma",
                              JuliaCall::julia_eval("Matrix(Hermitian(Psi0[:,:,m]));"))
      JuliaCall::julia_assign("mu", JuliaCall::julia_eval("mu0[m,:];"))
      JuliaCall::julia_command("push!(dictionary, Dict(\"mu\" => mu, \"Sigma\" => Sigma));")
    }
    # Initial cluster assignments drawn with weights alpha.
    JuliaCall::julia_assign("z",
                            JuliaCall::julia_eval("wsample(1:1:nm, alph, n; replace = true);"))
    JuliaCall::julia_assign("i", i)
    JuliaCall::julia_command("param[i,1] = (dictionary, alph, z);")
  }
  # Cluster labels: each retained class row rendered as a digit string
  # (values shifted by +1 before concatenation).
  labels <-
    apply(retained_classes + 1, 1, function(X)
      paste0(X, collapse = ""))
  JuliaCall::julia_assign("labels", labels)
  out <-
    JuliaCall::julia_eval("cgibbs.run_mcmc(dat, param, hyp, alph, nstep, labels, tune_df);")
  names(out) <-
    c("chain", "acceptance_rate_chain", "tune_df_chain")
  out
}
# Continue a previous run of the Gibbs sampler for `nstep` more steps,
# starting from the end state of `mcmc` (the list returned by run_mcmc).
#
# Arguments mirror run_mcmc, plus:
#   mcmc - previous result; its $chain and $tune_df_chain seed the new run.
# Returns a list with "chain", "acceptance_rate_chain", "tune_df_chain".
extend_mcmc <- function(dat, hyp, nstep, retained_classes, mcmc) {
  prepare_julia()
  dat <- data.frame(dat)
  # Compute some values from the inputs.
  # NOTE(review): dm, n, and nw are computed here but never forwarded to
  # Julia -- presumably kept for parity with run_mcmc; confirm before removing.
  dm <- ncol(dat)
  n <- nrow(dat)
  nm <- length(hyp$alpha)
  nw <- 1
  # Assign hyperparameter inputs in the Julia session.
  JuliaCall::julia_assign("dat", dat)
  JuliaCall::julia_assign("kappa0", hyp$kappa0)
  JuliaCall::julia_assign("mu0", hyp$mu0)
  JuliaCall::julia_assign("Psi0", hyp$Psi0)
  JuliaCall::julia_command("hyp = (kappa0, mu0, Psi0);")
  JuliaCall::julia_assign("alph", hyp$alpha)
  # Assign the run-length and previous-run state in the Julia session.
  JuliaCall::julia_assign("nstep", nstep)
  JuliaCall::julia_assign("nstep", JuliaCall::julia_eval("Int64(nstep)"))
  JuliaCall::julia_assign("chain", mcmc$chain)
  JuliaCall::julia_assign("tune_df_chain", mcmc$tune_df_chain)
  # Cluster labels rendered as digit strings, as in run_mcmc.
  JuliaCall::julia_assign("labels", apply(retained_classes + 1, 1, function(X)
    paste0(X, collapse = "")))
  out <-
    JuliaCall::julia_eval("cgibbs.extend_mcmc(dat, chain, tune_df_chain, hyp, alph, nstep, labels);")
  names(out) <- c("chain", "acceptance_rate_chain", "tune_df_chain")
  out
}
# Unpack the per-cluster parameter chains from a sampler result.
#
# Arguments:
#   mcmc - list returned by run_mcmc/extend_mcmc (uses $chain,
#          $acceptance_rate_chain, $tune_df_chain).
# Returns a list with mu_chains (list of matrices, one per cluster),
# Sigma_chains (list, one per cluster), prop_chain, and z_chain.
extract_chains <- function(mcmc) {
  prepare_julia()
  # NOTE(review): these two transposed chains are computed but not returned;
  # only ncol() of the first is used below -- confirm before removing.
  acceptance_rate_chain <- t(mcmc$acceptance_rate_chain)
  tune_df_chain <- t(mcmc$tune_df_chain)
  nm <- ncol(acceptance_rate_chain)
  JuliaCall::julia_assign("chain", mcmc$chain)
  JuliaCall::julia_assign("nm", nm)
  JuliaCall::julia_assign("nm", JuliaCall::julia_eval("Int64(nm)"))
  # Build the full list of mu chains once in Julia -- the comprehension
  # already iterates over all clusters, so recomputing it inside the loop
  # below (as the original did) was O(nm^2) work for the same result.
  JuliaCall::julia_assign(
    "mu_chains",
    JuliaCall::julia_eval("[cgibbs.get_mu_chain(chain, m) for m in 1:nm]")
  )
  mu_chains <- Sigma_chains <- list()
  for (m in seq_len(nm)) {
    # Julia-side `m` is needed by get_Sigma_chain and the mu_chains[m] lookup.
    JuliaCall::julia_assign("m", m)
    JuliaCall::julia_assign("m", JuliaCall::julia_eval("Int64(m)"))
    mu_chains[[m]] <- t(JuliaCall::julia_eval("mu_chains[m]"))
    Sigma_chains[[m]] <-
      JuliaCall::julia_eval("cgibbs.get_Sigma_chain(chain, m)")
  }
  z_chain <- JuliaCall::julia_eval("cgibbs.get_z_chain(chain);")
  prop_chain <- t(JuliaCall::julia_eval("cgibbs.get_prop_chain(chain);"))
  list(
    "mu_chains" = mu_chains,
    "Sigma_chains" = Sigma_chains,
    "prop_chain" = prop_chain,
    "z_chain" = z_chain
  )
}
|
5ae814d48f971e5c6d35d3afcdec37b57c082389
|
9719ea69f693adfddc62b27eaf948fc7b16f6ad0
|
/man/map_wastd.Rd
|
172b43d774a213b32eac4fc4a96f924af09e433b
|
[] |
no_license
|
dbca-wa/wastdr
|
49fe2fb1b8b1e518f6d38549ff12309de492a2ad
|
5afb22d221d6d62f6482798d9108cca4c7736040
|
refs/heads/master
| 2022-11-18T01:00:41.039300
| 2022-11-16T08:32:12
| 2022-11-16T08:32:12
| 86,165,655
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,312
|
rd
|
map_wastd.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/map_wastd.R
\name{map_wastd}
\alias{map_wastd}
\title{Map Marine Wildlife Incident 0.6}
\usage{
map_wastd(
x,
map_animals = TRUE,
map_tracks = TRUE,
map_dist = TRUE,
map_sites = TRUE,
wastd_url = wastdr::get_wastd_url(),
fmt = "\%Y-\%m-\%d \%H:\%M",
tz = "Australia/Perth",
cluster = FALSE
)
}
\arguments{
\item{x}{An object of S3 class \code{wastd_data}, e.g. the output of
\code{wastdr::download_wastd_turtledata()}, optionally filtered to a locality
by \code{wastdr::filter_wastd_turtledata(area_name="Thevenard Island")}.}
\item{map_animals}{Whether to map animals (AnimalEncounters), default: TRUE}
\item{map_tracks}{Whether to map tracks (TurtleNestEncounters), default: TRUE}
\item{map_dist}{Whether to map nest and general disturbances
(Encounters with TurtleNestDisturbanceObservation), default: TRUE}
\item{map_sites}{Whether to map Sites, default: TRUE}
\item{wastd_url}{The base URL for WAStD, default: \code{get_wastd_url()}}
\item{fmt}{The desired date format, default: "d/m/Y H:M"}
\item{tz}{The lubridate timezone, default: "Australia/Perth}
\item{cluster}{If TRUE, cluster map markers. Default: FALSE.
Note: In some places, the aerial background layer does not provide imagery
at sufficient zoom levels, and therefore restricts the map zoom at levels
where the cluster markers don't expand. Switch to "Place names" to let
cluster markers expand.}
}
\value{
A leaflet map
}
\description{
\lifecycle{maturing}
}
\details{
Creates a Leaflet map with an interactive legend offering to toggle
each species separately. The maps auto-zooms to the extent of data given.
}
\examples{
data(wastd_data)
map_wastd(wastd_data)
}
\seealso{
Other wastd:
\code{\link{add_nest_labels}()},
\code{\link{disturbance_by_season}()},
\code{\link{filter_alive}()},
\code{\link{filter_dead}()},
\code{\link{filter_disturbance}()},
\code{\link{filter_predation}()},
\code{\link{ggplot_disturbance_by_season}()},
\code{\link{ggplot_emergence_success}()},
\code{\link{ggplot_hatching_success}()},
\code{\link{ggplot_hatchling_misorientation}()},
\code{\link{ggplot_nesting_success_per_area_season_species_pct}()},
\code{\link{ggplot_nesting_success_per_area_season_species}()},
\code{\link{ggplot_sighting_status_per_area_season_species}()},
\code{\link{ggplot_total_emergences_per_area_season_species}()},
\code{\link{ggplot_track_success_by_date}()},
\code{\link{ggplot_track_successrate_by_date}()},
\code{\link{hatching_emergence_success_area}()},
\code{\link{hatching_emergence_success_site}()},
\code{\link{hatching_emergence_success}()},
\code{\link{map_dist}()},
\code{\link{map_fanangles}()},
\code{\link{map_mwi}()},
\code{\link{map_nests}()},
\code{\link{map_tracks}()},
\code{\link{map_wastd_wamtram_sites}()},
\code{\link{nesting_success_per_area_day_species}()},
\code{\link{nesting_success_per_area_season_species}()},
\code{\link{nesting_type_by_area_season_age_species}()},
\code{\link{nesting_type_by_area_season_species}()},
\code{\link{nesting_type_by_season_age_species}()},
\code{\link{nesting_type_by_season_calendarday_age_species}()},
\code{\link{nesting_type_by_season_calendarday_species}()},
\code{\link{nesting_type_by_season_day_species}()},
\code{\link{nesting_type_by_season_species}()},
\code{\link{nesting_type_by_season_week_age_species}()},
\code{\link{nesting_type_by_season_week_site_species}()},
\code{\link{nesting_type_by_season_week_species}()},
\code{\link{nesting_type_by_site_season_age_species}()},
\code{\link{nesting_type_by_site_season_species}()},
\code{\link{parse_animal_encounters}()},
\code{\link{parse_area_sf}()},
\code{\link{parse_area}()},
\code{\link{parse_encounterobservations}()},
\code{\link{parse_surveys}()},
\code{\link{parse_turtle_nest_encounters}()},
\code{\link{print.wastd_api_response}()},
\code{\link{sighting_status_per_area_season_species}()},
\code{\link{sighting_status_per_site_season_species}()},
\code{\link{summarise_hatching_and_emergence_success}()},
\code{\link{summarise_wastd_data_per_day_site}()},
\code{\link{total_emergences_per_area_season_species}()},
\code{\link{total_emergences_per_site_season_species}()},
\code{\link{track_success_by_species}()},
\code{\link{track_success}()},
\code{\link{tracks_ts}()}
}
\concept{wastd}
|
0b89aeb94f697253076a87482c30805f78305450
|
64f84f0edb31a8aac3418415828b1176b6e15c64
|
/assignment4/notsouseful.R
|
7732d87b034e30fcd860ca9484e2711e250bbd06
|
[] |
no_license
|
Oyelowo/Advanced-Remote-Sensing-2
|
721651ddbc05b817a63162a80787f8c5950fa664
|
4582bc0602ad34e40ebc1ba1da4246f7abba7192
|
refs/heads/master
| 2020-03-29T16:52:33.035112
| 2018-09-24T16:15:34
| 2018-09-24T16:15:34
| 150,131,530
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 2,988
|
r
|
notsouseful.R
|
########################################
# Feature extraction for tree segments #
########################################
# NOTE(review): rm(list = ls()) and setwd() are kept from the original course
# script, but both are discouraged in reusable code.
rm(list=ls())
setwd("C:/HY-data/MIKNIEMI/GIS202/Exercises/Exercise 5 materials")

# Load the spatial packages before any sp functions are used.
# Bug fix: coordinates<-() below comes from sp, but the original script
# attached sp only after calling coordinates(), which would fail on a
# fresh session.
library(sp)
library(maptools)

##################################################################################
# Read 3D point data (TEXAS_lidar.txt)
Lidar <- read.table(file="TEXAS_lidar.txt", head=FALSE, sep="\t")
colnames(Lidar) <- c("Year","Pulse","X","Y","Z","h","range","int","agc")
summary(Lidar)
# Specify the coordinate columns (promotes Lidar to a SpatialPointsDataFrame).
coordinates(Lidar) = c("X", "Y")

##################################################################################
# Read the shapefile (TrainingTrees.shp)
S <- readShapePoly("TrainingTrees.shp") # Tree segments
SP <- as(S, "SpatialPolygons")
# Plot tree segments
plot(SP)

##################################################################################
# Analyse which Lidar points are located inside the segments:
# over() returns the polygon (segment) index for each point, NA if outside.
Lidar$In <- over(Lidar,SP)
# Select the 2010 Lidar data, and remove the points located outside of the segments.
# Bug fix: the original compared against the string "NA" (In != "NA"), which
# only worked by accident; test for missing values explicitly.
Lidar10 <- subset(Lidar, Lidar$Year==10)
Lidar10 <- subset(Lidar10, !is.na(Lidar10$In))
summary(Lidar10)
#######################################################################################
#######################################################################################
#######################################################################
### MAKE THE NECESSARY ADDITIONS AND MODIFICATIONS TO THE CODE BELOW ###
### (translated from Finnish; the original comment was mis-encoded)  ###
#######################################################################
# Placeholder initial values for the per-segment feature vectors.
ID <- 999; X <- 999; Y <- 999; Hmax <- 999; Hmean <- 999; Hstd <- 999; CV <- 999; VD <- 999;
h10 <- 999; h20 <- 999; h30 <- 999; h40 <- 999; h50 <- 999; h60 <- 999; h70 <- 999; h80 <- 999; h90 <- 999;

# Coerce the SpatialPointsDataFrame back to a plain data.frame once
# (the original repeated this conversion three times).
Lidar10 <- data.frame(Lidar10)
range(Lidar10$In)
View(Lidar10)

from <- min(Lidar10$In)
to <- max(Lidar10$In)

for (i in from:to) {
  # Only process segment ids that actually occur in the point data.
  if (i %in% Lidar10$In) {
    seg <- subset(Lidar10, Lidar10$In == i)
    ID[i] <- i
    # Bug fix: the original indexed an undefined object `lfisi`; the subset
    # for segment i is stored in `seg`. X/Y of the highest point (max h).
    X[i] <- seg[which.max(seg[, "h"]), "X"]
    Y[i] <- seg[which.max(seg[, "h"]), "Y"]
  } else {
    ID[i] <- 9999999999 # sentinel for segment ids with no points
  }
}
results <- cbind(ID, X, Y)

# Worked example for a single segment (template to extend per the exercise):
temp <- subset(Lidar10, Lidar10$In == 1)
# And remember also to subset only first and single echoes!
X <- temp[which.max(temp[,"h"]),"X"] # "X" of the point with the maximum "h" value
Y <- temp[which.max(temp[,"h"]),"Y"] # "Y" of the point with the maximum "h" value
Hmax <- temp[which.max(temp[,"h"]),"h"]
# Find the maximum height
# Calculate the mean height of point height values
# Calculate the standard deviation of point height values
# Calculate the coefficient of variation in height values
# Calculate the vegetation density
# Calculate quantiles  # help(quantile)

results <- cbind(ID,X,Y,Hmax,Hmean,Hstd,CV,VD,h10,h20,h30,h40,h50,h60,h70,h80,h90)
results <- as.data.frame(results)
write.table(results, file = "Results.csv", dec=".", sep=",",row.names=F)
|
fd32900a7b0aa0b5544c93c32c349819a57895d0
|
97d2622edd598afcaed1f1c2c1be12197ee2de5f
|
/Breast/GREAT.R
|
611c2e021e8e83faf5281cee6b9a51e1f2ab30d2
|
[] |
no_license
|
gloriali/HirstLab
|
8a95bd0159b349cf993eaea7e7455f3afa39d3fe
|
f95e3818f39ff476227ca8fc02c032f1cbaf79e9
|
refs/heads/master
| 2022-11-30T23:19:32.541188
| 2020-03-05T01:55:52
| 2020-03-05T01:55:52
| 19,998,930
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,887
|
r
|
GREAT.R
|
# Figure S17
setwd("~/快盘/Publications/breast/revision/sup/FigureS17")
lum.GOBP <- read.delim("shown-GOBiologicalProcess.lum.UMR.tsv", head = T, as.is = T)
lum.MSig <- read.delim("shown-MSigDBGeneSetsPerturbation.lum.UMR.tsv", head = T, as.is = T)
myo.GOBP <- read.delim("shown-GOBiologicalProcess.myo.UMR.tsv", head = T, as.is = T)
myo.pathway <- read.delim("shown-pathwayCommons.myo.UMR.tsv", head = T, as.is = T)
GREAT <- na.omit(data.frame(cell = rep(c("lum.UMR", "myo.UMR"), each = 20), Category = rep(c("GOBP", "MSigPerturbation", "GOBP", "PathwaysCommon"), each = 10),
Term = c(lum.GOBP$Term.Name[1:10], lum.MSig$Term.Name[1:10], myo.GOBP$Term.Name[1:10], myo.pathway$Term.Name[1:10]),
FDR = c(lum.GOBP$Binom.FDR.Q.Val[1:10], lum.MSig$Binom.FDR.Q.Val[1:10], myo.GOBP$Binom.FDR.Q.Val[1:10], myo.pathway$Binom.FDR.Q.Val[1:10])))
GREAT$Term <- as.character(GREAT$Term)
for(i in 1:nrow(GREAT)){
if(nchar(GREAT$Term[i]) > 120){
GREAT$Term[i] <- paste0(substr(GREAT$Term[i], 1, as.integer(nchar(GREAT$Term[i])/3)), "-\n",
substr(GREAT$Term[i], as.integer(nchar(GREAT$Term[i])/3) + 1, 2*as.integer(nchar(GREAT$Term[i])/3)), "-\n",
substr(GREAT$Term[i], 2*as.integer(nchar(GREAT$Term[i])/3) + 1, nchar(GREAT$Term[i])))
}
if(nchar(GREAT$Term[i]) > 60 & nchar(GREAT$Term[i]) <= 120){
GREAT$Term[i] <- paste0(substr(GREAT$Term[i], 1, as.integer(nchar(GREAT$Term[i])/2)), "-\n", substr(GREAT$Term[i], as.integer(nchar(GREAT$Term[i])/2)+1, nchar(GREAT$Term[i])))
}
}
GREAT_lum <- droplevels(GREAT[as.character(GREAT$cell) == "lum.UMR",])
GREAT_lum$Term <- factor(GREAT_lum$Term, levels = GREAT_lum$Term[length(GREAT_lum$Term):1])
GREAT_myo <- droplevels(GREAT[as.character(GREAT$cell) == "myo.UMR",])
GREAT_myo$Term <- factor(GREAT_myo$Term, levels = GREAT_myo$Term[length(GREAT_myo$Term):1])
library(ggplot2)
(GREAT_lum_plot <- ggplot(data = GREAT_lum, aes(Term, -log10(FDR))) +
geom_bar(aes(fill = Category), width = .5) +
coord_flip() +
geom_text(aes(label = round(-log10(FDR), 2), hjust = 0)) +
facet_grid(cell ~ .) +
xlab("") +
ylab("") +
theme_bw() +
scale_fill_manual(values = c("GOBP" = "blue", "MSigPerturbation" = "purple", "PathwaysCommon" = "darkgreen")))
ggsave(GREAT_lum_plot, file = "GREAT_lum.pdf", width = 12, height = 7)
(GREAT_myo_plot <- ggplot(data = GREAT_myo, aes(Term, -log10(FDR))) +
geom_bar(aes(fill = Category), width = .5) +
coord_flip() +
geom_text(aes(label = round(-log10(FDR), 2), hjust = 0)) +
facet_grid(cell ~ .) +
xlab("") +
ylab("-log10(Binomial FDR)") +
theme_bw() +
scale_fill_manual(values = c("GOBP" = "blue", "MSigPerturbation" = "purple", "PathwaysCommon" = "darkgreen")))
ggsave(GREAT_myo_plot, file = "GREAT_myo.pdf", width = 12, height = 7)
# Figure S26
setwd("~/快盘/Publications/breast/revision/sup/FigureS26")
lum.GOBP <- read.delim("shown-GOBiologicalProcess.lum.UMR.TF.tsv", head = T, as.is = T)
lum.MSig <- read.delim("shown-MSigDBGeneSetsPerturbation.lum.UMR.TF.tsv", head = T, as.is = T)
myo.GOMF <- read.delim("shown-GOMolecularFunction.myo.UMR.TF.tsv", head = T, as.is = T)
myo.GOBP <- read.delim("shown-GOBiologicalProcess.myo.UMR.TF.tsv", head = T, as.is = T)
myo.pathway <- read.delim("shown-pathway.myo.UMR.TF.tsv", head = T, as.is = T)
GREAT <- data.frame(cell = c(rep("lum.UMRs.with.TFs", 20), rep("myo.UMRs.with.TFs", 30)), Category = rep(c("GOBP", "MSigPerturbation", "GOMF", "GOBP", "PathwaysCommon"), each = 10),
Term = c(lum.GOBP$Term.Name[1:10], lum.MSig$Term.Name[1:10], myo.GOMF$Term.Name[1:10], myo.GOBP$Term.Name[1:10], myo.pathway$Term.Name[1:10]),
FDR = c(lum.GOBP$Binom.FDR.Q.Val[1:10], lum.MSig$Binom.FDR.Q.Val[1:10], myo.GOMF$Binom.FDR.Q.Val[1:10], myo.GOBP$Binom.FDR.Q.Val[1:10], myo.pathway$Binom.FDR.Q.Val[1:10]))
GREAT$Term <- as.character(GREAT$Term)
GREAT <- na.omit(GREAT)
for(i in 1:nrow(GREAT)){
if(nchar(GREAT$Term[i]) > 120){
GREAT$Term[i] <- paste0(substr(GREAT$Term[i], 1, as.integer(nchar(GREAT$Term[i])/3)), "-\n",
substr(GREAT$Term[i], as.integer(nchar(GREAT$Term[i])/3) + 1, 2*as.integer(nchar(GREAT$Term[i])/3)), "-\n",
substr(GREAT$Term[i], 2*as.integer(nchar(GREAT$Term[i])/3) + 1, nchar(GREAT$Term[i])))
}
if(nchar(GREAT$Term[i]) > 60 & nchar(GREAT$Term[i]) <= 120){
GREAT$Term[i] <- paste0(substr(GREAT$Term[i], 1, as.integer(nchar(GREAT$Term[i])/2)), "-\n", substr(GREAT$Term[i], as.integer(nchar(GREAT$Term[i])/2)+1, nchar(GREAT$Term[i])))
}
}
GREAT_lum <- droplevels(GREAT[1:20,])
GREAT_lum$Term <- factor(GREAT_lum$Term, levels = GREAT_lum$Term[length(GREAT_lum$Term):1])
GREAT_myo <- droplevels(GREAT[21:43,])
GREAT_myo$Term <- factor(GREAT_myo$Term, levels = GREAT_myo$Term[length(GREAT_myo$Term):1])
library(ggplot2)
(GREAT_lum_plot <- ggplot(data = GREAT_lum, aes(Term, -log10(FDR))) +
geom_bar(aes(fill = Category), width = .5) +
coord_flip() +
geom_text(aes(label = round(-log10(FDR), 2), hjust = 0)) +
facet_grid(cell ~ .) +
xlab("") +
ylab("") +
theme_bw() +
scale_fill_manual(values = c("GOMF" = "brown", "GOBP" = "blue", "MSigPerturbation" = "purple", "PathwaysCommon" = "darkgreen")))
ggsave(GREAT_lum_plot, file = "GREAT_lum.pdf", width = 12, height = 9)
(GREAT_myo_plot <- ggplot(data = GREAT_myo, aes(Term, -log10(FDR))) +
geom_bar(aes(fill = Category), width = .5) +
coord_flip() +
geom_text(aes(label = round(-log10(FDR), 2), hjust = 0)) +
facet_grid(cell ~ .) +
xlab("") +
ylab("-log10(Binomial FDR)") +
theme_bw() +
scale_fill_manual(values = c("GOMF" = "brown", "GOBP" = "blue", "MSigPerturbation" = "purple", "PathwaysCommon" = "darkgreen")))
ggsave(GREAT_myo_plot, file = "GREAT_myo.pdf", width = 12, height = 7)
|
8847d34e4994b495f01adbc17df672ea211bd287
|
fc239655fcb08de514c131beda0c089463a6e820
|
/app/modules/welcome.R
|
58518266ec0761d9ad0ee16ca1be707bd1e538fd
|
[] |
no_license
|
jpolonsky/pledges
|
8ddd39695edc8d9f812ac7607ca5a9a96fbd337b
|
80ea696da7e194af99d2a7e590c7013806df8dc4
|
refs/heads/master
| 2021-01-24T09:14:26.641765
| 2017-03-17T12:17:45
| 2017-03-17T12:17:45
| 69,883,447
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 238
|
r
|
welcome.R
|
Welcome <- function(input, output, session){
paste("Hey Paula's team!",
"Please upload your excel file using the Upload Data button",
img(src = "arrow.png", width = "5%"),
sep = '<br/>') %>%
HTML
}
|
8a514f8e41333f40d86e0e507b00f671edc556a4
|
f416f02e2e6eb2ab304966a1feabda65295228b2
|
/tests/testthat/test-attack_model.R
|
e446135e151284f307ce4fa474f5e23aa4e346fb
|
[] |
no_license
|
nicholascarey/attackR
|
5150a55ef9c7176e08178ae8b799ab959b3d770d
|
287544fe96ef9eb58c33e3de1ed1755da97975ab
|
refs/heads/master
| 2020-07-26T20:30:10.820508
| 2020-07-16T15:43:04
| 2020-07-16T15:43:04
| 208,758,145
| 0
| 0
| null | 2020-07-16T09:35:28
| 2019-09-16T09:14:23
|
R
|
UTF-8
|
R
| false
| false
| 4,398
|
r
|
test-attack_model.R
|
# library(testthat)
# Profiles ----------------------------------------------------------------
## stops if profiles contain values outside correct range
expect_error(attack_model(500, 180, 60, 1000, 250, 250,
profile_v = c(0, 0.4, 0.8, 1.2),
profile_h = c(0, 0.4, 0.8)),
"Body profiles must only contain values between 0 and 1.")
expect_error(attack_model(500, 180, 60, 1000, 250, 250,
profile_v = c(0, 0.4, 0.8),
profile_h = c(0, 0.4, 0.8, 1.2)),
"Body profiles must only contain values between 0 and 1.")
expect_error(attack_model(500, 180, 60, 1000, 250, 250,
profile_v = c(0, 0.4, 0.8, -0.2),
profile_h = c(0, 0.4, 0.8)),
"Body profiles must only contain values between 0 and 1.")
expect_error(attack_model(500, 180, 60, 1000, 250, 250,
profile_v = c(0, 0.4, 0.8),
profile_h = c(0, 0.4, 0.8, -0.2)),
"Body profiles must only contain values between 0 and 1.")
## stops if no profile entered
expect_error(attack_model(500, 180, 60, 1000, 250, 250,
profile_v = NULL,
profile_h = NULL),
"Provide at least one body profile.")
## stops if profiles less than 3
expect_error(attack_model(500, 180, 60, 1000, 250, 250,
profile_v = c(0.2,0.3),
profile_h = NULL),
"Profiles must be at least 3 values long: e.g. nose, midpoint, tail.")
expect_error(attack_model(500, 180, 60, 1000, 250, 250,
profile_v = NULL,
profile_h = c(0.2,0.3)),
"Profiles must be at least 3 values long: e.g. nose, midpoint, tail.")
expect_error(attack_model(500, 180, 60, 1000, 250, 250,
profile_v = c(0.2),
profile_h = c(0.2,0.3)),
"Profiles must be at least 3 values long: e.g. nose, midpoint, tail.")
# Max Width Locations -----------------------------------------------------
expect_error(attack_model(500, 180, 60, 1000, 250, 250,
profile_v = c(0.2, 0.3, 0.4, 0.5),
profile_h = c(0.2, 0.3, 0.4, 0.5),
max_width_loc_v = -0.5,
"Max width locations must be between 0 and 1"))
expect_error(attack_model(500, 180, 60, 1000, 250, 250,
profile_v = c(0.2, 0.3, 0.4, 0.5),
profile_h = c(0.2, 0.3, 0.4, 0.5),
max_width_loc_v = 1.5,
"Max width locations must be between 0 and 1"))
expect_error(attack_model(500, 180, 60, 1000, 250, 250,
profile_v = c(0.2, 0.3, 0.4, 0.5),
profile_h = c(0.2, 0.3, 0.4, 0.5),
max_width_loc_h = -0.5,
"Max width locations must be between 0 and 1"))
expect_error(attack_model(500, 180, 60, 1000, 250, 250,
profile_v = c(0.2, 0.3, 0.4, 0.5),
profile_h = c(0.2, 0.3, 0.4, 0.5),
max_width_loc_h = 1.5,
"Max width locations must be between 0 and 1"))
# Output ------------------------------------------------------------------
## list is created when simple_output = FALSE
expect_output(str(attack_model(500, 180, 60, 1000, 250, 250,
profile_v = c(0.2, 0.3, 1, 0.5),
profile_h = c(0.2, 0.3, 1, 0.5),
simple_output = FALSE)),
"List of 5")
## data frame is created when simple_output = TRUE
expect_output(str(attack_model(500, 180, 60, 1000, 250, 250,
profile_v = c(0.2, 0.3, 1, 0.5),
profile_h = c(0.2, 0.3, 1, 0.5),
simple_output = TRUE)),
"data.frame")
## test class when simple_output = FALSE
model <- attack_model(500, 180, 60, 1000, 250, 250,
profile_v = c(0.2, 0.3, 1, 0.5),
profile_h = c(0.2, 0.3, 1, 0.5),
simple_output = FALSE)
expect_is(model, "attack_model")
|
16c4f4edb366a815e92e767c5e7683e6257b4071
|
725a33f27fce430ee481a3542aae5bb81a94dfc0
|
/man/MQDataReader-class.Rd
|
a050adddb33e0fbcb534cea30775424d889c65e6
|
[
"BSD-3-Clause"
] |
permissive
|
cbielow/PTXQC
|
fac47ecfa381737fa0cc36d5ffe7c772400fb24e
|
f4dc4627e199088c83fdc91a1f4c5d91f381da6c
|
refs/heads/master
| 2023-07-20T00:39:45.918617
| 2023-05-17T14:23:03
| 2023-05-17T14:23:03
| 20,481,452
| 41
| 30
|
NOASSERTION
| 2023-05-17T14:23:04
| 2014-06-04T11:53:49
|
HTML
|
UTF-8
|
R
| false
| true
| 6,506
|
rd
|
MQDataReader-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MQDataReader.R
\docType{class}
\name{MQDataReader-class}
\alias{MQDataReader-class}
\alias{MQDataReader}
\title{S5-RefClass to read MaxQuant .txt files}
\arguments{
\item{file}{(Relative) path to a MQ txt file.}
\item{filter}{Searched for "C" and "R". If present, [c]ontaminants and [r]everse hits are removed if the respective columns are present.
E.g. to filter both, \code{filter = "C+R"}}
\item{type}{Allowed values are:
"pg" (proteinGroups) [default], adds abundance index columns (*AbInd*, replacing 'intensity')
"sm" (summary), splits into three row subsets (raw.file, condition, total)
"ev" (evidence), will fix empty modified.sequence cells for older MQ versions (when MBR is active)
"msms_scans", will fix invalid (negative) scan event numbers
Any other value will not add/modify any columns}
\item{col_subset}{A vector of column names as read by read.delim(), e.g., spaces are replaced by dot already.
If given, only columns with these names (ignoring lower/uppercase) will be returned (regex allowed)
E.g. col_subset=c("^lfq.intensity.", "protein.name")}
\item{add_fs_col}{If TRUE and a column 'raw.file' is present, an additional column 'fc.raw.file' will be added with
common prefix AND common substrings removed (\code{\link{simplifyNames}})
E.g. two rawfiles named 'OrbiXL_2014_Hek293_Control', 'OrbiXL_2014_Hek293_Treated' will give
'Control', 'Treated'
If \code{add_fs_col} is a number AND the longest short-name is still longer, the names are discarded and replaced by
a running ID of the form 'file <x>', where <x> is a number from 1 to N.
If the function is called again and a mapping already exists, this mapping is used.
Should some raw.files be unknown (ie the mapping from the previous file is incomplete), they will be augmented}
\item{check_invalid_lines}{After reading the data, check for unusual number of NA's to detect if file was corrupted by Excel or alike}
\item{LFQ_action}{[For type=='pg' only] An additional custom LFQ column ('cLFQ...') is created where
zero values in LFQ columns are replaced by the following method IFF(!) the corresponding raw intensity is >0 (indicating that LFQ is erroneusly 0)
"toNA": replace by NA
"impute": replace by lowest LFQ value >0 (simulating 'noise')}
\item{...}{Additional parameters passed on to read.delim()}
\item{colname}{Name of the column (e.g. 'contaminants') in the mq.data table}
\item{valid_entries}{Vector of values to be replaced (must contain all values expected in the column -- fails otherwise)}
\item{replacements}{Vector of values inserted with the same length as \code{valid_entries}.}
}
\value{
A data.frame of the respective file
Replaces values in the mq.data member with (binary) values.
Most MQ tables contain columns like 'contaminants' or 'reverse', whose values are either empty strings
or "+", which is inconvenient and can be much better represented as TRUE/FALSE.
The params \code{valid_entries} and \code{replacements} contain the matched pairs, which determine what is replaced with what.
Returns \code{TRUE} if successful.
}
\description{
This class is used to read MQ data tables using \code{MQDataReader::readMQ()} while holding
the internal raw file --> short raw file name mapping (stored in a member called
'fn_map') and updating/using it every time \code{MQDataReader::readMQ()} is called.
}
\details{
Since MaxQuant changes capitalization and sometimes even column names, it seemed convenient
to have a function which just reads a txt file and returns unified column names, irrespective of the MQ version.
So, it unifies access to columns (e.g. by using lower case for ALL columns) and ensures columns are
identically named across MQ versions:
\preformatted{
alternative term new term
-----------------------------------------
protease enzyme
protein.descriptions fasta.headers
potential.contaminant contaminant
mass.deviations mass.deviations..da.
basepeak.intensity base.peak.intensity
}
We also correct 'reporter.intensity.*' naming issues to MQ 1.6 convention, when 'reporter.intensity.not.corrected' is present.
MQ 1.5 uses: reporter.intensity.X and reporter.intensity.not.corrected.X
MQ 1.6 uses: reporter.intensity.X and reporter.intensity.corrected.X
Note: you must find a regex which matches both versions, or explicitly add both terms if you are requesting only a subset
of columns!
Fixes for msmsScans.txt:
negative Scan Event Numbers in msmsScans.txt are reconstructed by using other columns
Automatically detects UTF8-BOM encoding and deals with it (since MQ2.4).
Example of usage:
\preformatted{
mq = MQDataReader$new()
d_evd = mq$readMQ("evidence.txt", type="ev", filter="R", col_subset=c("proteins", "Retention.Length", "retention.time.calibration"))
}
If the file is empty, this function shows a warning and returns NULL.
If the file is present but cannot be read, the program will stop.
Wrapper to read a MQ txt file (e.g. proteinGroups.txt).
}
\section{Methods}{
\describe{
\item{\code{getInvalidLines()}}{Detect broken lines (e.g. due to Excel import+export)
When editing a MQ txt file in Microsoft Excel, saving the file can cause it to be corrupted,
since Excel has a single cell content limit of 32k characters
(see http://office.microsoft.com/en-001/excel-help/excel-specifications-and-limits-HP010342495.aspx)
while MQ can easily reach 60k (e.g. in oxidation sites column).
Thus, affected cells will trigger a line break, effectively splitting one line into two (or more).
If the table has an 'id' column, we can simply check the numbers are consecutive. If no 'id' column is available,
we detect line-breaks by counting the number of NA's per row and finding outliers.
The line break then must be in this line (plus the preceeding or following one). Depending on where
the break happened we can also detect both lines right away (if both have more NA's than expected).
Currently, we have no good strategy to fix the problem since columns are not aligned any longer, which
leads to columns not having the class (e.g. numeric) they should have.
(thus one would need to un-do the linebreak and read the whole file again)
[Solution to the problem: try LibreOffice 4.0.x or above -- seems not to have this limitation]
@return Returns a vector of indices of broken (i.e. invalid) lines
}
}}
|
b9f30fd9e37fb73eae8f9969d5b08ee06d26c848
|
e586290da2b5444595b16044e76854205db57a48
|
/man/standard_hex_points.Rd
|
4003ea6a1e29cce76494b316c014d16134011c84
|
[
"MIT",
"ISC"
] |
permissive
|
mattle24/electoral.hex
|
9b16c4e917ecbc5b8cc5c8766156b06ec3e60799
|
3332b41e6e6ed17dac59924889e555222c36b704
|
refs/heads/master
| 2020-06-19T22:36:21.766954
| 2019-07-16T21:54:35
| 2019-07-16T21:54:35
| 196,899,657
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 450
|
rd
|
standard_hex_points.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/standardize_points.R
\name{standard_hex_points}
\alias{standard_hex_points}
\title{Standard state hexagon points}
\usage{
standard_hex_points(hex_centroids)
}
\arguments{
\item{hex_centroids}{sf. An object containing the hexagon centroids.}
}
\description{
Given a simple feature of hexagon centroids for a state, return a simple
features object with standardized x, y.
}
|
25d3cf247c16443527c6b2e606b785cbdea475c3
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/A_github/sources/authors/3994/preseqR/compared_methods.R
|
e9c8f70f735ab0e24b2119309539707c71b4594b
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,896
|
r
|
compared_methods.R
|
# Copyright (C) 2016 University of Southern California and
# Chao Deng and Andrew D. Smith and Timothy Daley
#
# Authors: Chao Deng
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
## zero truncated Poisson
## method ref Cohen, A. Clifford. (1960): 203-211.
ztpois.mincount <- function(n, r=1) {
total.sample <- floor(n[, 1] %*% n[, 2])
distinct <- sum(n[, 2])
C <- n[, 1] %*% n[, 2] / sum(n[, 2])
f <- function(x) {x / (1 - exp(-x))}
result = uniroot(function(x) f(x) - C, c(0.001, 1e9), tol = 0.0001, extendInt="upX")
lambda = result$root
L <- sum(n[, 2]) / (1 - ppois(0, lambda))
f.mincount <- function(t) {
L * ppois(q=r - 1, lambda=lambda * t, lower.tail=FALSE)
}
f.mincount(1); f.mincount
}
## Boneh (1998)
boneh.mincount <- function(n, r=1) {
total.sample <- floor(n[, 1] %*% n[, 2])
distinct <- sum(n[, 2])
tmp <- function(t) { sapply(r-1, function(x) {n[, 2] %*% (exp(-n[, 1]) - ppois(x, n[, 1] * t)) + distinct}) }
index.f1 <- which(n[, 1] == 1)
f1 <- n[index.f1, 2]
U0 <- n[, 2] %*% exp(-(n[, 1]))
if (length(index.f1) == 1 && f1 > U0) {
result <- uniroot(function(x) x*(1 - exp(-f1 / x)) - U0, c(0.001, 1e9), tol=0.0001, extendInt="upX")
U <- result$root
f.mincount <- function(t) {tmp(t) + sapply(r-1, function(x) {U * (exp(-(f1 / U)) - ppois(x, f1 * t / U))})}
} else {
f.mincount <- tmp
}
f.mincount(1); f.mincount
}
## Chao and Shen (2004)
chao.mincount <- function(n, r=1, k=10) {
total.sample <- floor(n[, 1] %*% n[, 2])
distinct <- sum(n[, 2])
index.f1 <- which(n[, 1] == 1)
## something wrong with the histogram
if (length(index.f1) != 1)
return(NULL)
f1 <- n[index.f1, 2]
index.rare <- which(n[, 1] <= k)
S.rare <- sum(n[index.rare, 2])
C.rare <- 1 - f1 / (n[index.rare, 1] %*% n[index.rare, 2])
gamma.rare <- max(S.rare / C.rare *
((n[index.rare, 1] * (n[index.rare, 1] - 1)) %*% n[index.rare, 2]) /
(n[index.rare, 1] %*% n[index.rare, 2])^2 - 1, 0)
f0 = S.rare / C.rare + f1 / C.rare * gamma.rare - S.rare
## not ppois(x, f1 * t / f0)
f.mincount <- function(t) { sapply(r-1, function(x) { f0 + distinct - f0 * ppois(x, f1 * t / f0) * exp(f1/f0) })}
f.mincount(1); f.mincount
}
|
283f4d7c61c857106b2aaf3eb2b55e7aa1dd3b84
|
1981c964d39a32be2c471c2106e081ed4e0270ba
|
/R/8.R
|
8ca04747f8c0eb9d3e49341444a2fc3c3dde2449
|
[] |
no_license
|
lg8897203/Rforlg
|
91b74c3c9447d881956f70d25e400a7f0b6c2789
|
2c3fb851c77d9623b353cc7b59486e11e564d3d3
|
refs/heads/master
| 2021-01-20T06:30:55.568506
| 2015-10-25T12:43:17
| 2015-10-25T12:43:17
| 44,785,861
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 988
|
r
|
8.R
|
y <- seq(0,1,0.05)
x <- seq(0,1,0.05)
SPIA <- function(x, y, r=1, a=0, h=5) {
S1 <- (54+105*x+41*x^2-10*x^3)*y/(6*r*(1+x)*(27+27*x-4*x^2))
S2 <- (162+495*x+496*x^2+155*x^3-8*x^4)*y*a/(2*h*r*(1+x)*(27+27*x-4*x^2)^2-2*y^2*(162+495*x+496*x^2+155*x^3-8*x^4))
P1 <- (9+15*x)*h/(9*(1+x))
P2 <- (9+11*x)*a/(27+27*x-4*x^2)
P3 <- (9+11*x)*y*2*S2/(27+27*x-4*x^2)
PA1 <- P1+P2+P3
PB1 <- P1-P2-P3
PA2 <- (3*h+a+y*2*S2+x*(PA1-PB1))/(3*(1+x))
PB2 <- (3*h-a-y*2*S2-x*(PA1-PB1))/(3*(1+x))
PA <- h+(h*r*a)/(3*h*r-y^2*x^2-y^2)
PB <- h-(h*r*a)/(3*h*r-y^2*x^2-y^2)
PS2 <- ((1+x)*y)/(2*h*r)
PS1 <- ((1-x)*y)/(2*h*r)
SA1 <- PS1*PA
SA2 <- PS2*PA
SB1 <- PS1*PB
SB2 <- PS2*PB
DA <- (1/h)*(h+(h*r*a)/(3*h*r-y^2*x^2-y^2))
PPIA <- PA*DA-(r/2)*(SA1^2+SA2^2)
PA1*(PB1-PA1+y*2*S2+a+h)/(2*h)+PA2*((y*2*S2+a+h)/(2*h)+(1+x)*(PB2-PA2)/(2*h)-x*(PB1-PA1)/(2*h))-r*(S1+S2)^2-PPIA
}
z <- outer(x,y,SPIA)
persp(x,y,z,theta=90,phi=0,expand=1,col="red",xlab = "¦È", ylab = "¦Á", zlab = "¦Ð", ticktype = "detailed")
|
c42ad9ae6ebbea2daed06155a2a1dfe0a924c41f
|
bc52b9eb2376f0e0f84da6bd8c770bc26300f4f7
|
/man/fast_sca.Rd
|
b7b5eb0bc25cb5018b15bea02133df33b3d29063
|
[] |
no_license
|
cran/svs
|
86299de68a22036cdbb59cfe1fdce38f4acd8cc7
|
bac0a7e5b4e48739b4273d5b9f69a3d79987fd58
|
refs/heads/master
| 2021-01-10T13:18:31.673174
| 2020-11-09T20:40:02
| 2020-11-09T20:40:02
| 48,089,639
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,225
|
rd
|
fast_sca.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/svs.r
\name{fast_sca}
\alias{fast_sca}
\title{Simple Correspondence Analysis}
\usage{
fast_sca(dat, transform = 1)
}
\arguments{
\item{dat}{Input data: can be a table or a data frame (but the data frame must have only two columns).}
\item{transform}{Numeric specification of the power transformation to be applied on the data.}
}
\value{
A list with components:
\item{\code{val} }{The eigenvalues or principal inertias, indicating how much each latent axis explains.}
\item{\code{pos1} }{The coordinates of the first set of levels (\emph{viz.} the row levels of a frequency table).}
\item{\code{pos2} }{The coordinates of the second set of levels (\emph{viz.} the column levels of a frequency table).}
}
\description{
A fast procedure for computing simple correspondence analysis.
}
\examples{
SndT_Fra <- read.table(system.file("extdata", "SndT_Fra.txt", package = "svs"),
header = TRUE, sep = "\t", quote = "\"", encoding = "UTF-8",
stringsAsFactors = FALSE)
sca.SndT_Fra <- fast_sca(SndT_Fra)
sca.SndT_Fra
}
\references{
Greenacre, M. (2017) \emph{Correspondence analysis in practice, Third edition}. Boca Raton: Chapman and Hall/CRC.
}
|
15801213bb057de3a61ad11d4480b62eda8161c2
|
5c81fc04db1f4fb7a4453d9772796d278bbcad5d
|
/Main.R
|
1911736176bb5cdaaf58809e831f49f47db5b183
|
[] |
no_license
|
Tennismylife/Tennis-R-ecord-Animation
|
0fb725dedcb82485db0e3d76ec8348cfb56a8d56
|
a8c0f94bcf90ff07be778a82ae574c3b3bdb6833
|
refs/heads/main
| 2023-04-16T00:26:58.101862
| 2021-05-03T00:55:52
| 2021-05-03T00:55:52
| 359,907,836
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 441
|
r
|
Main.R
|
library(tidyverse)
source("Reader.R")
source("Animator.R")
source("Formatter.R")
source("DataRetriever.R")
#Read database from csv
db <- ParallelReader()
#Get all data from a specific category with all winners tourney-by-tourney
M1000Roll <- Retrieve()
#Format the data to create a ordered and ranked dataframe
M1000RollFormatted <- Formatter(M1000Roll)
#Create an animation from your data
animation(M1000RollFormatted)
|
12f2acc267b554703c7cb32b9dee56ed1e97637a
|
cb57199836e5e5de9f597c13404e2a089285bc44
|
/R/leaders.R
|
31046b12f4f6c59081e008702ad183ec4c847be6
|
[
"MIT"
] |
permissive
|
kevinrue/BiocChallenges
|
57971f16cc828cfc657a52ab1291983adc595d7b
|
8f1a9628b7816c69876b9ffa3610fd0109bb6fa5
|
refs/heads/main
| 2023-08-28T09:53:56.134237
| 2021-10-31T21:29:41
| 2021-10-31T21:29:41
| 294,656,697
| 0
| 0
|
MIT
| 2020-09-11T15:39:06
| 2020-09-11T09:50:25
|
R
|
UTF-8
|
R
| false
| false
| 936
|
r
|
leaders.R
|
# Functions to process and display leaders.
#' Challenge Leaders
#'
#' @param params Challenge parameters as `list`.
#'
#' @return
#' `format_leaders()` returns a character value indicating the challenge leaders.
#' @export
#'
#' @examples
#' params <- list(leaders = list(kevinrue = "Kevin Rue-Albrecht"))
#' cat(format_leaders(params), sep = "\n")
format_leaders <- function(params) {
challenge_leaders <- params$leaders
if (is.null(challenge_leaders)) {
stop("Challenge leaders are missing")
}
challenge_leaders <- unlist(challenge_leaders)
challenge_leaders <- mapply(.format_github_user, github = names(challenge_leaders), name = challenge_leaders)
challenge_leaders <- paste0(challenge_leaders, collapse = "\n")
challenge_leaders
}
.format_github_user <- function(github, name) {
sprintf('- %s - <i class="fab fa-github"></i> [%s](https://github.com/%s)', name, github, github)
}
|
0951be754aba577bec2fdc882c93d1b9af5f2fc2
|
ab642017b5d96153f17712652a7d5d5f59037187
|
/elrapack/R/getLmat.R
|
930a4b0c7b44845d973911eb81b322328362b2f0
|
[] |
no_license
|
maj-biostat/elra-biostats
|
4cc698dfbf262b15a86edf91431d3a9afcb00d58
|
0021579c0ab022fa4909fc2c70fdba063a3e9052
|
refs/heads/master
| 2021-09-09T19:57:39.655242
| 2018-03-19T10:47:14
| 2018-03-19T10:47:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,005
|
r
|
getLmat.R
|
#' Helper functions for creation of Lag-Lead matrices
#'
#' @rdname LagLead
#' @keywords internal
#' @export
createLmatDyn <- function(
lead = 4,
lag.c = 4,
lag.f = 2,
n.days = 11,
brks = c(0:11, seq(15, 55, by = 5), 61.1), time.shift = 4) {
time.seq <- seq_len(n.days)
if ( is.null(lag.c) ) {
lag.c <- max(brks)
lag.f <- 0
}
lag.vec <- lag.f * time.seq + lag.c + time.seq
lead.vec <- time.seq + lead
lag.mat <- sapply(lag.vec, function(z) z > brks[-length(brks)])
lead.mat <- sapply(lead.vec, function(z) z <= brks[-1] )
L <- (lead.mat & lag.mat) * 1
Lsub <- L[-c(seq_len(time.shift), length(brks)), ]
Lsub
}
#' @rdname LagLead
#' @keywords internal
#' @export
t_lead <- function(te, te.add.lead=0, lead.const=4, lead.factor=2) {
lead.const + (te + te.add.lead)*lead.factor
}
#' @rdname LagLead
#' @keywords internal
#' @export
lag_lead_df <- function(
te = 1:12,
te.add.lag = 0,
t.lag = 4,
te.add.lead = 0,
lead.const = 4,
lead.factor = 2,
interval.seq = c(0L:12L, seq(15, 55, by = 5), 61),
labels = TRUE) {
lead = t_lead(te, te.add.lead = te.add.lead, lead.const = lead.const,
lead.factor = lead.factor)
w.begin = (te + te.add.lag) + t.lag
w.end = get_end(w.begin, lead, max.end=max(interval.seq))
data.frame(
te = te,
lag = t.lag,
lead = lead,
w.begin = w.begin,
w.end = w.end,
int.begin = get_interval(w.begin, interval.seq=interval.seq, labels = labels),
int.end = get_interval(w.end, interval.seq=interval.seq, labels = labels)
)
}
#' @rdname LagLead
#' @keywords internal
#' @export
get_end <- function(start, lead, max.end) {
end <- start + lead
end <- ifelse(end > max.end, max.end, end)
}
#' @rdname LagLead
#' @keywords internal
#' @export
get_interval <- function(
x,
interval.seq = c(0L:12L, seq(15, 55, by = 5), 61),
labels = FALSE) {
ind <- cut(x, interval.seq, labels=FALSE)
# if(any(is.na(ind))) ind[is.na(ind)] <- max(ind, na.rm=TRUE)
if(labels) {
ind <- int_info2(min.int=0, brks=interval.seq)[ind, "interval"]
}
ind
}
#' @rdname LagLead
#' @keywords internal
#' @export
int_info2 <- function(
brks = c(0:12, seq(15, 55, by=5), 61),
min.int = 4) {
intlen <- diff(brks)
tstart <- c(0, cumsum(intlen)[-length(intlen)])
tend <- tstart + intlen
tdf <- data.frame(
tstart = tstart,
tend = tend,
intlen = intlen)
tdf <- dplyr::mutate(tdf, intmid = tstart + intlen/2)
tdf$interval <- paste0("(", tdf$tstart, ",", tdf$tend, "]")
tdf$interval <- factor(tdf$interval, levels=tdf$interval, labels=tdf$interval)
ind.keep <- which(tstart >= min.int)
subset(tdf, tstart >= min.int)
}
#' creates one instance of Lag/Lead mat
#' @param te Numeric/Integer vector specifying the times at which exposure occurred.
#' @param te.add.lag A numeric constant added to te before application of lag time
#' @param t.lag A numeric constant, specifying the time (from \code{te}) before
#' \code{te} can affect hazard.
#' @param te.add.lead A numeric constant, added to te before application of lead time.
#' @param lead.const A numeric constant, specifying the constant amount of time
#' after \code{te + t.lag}, in which \code{te} can still affect hazard.
#' @param lead.factor If the lead time is dynamic, this factor can be set different
#' to zero, such that \code{t.lead=lead.const + lead.factor*te}.
#' @param interval.seq The break points dividing the follow up into intervals.
#' @param t.min If some intervals are not of interest only intervals for t > t.min are
#' returned.
#' @import checkmate dplyr
#' @return A data frame with intervals as first column and \code{length(te)}
#' columns specifying the lag/lead for each \code{te}.
#' @keywords internal
#' @export
create_Lmat <- function(
te = 1:12,
te.add.lag = 0,
t.lag = 4,
te.add.lead = 0,
lead.const = 4,
lead.factor = 2,
interval.seq = c(0:12, seq(15, 55, by = 5), 61.1),
t.min = 0) {
assert_integer(te, lower=1, any.missing=FALSE, unique=TRUE)
assert_numeric(interval.seq, lower=0, any.missing=FALSE, min.len=2)
assert_number(te.add.lag, lower=0, upper=max(interval.seq), finite=TRUE)
assert_number(t.lag, lower=1, upper=max(interval.seq), finite=TRUE)
assert_number(lead.const, lower=0, upper=max(interval.seq), finite=TRUE)
assert_number(lead.factor, lower=0, upper=max(interval.seq), finite=TRUE)
assert_number(t.min, lower=0, upper=max(interval.seq), finite=TRUE)
# create lag-lead information matrix
ldf <- lag_lead_df(te=te, te.add.lag=te.add.lag, te.add.lead=te.add.lead,
t.lag=t.lag, lead.const=lead.const, lead.factor=lead.factor,
interval.seq=interval.seq)
ind.begin <- get_interval(ldf$w.begin, interval.seq=interval.seq)
ind.end <- get_interval(ldf$w.end, interval.seq=interval.seq)
int.info <- int_info2(brks=interval.seq, min.int=0)
int.keep <- int.info$interval[which(int.info$tstart >= t.min)]
ints <- apply(cbind(ind.begin, ind.end), 1, function(z) {
z.i <- int.info$interval[z[1]:z[2]]
int.info$interval %in% z.i
}) * 1
ints <- data.frame(intsL=int.info$interval, Lcols=ints)
filter(ints, intsL %in% int.keep)
}
#' Creates Lag/Lead matrix for all observations in a data set
#'
#' @param data The complete data set.
#' @param Lmat A data frame where the first column specifies intervals and the
#'   other columns the Lag/Lead structure for each day of exposure.
#' @param merge.col.data The name of the column (in data) on which data and Lmat
#'   should be merged. Defaults to \code{"int"}.
#' @param merge.col.Lmat The name of the column (in Lmat) on which data and Lmat
#'   should be merged. Defaults to \code{"intsL"}.
#' @details
#' Lmat, that only contains information for all unique intervals and all days of
#' exposure, is merged with data, such that correct rows of Lmat are added to the
#' data, but only the Lmat matrix (now with as many rows as data) is returned.
#' @importFrom stats setNames
#' @importFrom dplyr left_join
#' @import checkmate
#' @keywords internal
#' @export
Lmat_data <- function(
  data,
  Lmat,
  merge.col.data  = "int",
  merge.col.Lmat  = "intsL") {

  # check inputs: both must be data frames with identical factor levels on
  # the merge columns, otherwise the join silently produces NAs
  assert_data_frame(data)
  assert_data_frame(Lmat)
  assert_set_equal(levels(data[[merge.col.data]]), levels(Lmat[[merge.col.Lmat]]))

  nrow.data <- nrow(data)
  # keep only the merge column (one-column data frame) and free the rest;
  # fixed: previously hard-coded data["int"], ignoring merge.col.data
  int <- data[merge.col.data]
  rm(data)
  # named-vector `by` maps Lmat's key column onto data's key column
  int <- left_join(int, Lmat, by = setNames(merge.col.Lmat, merge.col.data))
  if (!(nrow(int) == nrow.data)) {
    stop("Left join not successful, number of rows produced not equal to number of
      rows in data")
  }
  # return int, -1 because we only want the Lmat, not the intervals
  # this is necessary because we need to store L as a (numeric) matrix in the
  # data for it to be processed properly by mgcv::gam
  as.matrix(int[, -1])
}
#' Heatmap of the effect terms for relevant L-Areas
#'
#' Melts the supplied matrix and draws one tile per (interval, protocol day)
#' cell, filled by the effect value.
#'
#' @param hm Matrix of values to plot (rows = intervals, columns = days).
#' @param day.names Labels for the protocol days (columns of \code{hm}).
#' @param int.names Labels for the intervals (rows of \code{hm}).
#' @param high.col,low.col Colours for the fill gradient extremes.
#' @param grid.col Colour of the tile borders.
#' @param title.char Plot title.
#' @import ggplot2
#' @importFrom reshape2 melt
#' @keywords internal
#' @export
heatAdequacy <- function(
  hm = NULL,
  day.names = 1:11,
  int.names = NULL,
  high.col = "steelblue",
  low.col = "white",
  grid.col = "lightgrey",
  title.char = "") {

  # long format: one row per matrix cell, with Var1/Var2 as row/column ids
  melted <- melt(hm)
  melted$Var1 <- factor(melted$Var1, labels = int.names)
  melted$Var2 <- factor(melted$Var2, labels = day.names)

  # build the plot step by step; intervals are reversed so the first
  # interval appears at the top of the heatmap
  p <- ggplot(melted, aes(x = Var2, y = rev(Var1)))
  p <- p + geom_tile(aes(fill = value), colour = grid.col)
  p <- p + scale_fill_gradient(low = low.col, high = high.col)
  p <- p + xlab("Protocol day")
  p <- p + scale_y_discrete("Interval j", labels = rev(int.names))
  p <- p + theme(legend.position = "none")
  p + labs(title = title.char)
}
|
8c2296c62d111006a9e11a7f903ef31b728cfe34
|
4e1e5ca97aa52682d72e051e8aacd8a420aa2f41
|
/tests/testthat/test-scatterplot_penguins.R
|
d677f11ce590025c02f20bf4504dc4095e42624e
|
[
"MIT"
] |
permissive
|
2DegreesInvesting/ds.testing
|
84cffd90bb660ad7289abac0a2dacd14834fd86e
|
e4f0cb53be747aaa7fab90b915048602b67be03a
|
refs/heads/master
| 2023-04-22T13:12:16.718512
| 2021-05-11T12:18:07
| 2021-05-11T12:18:07
| 352,852,572
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 203
|
r
|
test-scatterplot_penguins.R
|
test_that("hasn't changed", {
  # Snapshot tests are rendering-dependent; only run on Linux CI.
  skip_on_os("mac")
  skip_on_os("windows")
  penguins_complete <- na.omit(palmerpenguins::penguins)
  plot_obj <- scatterplot_penguins(penguins_complete)
  # Drop the captured environment so the snapshot is stable across sessions.
  plot_obj$plot_env <- NULL
  expect_snapshot(str(plot_obj))
})
|
245151f0e25b9bb430d11fd3537d4eab5c2db00c
|
88ebbada30ec62db655b56dd641ac844223660c0
|
/Exercise_7.R
|
011e48b65ae5a61ce5cfa5866beb276854805f8f
|
[] |
no_license
|
smasca/Biocomputing2020_Tutorial09
|
71c28aee70adfc60648a0a549965f826f21abcbf
|
7faaa0d6afaa6664023eb352a0eabea12b4a668e
|
refs/heads/main
| 2022-12-26T04:18:28.511703
| 2020-10-13T19:19:47
| 2020-10-13T19:19:47
| 302,665,132
| 0
| 0
| null | 2020-10-09T14:27:52
| 2020-10-09T14:27:51
| null |
UTF-8
|
R
| false
| false
| 1,597
|
r
|
Exercise_7.R
|
# Samantha Masca
# BIOS 30318
# Exercise 7
# TA: Elizabeth Brooks

# Task 1: replicate the functionality of the Linux `head` command:
# print the first n lines of a file.
setwd("/Users/samanthamasca/Biocomputing2020_Tutorial09/")
# data file to display
data <- read.table(file = 'wages.csv', sep = ',', header = TRUE, stringsAsFactors = FALSE)
# number of lines to show
n <- 9
# first n rows; min() guards against n exceeding the number of rows
data[seq_len(min(n, nrow(data))), ]

# Task 2: load iris.csv and do the following things
iris <- read.table(file = 'iris.csv', sep = ',', header = TRUE, stringsAsFactors = FALSE)

# print the last 2 rows in the last 2 columns to the R terminal
# fixed: derive the indices from the data instead of hard-coding 149:150 / c(4,5)
iris[(nrow(iris) - 1):nrow(iris), (ncol(iris) - 1):ncol(iris)]

# get the number of observations for each species included in the data set
# number of virginica observations
nrow(iris[iris$Species == "virginica", ])
# number of setosa observations
nrow(iris[iris$Species == "setosa", ])
# number of versicolor observations
nrow(iris[iris$Species == "versicolor", ])

# get rows with Sepal.Width > 3.5
iris[iris$Sepal.Width > 3.5, ]

# write the data for the species setosa to a comma-delimited file named 'setosa.csv'
setosa <- iris[iris$Species == "setosa", ]
write.table(setosa, "setosa.csv", row.names = FALSE, sep = ',')

# calculate the mean, minimum, and maximum of Petal.Length for observations from virginica
virginicaData <- iris[iris$Species == "virginica", ]
# mean Petal.Length of virginica
mean(virginicaData$Petal.Length)
# minimum Petal.Length of virginica
min(virginicaData$Petal.Length)
# maximum Petal.Length of virginica
max(virginicaData$Petal.Length)
|
d1d57b4113410971e09244bd05758fb6ce0fad16
|
959b8d01689825ce765ef1f783c579c43831d9a9
|
/R학습파일/logi_ex2.R
|
fd47dd6d3617ce6d01a51454600f069411e50368
|
[] |
no_license
|
leeyouhee/R2
|
9f7117e2b99f37ad1ef9bf2e4242c21468196629
|
a7f448247d81ecaea148703b4ffa2be2aaa54ea7
|
refs/heads/master
| 2022-12-10T20:41:48.616158
| 2020-09-01T03:37:10
| 2020-09-01T03:37:10
| 283,909,285
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 412
|
r
|
logi_ex2.R
|
########## Example: mtcars ##########
# Work through the steps below using the mtcars data set :)
# SQLD guide, page 315
data(mtcars)
head(mtcars)

library(dplyr)
str(mtcars)
View(mtcars)

# 1) Prepare the data
df <- mtcars %>%
  dplyr::select(mpg, vs, am)
head(df)  # fixed typo: was `heda(df)`, which is not a function

# 2) Split the data 80:20
# 3) Fit a model with dependent variable vs, independent variables mpg + am
# 4) Predict
# 5) Variable selection
step
# 6) Compare the results
|
06b4b7fd468c680c0b517cf3c58e5985773d7686
|
1415a07510acfe50a8bcb6d1377ea98fab20c63e
|
/boolType.R
|
c806f7038950471b9dccdb241274112cdef46ae5
|
[] |
no_license
|
00mjk/interOp-1
|
9f8b5db42fad60e34e9bff36319acf0e9db4d8bc
|
2cc1f59a9a31b3a7339f2a42a2386cdadee30233
|
refs/heads/master
| 2021-09-28T00:14:07.785145
| 2018-11-12T01:44:02
| 2018-11-12T01:44:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 348
|
r
|
boolType.R
|
#
# Henry Samuelson
#
# Bool Data Type interpreter

#' Normalise a textual boolean to the canonical single-letter form.
#'
#' @param x A character scalar such as "True", "true", "T", "False",
#'   "false" or "F".
#' @return "T" or "F" for a recognised boolean spelling; any other input
#'   is returned unchanged (previously the function fell through and
#'   returned NULL for unrecognised input).
processBool <- function(x){
  # First check the bool type true/false, then convert to universal T/F.
  # Fixed: use scalar `||` consistently (one branch used the vectorised `|`).
  if (x == "True" || x == "true" || x == "T") {
    x <- "T"
  } else if (x == "False" || x == "false" || x == "F") {
    x <- "F"
  }
  # Now that the input is standardised, return it explicitly.
  x
}
|
05fc04944b2850c14dcaebc42378e9924f595d5b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/RSiteCatalyst/examples/GetElements.Rd.R
|
8bc73367e41d733bd584a323f045d0d7202937b1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 556
|
r
|
GetElements.Rd.R
|
# Auto-generated example script for RSiteCatalyst::GetElements, extracted
# from the package's Rd examples. The API calls need Adobe Analytics
# credentials, so they are wrapped in "Not run" comments and never executed.
library(RSiteCatalyst)
### Name: GetElements
### Title: Get Valid Elements for a Report Suite
### Aliases: GetElements

### ** Examples

## Not run: 
##D elements.valid <- GetElements("your_report_suite",
##D metrics=c('visitors','pageviews'),
##D elements=c('page','geoCountry'),
##D date.granularity='day',
##D report.type='')
##D 
##D elements <- GetElements(c("your_prod_report_suite","your_dev_report_suite"))
## End(Not run)
|
491fac7e6f32b1bda7944c8b0e5b165ef9631025
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/RcmdrPlugin.IPSUR/examples/birthday.ipsur.Rd.R
|
7ad88694bd981eb8be84450419c77317d21ad4f9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 579
|
r
|
birthday.ipsur.Rd.R
|
# Auto-generated example script for the birthday-coincidence functions in
# RCmdrPlugin.IPSUR (extracted from the package's Rd examples).
library(RcmdrPlugin.IPSUR)
### Name: birthday.ipsur
### Title: Probability of coincidences for the IPSUR package
### Aliases: qbirthday.ipsur pbirthday.ipsur
### Keywords: distribution

### ** Examples

## the standard version
qbirthday.ipsur()
## same 4-digit PIN number
qbirthday.ipsur(classes=10^4)
## 0.9 probability of three coincident birthdays
qbirthday.ipsur(coincident=3, prob=0.9)
## Chance of 4 coincident birthdays in 150 people
pbirthday.ipsur(150,coincident=4)
## 100 coincident birthdays in 1000 people: *very* rare:
pbirthday.ipsur(1000, coincident=100)
|
0dd2c90d80b7db5da9fb12fcf266c8a173c77fbb
|
6cbc6e80ae07b8fb1fff0a5cad4ddcd29c358c0a
|
/man/ezr.impute.Rd
|
ae1c0bc4a8a5850bee0c5a649bc3386fa7b3b76e
|
[] |
no_license
|
lenamax2355/easyr
|
d99638b84fd9768774fa7ede84d257b10e0bacf6
|
37ab2fe5c28e83b9b5b3c0e3002f2df45708016b
|
refs/heads/master
| 2022-01-09T20:43:17.801623
| 2019-05-13T02:49:48
| 2019-05-13T02:49:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 799
|
rd
|
ezr.impute.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ezr_impute.R
\name{ezr.impute}
\alias{ezr.impute}
\title{Impute values}
\usage{
ezr.impute(dataset, use_mean = FALSE, use_median = TRUE,
only_columns = NULL, adjust_chars = FALSE, exclude_columns = NULL)
}
\arguments{
\item{dataset}{Dataset}
\item{use_mean}{Impute with the mean value?}
\item{use_median}{Impute with the median value? This is the default value}
\item{only_columns}{Just look at these specific columns}
\item{adjust_chars}{Should character/factor columns be imputed with mode? Default is FALSE}
\item{exclude_columns}{Do not adjust these columns at all.}
}
\description{
Impute values in your dataframe on missings. Pick either mean or median for numericals. Mode is used for categoricals.
}
|
74910537b52d0a9107c83a27f8d23f4b8e97d3d6
|
96c937d616a7235e2af970bea6307cfca3cc7a4e
|
/prob5/suramrit_server.R
|
fc81a1c390e304b64cbe7bbddb94f16d4285fa82
|
[] |
no_license
|
suramrit/Twitter-Analysis-using-R
|
6199eb47bea6c7eaa9bbff79da9b9930f394f8ff
|
a666aeb7fb1b99877fbf856556fee18d0a7f755d
|
refs/heads/master
| 2021-01-21T13:14:43.435770
| 2016-05-12T15:00:59
| 2016-05-12T15:00:59
| 55,788,942
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,032
|
r
|
suramrit_server.R
|
# Twitter primary-election dashboard, server side.
# NOTE(review): this server references several globals that must be created
# elsewhere in the app before it runs (daily_mentions, weekly_mentions,
# day_mentions, temp, candidates, primary_data, visit*.x / visit*.y,
# my_oauth) -- confirm they are defined in global.R / the session setup.
library(shiny)
library(ggplot2)
library(ggmap)

# Define server logic for random distribution application
shinyServer(function(input, output, session) {

  # Reactive expression to generate the requested distribution.
  # This is called whenever the inputs change. The output
  # functions defined below then all use the value computed from
  # this expression

  # Re-triggers dependent outputs every 7500 ms, driving the auto-refresh.
  autoUpdate <- reactiveTimer(7500, session)

  # NOTE(review): `choice` is invoked as a function here, while the
  # renderPlot blocks below compare it against data frames -- confirm the
  # intended types of daily_mentions / weekly_mentions.
  data <- reactive({
    choice <- switch(input$choice,
                     day = daily_mentions,
                     week = weekly_mentions)
    choice(input$n)
  })

  # Generate a plot of the data. Also uses the inputs to build
  # the plot label. Note that the dependencies on both the inputs
  # and the data reactive expression are both tracked, and
  # all expressions are called in the sequence implied by the
  # dependency graph

  # Bar chart of candidate mention counts (weekly, daily, or live stream).
  output$plot1 <- renderPlot({
    autoUpdate()
    choice <- get(input$choice)
    #will have the choice here ..
    if(identical(choice, weekly_mentions))
    {
      # Rebuild the weekly totals by re-reading each day's stored tweets.
      for (i in 1:length(temp)){
        file_name = paste0(c('primary','_',i),collapse = "")
        day_name = paste0(c('day','_',i,'_mentions'),collapse = "")
        #assign(file_name, read.xls(temp[i],quote = NULL,header=TRUE ))
        #assign(file_name, parseTweets(temp[i], simplify = FALSE))
        assign(day_name, data.frame(candidate= factor(), num = factor()))
        temp2 <- get(file_name)
        temp3 <- get(day_name)
        # Count mentions of each candidate in this day's tweet text.
        for(j in candidates){
          sub <- temp2[grep(j, temp2$text), ]
          temp3<- rbind(temp3, data.frame(day=i,candidate = j,num=nrow(sub)))
        }
        day_mentions <- rbind(day_mentions,temp3)
        primary_data <- rbind(primary_data, temp2)
      }
      #have all tweet data at this point..
      weekly_mentions <- data.frame(candidate = factor(), num = factor())
      # tweet mentions for each popular candidate
      for(i in candidates){
        sub2 <- primary_data[grep(i, primary_data$text), ]
        weekly_mentions<- rbind(weekly_mentions, data.frame(candidate = i,num=nrow(sub2)))
      }
      ggplot(choice, aes(choice$candidate,choice$num)) + geom_bar(stat= "identity")
    }
    else if(identical(choice, day_mentions)){
      # Per-day stacked bars, coloured by candidate.
      ggplot(choice, aes(choice$day, choice$num, color= choice$candidate, fill= choice$candidate))+geom_bar(stat='identity')
    }
    else{
      # Live mode: pull a fresh one-second sample from the Twitter
      # streaming API and count candidate mentions in it.
      Sys.sleep(10)
      file.remove("new_tweets.json")
      filterStream(file.name = "new_tweets.json", # Save tweets in a json file
                   track = c("primary election","us election","trump","donald trump",
                             "hilary clinton", "clinton","democrats",
                             "republicans","sanders","bernie sanders",
                             "caucuses result","nevada primary",
                             "south carolina primary","iowa primary","new hampshire result",
                             "presidential election us","republican result",
                             "democrat result","primaries delegate result",
                             "new york times bernie sanders","new york times hilary clinton","new york times donald trump",
                             "fox news bernie sabders","fox news hilary","fox news donald trump",
                             "huffington post bernie sanders","huffington post hilary clinton","huffington post donald trump",
                             "cnbc bernie sanders","cnbc donald trump","cnbc hilary clinton",
                             "bloomberg bernie sanders","bloomberg hilary clinton","bloomberg donald trump",
                             "bbc bernie sanders","bbc hsilary clinton","bbc donald trump",
                             "new york times primary elections","fox news primary elections", "cnbc primary elections","bloomberg primary elections"),
                   language = "en",
                   timeout = 1,
                   oauth = my_oauth)
      new_tweets <- data.frame()
      new_tweets <- parseTweets("new_tweets.json", simplify = FALSE)
      # Keep only the first 100 tweets of the sample.
      new_tweets <- new_tweets [1:100,]
      new_mentions <- data.frame(candidate = factor(), num = factor())
      # tweet mentions for each popular candidate
      for(i in candidates){
        sub2 <- new_tweets[grep(i, new_tweets$text), ]
        new_mentions<- rbind(new_mentions, data.frame(candidate = i,num=nrow(sub2)))
      }
      ggplot(new_mentions, aes(new_mentions$candidate,new_mentions$num)) + geom_bar(stat= "identity")
    }
  })

  # World map of tweet origins for the selected mode.
  output$plot2 <- renderPlot({
    autoUpdate()
    choice <- get(input$choice)
    #will have the choice here ..
    if(identical(choice, weekly_mentions))
    {
      ggplot() + borders("world", colour="gray50", fill="gray50") +geom_point(aes(x=visit.x, y=visit.y) ,color="blue", size=1.5)
    }
    else if(identical(choice , day_mentions)){
      # One colour per day of the week; visit_N.x/.y are precomputed
      # longitude/latitude vectors.
      ggplot() + borders("world", colour="gray50", fill="gray50") + geom_point(aes(x=visit_1.x, y=visit_1.y) ,color="blue", size=1.5)+
        geom_point(aes(x=visit_2.x, y=visit_2.y) ,color="red", size=1.5) +
        geom_point(aes(x=visit_3.x, y=visit_3.y) ,color="green", size=1.5) +
        geom_point(aes(x=visit_4.x, y=visit_4.y) ,color="orange", size=1.5) +
        geom_point(aes(x=visit_5.x, y=visit_5.y) ,color="black", size=1.5) +
        geom_point(aes(x=visit_6.x, y=visit_6.y) ,color="yellow", size=1.5)
    }
    else {
      # Live mode: stream a fresh sample, geocode the first 10 tweet
      # locations and plot them on the world map.
      Sys.sleep(10)
      file.remove("new_tweets.json")
      filterStream(file.name = "new_tweets.json", # Save tweets in a json file
                   track = c("primary election","us election","trump","donald trump",
                             "hilary clinton", "clinton","democrats",
                             "republicans","sanders","bernie sanders",
                             "caucuses result","nevada primary",
                             "south carolina primary","iowa primary","new hampshire result",
                             "presidential election us","republican result",
                             "democrat result","primaries delegate result",
                             "new york times bernie sanders","new york times hilary clinton","new york times donald trump",
                             "fox news bernie sabders","fox news hilary","fox news donald trump",
                             "huffington post bernie sanders","huffington post hilary clinton","huffington post donald trump",
                             "cnbc bernie sanders","cnbc donald trump","cnbc hilary clinton",
                             "bloomberg bernie sanders","bloomberg hilary clinton","bloomberg donald trump",
                             "bbc bernie sanders","bbc hsilary clinton","bbc donald trump",
                             "new york times primary elections","fox news primary elections", "cnbc primary elections","bloomberg primary elections"),
                   language = "en",
                   timeout = 1,
                   oauth = my_oauth)
      new_tweets <- data.frame()
      new_tweets <- parseTweets("new_tweets.json", simplify = FALSE)
      new_tweets <- new_tweets [1:100,]
      new_mentions <- data.frame(candidate = factor(), num = factor())
      # tweet mentions for each popular candidate
      for(i in candidates){
        sub2 <- new_tweets[grep(i, new_tweets$text), ]
        new_mentions<- rbind(new_mentions, data.frame(candidate = i,num=nrow(sub2)))
      }
      new_visited <- data.frame()
      new_ll.visited <- data.frame()
      new_visit.x<- data.frame()
      new_visit.y<- data.frame()
      new_visited <- subset(new_tweets, !(is.na(new_tweets$location)))$location[1:10]
      new_ll.visited <- geocode(new_visited)
      new_visit.x <- new_ll.visited$lon
      new_visit.y <- new_ll.visited$lat
      ggplot() + borders("world", colour="gray50", fill="gray50") +
        geom_point(aes(x=new_visit.x, y=new_visit.y) ,color="white", size=1.5)
    }
  })
})
|
b89f44461dbe09ed9cf941edbb17a1dac0dca3c0
|
2802979852f33dc4336c0e0fbc6a601a928efc5e
|
/R/initialize.R
|
ff6a7f43b9ecdd5ff8b85708f365af812a52cd90
|
[] |
no_license
|
cran/netgwas
|
05ee21591f4bc89b295b4d7d6754ec9fb5cc7225
|
e661e37640b335d4fa515f03411e08bb12b795fa
|
refs/heads/master
| 2023-08-31T22:02:45.223899
| 2023-08-07T14:40:02
| 2023-08-07T16:35:15
| 112,773,132
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,783
|
r
|
initialize.R
|
#-------------------------------------------------------------------------------#
# Package: Network-Based Genome-Wide Association Studies                        #
# Author: Pariya Behrouzi                                                       #
# Emails: <pariya.Behrouzi@gmail.com>                                           #
# Date: Nov 21th 2017                                                           #
#-------------------------------------------------------------------------------#

# Build the initial quantities for the EM algorithm: the latent data matrix Z,
# the initial covariance estimate ES, the penalty path `rho`, and the
# truncation bounds `lower_upper` derived from the ordinal data y.
initialize = function(y, rho = NULL, n_rho = NULL, rho_ratio = NULL, ncores = NULL)
{
  p <- ncol(y)
  n <- nrow(y)
  lower_upper = lower.upper(y)

  # Fix: the default ncores = NULL previously crashed at `if(ncores > 1)`
  # ("argument is of length zero"); treat NULL as sequential execution.
  if (is.null(ncores)) ncores <- 1

  if (is.null(rho))
  {
    # Build a log-spaced penalty path from rho_max down to rho_ratio*rho_max.
    if (is.null(n_rho)) n_rho = 10
    if (is.null(rho_ratio)) rho_ratio = 0.3
    cr = cor(y, method = "spearman") - diag(p)
    cr[is.na(cr)] <- 0
    rho_max = max(max(cr), -min(cr))
    if (rho_max == 0)
    {
      # All correlations vanished (e.g. constant columns); retry after a
      # nonparanormal shrinkage transformation.
      ty <- npn(y, npn.func = "shrinkage")
      cr = cor(ty, method = "spearman") - diag(p)
      rho_max = max(max(cr), -min(cr))
    }
    # Cap the largest penalty at 0.7.
    if (rho_max >= .7) rho_max = .7
    rho_min = rho_ratio * rho_max
    rho = exp(seq(log(rho_max), log(rho_min), length = n_rho))
    rm(cr, rho_max, rho_min, rho_ratio)
    gc()
  }

  ## Initialize S: conditional first and second moments of the latent
  ## variables, computed per column (optionally in parallel).
  Z <- matrix(0, n, p)
  diag_element <- rep(0, p)
  if (ncores > 1)
  {
    cl <- makeCluster(ncores)
    tmp2 <- parLapply(cl = cl, 1:p, function(i) {
      element_S_j(i, lower_upper );
    })
    stopCluster(cl)
  }else{
    tmp2 <- lapply(1:p, function(i){ element_S_j(i, lower_upper );})
  }
  Z <- do.call(cbind, lapply(1:p, function(x) tmp2[[x]]$EX ))
  diag_element <- unlist(lapply(1:p, function(x) mean(tmp2[[x]]$EXX)))
  ES <- t(Z) %*% Z / n
  diag(ES) <- diag_element
  rm(tmp2, diag_element)
  gc()

  return(list(Z = Z, ES = ES, rho = rho, lower_upper = lower_upper))
}
|
b698ee1781fd56884590505bfedec98ed894de52
|
dbfe5ce272e204a8e1663ced35c9d48ef4870496
|
/R/statistic.R
|
7c1057f68a39216b944a9a4736c543793020cf5f
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hmito/hmRLib
|
fac91a4e2ddfcd899283ec0b63c87c31965fb17f
|
f2cfd54ea491ee79d64f7dd976a94086092b8ef5
|
refs/heads/master
| 2023-08-31T07:21:31.825394
| 2023-08-28T10:02:07
| 2023-08-28T10:02:07
| 41,907,654
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,862
|
r
|
statistic.R
|
#' Return vector of the mid points of the argument
#' @description Computes the midpoint between each pair of neighbouring
#'   elements, i.e. (x[-1] + x[-length(x)]) / 2.
#' @param x target vector
#' @return vector of mid points, one element shorter than \code{x}
#' @export
#' @examples
#' x = c(0.0,0.2,0.6,1.2)
#' ans = mids(x)
#' # ans == c(0.1,0.4,0.9)
mids <- function(x){
  last <- length(x)
  # drop the last element / drop the first element, then average pairwise
  (x[-last] + x[-1]) / 2
}
#' Return mean value of the probability distribution
#' @description Computes the weighted mean of \code{x} with weights
#'   \code{pd} (an unnormalised probability distribution or histogram).
#' @param x axis value
#' @param pd probability distribution or histogram data
#' @return mean value of the given pd
#' @export
pd.mean <- function(x, pd){
  total <- sum(pd)
  # weighted first moment, normalised by the total mass
  sum(x * pd) / total
}
#' Return variance of the probability distribution
#' @description Computes the variance of \code{x} weighted by \code{pd}
#'   as E[x^2] - E[x]^2.
#' @param x axis value
#' @param pd probability distribution or histogram data
#' @return variance of the given pd
#' @export
pd.var <- function(x, pd){
  total <- sum(pd)
  second_moment <- sum(x * x * pd) / total
  first_moment <- sum(x * pd) / total
  second_moment - first_moment^2
}
#' Return skewness of the probability distribution
#' @description Computes the skewness of \code{x} weighted by \code{pd}
#'   from the first three raw moments: (E[x^3] - 3*m*E[x^2] + 2*m^3) / var^1.5.
#' @param x axis value
#' @param pd probability distribution or histogram data
#' @return skewness of the given pd
#' @export
pd.skewness <- function(x, pd){
  m <- pd.mean(x, pd)
  v <- pd.var(x, pd)
  # third central moment expressed via raw moments, then standardised
  (sum(x * x * x * pd) / sum(pd) - 3 * m * sum(x * x * pd) / sum(pd) + 2 * m^3) / v^1.5
}
#' Return median of the probability distribution
#' @description Computes the median of the probability distribution or
#'   histogram data by linear interpolation of the cumulative distribution
#'   around the 0.5 quantile.
#' @param x axis value
#' @param pd probability distribution or histogram data
#' @return median of the given pd
#' @export
pd.median <- function(x, pd){
  cpd <- cumsum(pd) / sum(pd)
  # first index whose cumulative mass exceeds 0.5
  ui <- which(cpd > 0.5)[1]
  li <- max(1L, ui - 1L)
  # Fix: when the median falls in the first cell, li == ui and the
  # interpolation denominator was zero (returned NaN); return x[ui] directly.
  if (li == ui) {
    return(x[ui])
  }
  # linear interpolation between the bracketing axis values
  (x[li] * (cpd[ui] - 0.5) + x[ui] * (0.5 - cpd[li])) / (cpd[ui] - cpd[li])
}
|
7fbb8c80df3372fc4bbff7dd22ba6a54f57a2ee3
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/chipPCR/examples/CD75.Rd.R
|
0db5d51579943179efd21e6c506dff69c8ca8283
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 208
|
r
|
CD75.Rd.R
|
library(chipPCR)
### Name: CD75
### Title: Helicase Dependent Amplification in the VideoScan HCU
### Aliases: CD75
### Keywords: datasets
### ** Examples
data(CD75)
## maybe str(CD75) ; plot(CD75) ...
|
6f60d2a71678bec7d58cdf104adc77158a4dfec4
|
2ea4c931d915e650fa40af46d36d8c4c1abc7f29
|
/run_analysis.R
|
e139dd1eb645a5a9d661b689ec109dbd283c0b74
|
[] |
no_license
|
muhsalem/Coursera-Peer-graded-Assignment-Getting-and-Cleaning-Data-Course-Project
|
b0112ff49f45501703f75d0c476b997b811bfe00
|
1c302a9c6f9f2498d91834bd1f6010bbedf19762
|
refs/heads/master
| 2020-03-19T23:06:24.840177
| 2018-06-11T19:47:13
| 2018-06-11T19:47:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,321
|
r
|
run_analysis.R
|
## DATA CLEANING PROJECT
## Loads the dplyr package which will be needed in this script
library(dplyr)

## **********************************************************************************************
## Part 1. Merges the training and the test sets to create one data set.
## **********************************************************************************************

## Reads the training dataset, labels and subjects files into three dataframes
train_set <- read.table("./UCI HAR Dataset/train/X_train.txt",header=FALSE)
train_labels <- read.table("./UCI HAR Dataset/train/y_train.txt",header=FALSE)
train_subject <- read.table("./UCI HAR Dataset/train/subject_train.txt",header=FALSE)

## Merges the training data into one dataframe
train <- cbind(train_subject,train_labels,train_set)

## Reads the test dataset, labels and subjects files into three dataframes
test_set <- read.table("./UCI HAR Dataset/test/X_test.txt",header=FALSE)
test_labels <- read.table("./UCI HAR Dataset/test/y_test.txt",header=FALSE)
test_subject <- read.table("./UCI HAR Dataset/test/subject_test.txt",header=FALSE)

## Merges the test data into one dataframe
test <- cbind(test_subject,test_labels,test_set)

## Merges the training and test data
data <- rbind(train,test)

## Assigns variable names to the train and test data
## This is done by first reading the features names from the features.txt file in a valid format
features_names <- read.table("./UCI HAR Dataset/features.txt",header=FALSE)
valid_features_names <- make.names(names=features_names$V2, unique=TRUE, allow_ = TRUE)
names(data) <- c("subject","activity",valid_features_names)

## **********************************************************************************************
## Part 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## **********************************************************************************************

## Using the dplyr package, selects the subject, activities and appropriate columns mean and std
## calculated columns. The "." before and after mean and std ensures the selection of only those
## columns for which mean and std has been calculated
clean_data <- select(data,subject,activity,contains(".mean."),contains(".std."))

## **********************************************************************************************
## Part 3. Uses descriptive activity names to name the activities in the data set
## **********************************************************************************************

## Reads the activity labels table storing it into a dataframe
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")

## Looks up and replaces the activity number with the appropriate label
clean_data$activity <- activity_labels$V2[match(clean_data$activity,activity_labels$V1)]

## **********************************************************************************************
## Part 4. Appropriately labels the data set with descriptive variable names.
## **********************************************************************************************

## Adds descriptive names by replacing the original column names
names(clean_data)[3:68] <- gsub("^t","Time_",names(clean_data)[3:68])
names(clean_data)[3:68] <- gsub("^f","Frequency_",names(clean_data)[3:68])
names(clean_data)[3:68] <- gsub("Acc","_Acceleration",names(clean_data)[3:68])
names(clean_data)[3:68] <- gsub("Gyro","_Gyroscope",names(clean_data)[3:68])
names(clean_data)[3:68] <- gsub("Mag","_Magnitude",names(clean_data)[3:68])

## Writes the dataframe into a .txt file
clean_data %>% write.table("clean_data.txt",row.name=FALSE)

## **********************************************************************************************
## Part 5. From the data set in step 4, creates a second, independent tidy data set with the
## average of each variable for each activity and each subject.
## **********************************************************************************************

## Groups by subject and activities, then summarizes the dataframe by averages of variables.
## Fixed: funs() is deprecated since dplyr 0.8; passing the function directly
## works in all dplyr versions and produces the same result.
summarized_data <- clean_data %>% group_by(subject,activity) %>% summarize_all(mean)

## Writes the dataframe into a .txt file
summarized_data %>% write.table("summarized_data.txt",row.name=FALSE)
|
ed035e7c179c9d0993b6ccebae5586fec7f8d7c7
|
767beb025b7bb92ad0fba01fb66f470d3a48b5c6
|
/R/GrammaticalEvolution.R
|
2c4d6025d9e6c7e9f13fb307d246e75d0af7b2f7
|
[] |
no_license
|
ramcqueary/gramEvol3
|
cf46ba9b88d751899b630c37ca5676e6e4a21327
|
1704cd06723402e38911f37d490da790b48b420a
|
refs/heads/master
| 2021-09-23T18:58:03.400514
| 2021-09-12T03:09:15
| 2021-09-12T03:09:15
| 249,277,554
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,105
|
r
|
GrammaticalEvolution.R
|
# NOTE(review): this is a deparsed copy of gramEvol's GrammaticalEvolution()
# (a bare `function(...)` expression with no assignment). Comments added only;
# the code itself is unchanged. It searches grammar-generated expressions by
# optimising integer codon chromosomes with either a genetic algorithm ("ga")
# or an evolution strategy ("es").
function (grammarDef, evalFunc, numExpr = 1, max.depth = GrammarGetDepth(grammarDef), startSymb = GrammarStartSymbol(grammarDef), seqLen = GrammarMaxSequenceLen(grammarDef, max.depth, startSymb), wrappings = 3, suggestions = NULL, optimizer = c("auto", "es", "ga"), popSize = "auto", newPerGen = "auto", elitism = 2, mutationChance = NA, iterations = "auto", terminationCost = NA, monitorFunc = NULL, disable.warnings = FALSE, plapply = lapply, ...) 
{
    if (numExpr < 1) {
        stop("Number of Expressions (numExpr) has to be at least 1.")
    }
    # Each expression consumes seqLen codons, so the chromosome holds all of
    # them back to back.
    chromosomeLen <- seqLen * numExpr
    optimizer <- match.arg(optimizer)
    # "auto": GA for multi-expression searches, ES for a single expression.
    if (optimizer == "auto") {
        if (numExpr > 1) {
            optimizer = "ga"
        }
        else {
            optimizer = "es"
        }
    }
    if (popSize == "auto") {
        if (optimizer == "ga") {
            popSize = 200
        }
        else {
            popSize = 8
        }
    }
    # "auto" iterations: bounded by twice the grammar's expression count per
    # population, capped at 1000 (and a fifth of that for the GA).
    if (iterations == "auto") {
        iterations = 1000
        num.grammar.expr = GrammarNumOfExpressions(grammarDef, max.depth, startSymb)
        iterations = round(min(num.grammar.expr/popSize * 2, iterations))
        if (optimizer == "ga") {
            iterations = round(iterations/5)
        }
    }
    # ES split of the population into retained vs freshly generated members;
    # recursive grammars regenerate everything each generation.
    if (optimizer == "es" && newPerGen == "auto") {
        if (GrammarIsRecursive(grammarDef)) {
            newPerGen = popSize
            popSize = 0
        }
        else {
            newPerGen = round(popSize/4)
            popSize = popSize - newPerGen
        }
    }
    # Default mutation rate scales inversely with chromosome length.
    if (is.na(mutationChance)) {
        if (optimizer == "es") {
            mutationChance <- min(0.1, 5/(1 + chromosomeLen))
        }
        else {
            mutationChance <- 1/(1 + chromosomeLen)
        }
    }
    # ind.cut maps each codon position to the expression it belongs to; the
    # same partition doubles as the GA's crossover-point restriction.
    if (numExpr == 1) {
        ind.cut <- 1
        geneCrossoverPoints <- NULL
    }
    else {
        ind.cut <- as.numeric(cut(1:chromosomeLen, numExpr))
        geneCrossoverPoints <- ind.cut
    }
    # Decode a chromosome into a list of R expressions, keeping only the
    # sub-chromosomes that map to terminal ("T") expressions.
    chromToExprList <- function(chromosome) {
        expr.list = c()
        for (i in 1:numExpr) {
            ch <- chromosome[ind.cut == i]
            expr <- GrammarMap(ch, grammarDef, wrappings = wrappings)
            if (expr$type == "T") {
                expr.list <- c(expr.list, as.expression(expr))
            }
        }
        return(expr.list)
    }
    # Fitness wrapper handed to the optimizer: decode, then evaluate; an
    # undecodable chromosome gets infinite cost.
    ga.evalFunc <- function(chromosome) {
        expr.list = chromToExprList(chromosome)
        if (length(expr.list) == 0) {
            return(Inf)
        }
        if (disable.warnings) {
            eval.results = suppressWarnings(evalFunc(expr.list))
        }
        else {
            eval.results = evalFunc(expr.list)
        }
        return(eval.results)
    }
    # Attach the decoded expressions to the optimizer result and tag the
    # class so print/summary methods dispatch correctly.
    add.expression.to.results <- function(ga.result) {
        ga.result$best$expressions = chromToExprList(ga.result$best$genome)
        class(ga.result) <- "GrammaticalEvolution"
        return(ga.result)
    }
    # The user's monitor sees the enriched result, not the raw genome.
    if (!is.null(monitorFunc)) {
        ga.monFunc <- function(result) {
            monitorFunc(add.expression.to.results(result))
        }
    }
    else {
        ga.monFunc <- NULL
    }
    if (optimizer == "ga") {
        result <- GeneticAlg.int(genomeLen = chromosomeLen, codonMin = 0, codonMax = GrammarMaxRuleSize(grammarDef) - 1, evalFunc = ga.evalFunc, suggestions = suggestions, popSize = popSize, iterations = iterations, elitism = elitism, mutationChance = mutationChance, geneCrossoverPoints = geneCrossoverPoints, terminationCost = terminationCost, monitorFunc = ga.monFunc, allowrepeat = TRUE, plapply = plapply, ...)
    }
    else {
        result <- EvolutionStrategy.int(genomeLen = chromosomeLen, codonMin = 0, codonMax = GrammarMaxRuleSize(grammarDef) - 1, evalFunc = ga.evalFunc, suggestion = suggestions, mutationChance = mutationChance, popSize = popSize, newPerGen = newPerGen, iterations = iterations, terminationCost = terminationCost, monitorFunc = ga.monFunc, allowrepeat = TRUE, plapply = plapply, ...)
    }
    return(add.expression.to.results(result))
}
|
69b23c05512466e7413e8d8e419e727fe31b32fe
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/moezipfR/examples/rmoezipf.Rd.R
|
72d9eb4bf9a63a96ed8a438be2b0be7d912ec82e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 141
|
r
|
rmoezipf.Rd.R
|
# Auto-generated example script for moezipfR::rmoezipf (extracted from the
# package's Rd examples): draws 10 random values from the MOEZipf distribution.
library(moezipfR)
### Name: rmoezipf
### Title: Random number generator.
### Aliases: rmoezipf

### ** Examples

rmoezipf(10, 2.5, 1.3)
|
306a1c42142016f8dc4195d24755107650a6b868
|
ebb1f13d0493dc91d0099eb3d7cb8182210ab157
|
/EleicoesTercRepublica.R
|
ab5b3e17a412fb456f146724ecb02dcfde019b90
|
[] |
no_license
|
ngiachetta/work
|
acb6fd73fce0fe127dfb521793e4547785449373
|
f34b92c333fd23c0a8c90e576342ac40f1c9980d
|
refs/heads/master
| 2020-12-30T16:45:24.493522
| 2017-11-21T17:35:56
| 2017-11-21T17:35:56
| 91,018,635
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,397
|
r
|
EleicoesTercRepublica.R
|
# Data analysis for the FLP course (Brazilian Third Republic elections).
# Packages
library(dplyr)
library(purrr)
library(ggplot2)
library(stringr)
library(readr)
library(ggthemes)
library(RColorBrewer)
## General elections: 1945, 1947, 1950, 1954, 1958 and 1962
# Column names for the per-party vote files (VOTACAO_PARTIDO_*); the raw
# files have no header row, so these are supplied to read_csv2().
labels.partido <- c("DATA_GERACAO", "HORA_GERACAO", "ANO_ELEICAO", "NUM_TURNO", "DESCRICAO_ELEICAO", "SIGLA_UF", "SIGLA_UE",
"CODIGO_CARGO", "DESCRICAO_CARGO", "TIPO_LEGENDA", "NOME_COLIGACAO", "COMPOSICAO_LEGENDA", "SIGLA_PARTIDO", "NUMERO_PARTIDO",
"NOME_PARTIDO", "QTDE_VOTOS_NOMINAIS", "QTDE_VOTOS_LEGENDA")
# Column names for the per-UF turnout/detail files (DETALHE_VOTACAO_*).
labels.detalhe <- c("DATA_GERACAO", "HORA_GERACAO", "ANO_ELEICAO", "NUM_TURNO",
"DESCRICAO_ELEICAO", "SIGLA_UF", "SIGLA_UE", "CODIGO_CARGO",
"DESCRICAO_CARGO","QTD_APTOS","QTD_COMPARECIMENTO",
"QTD_ABSTENCOES","QTD_VOTOS_NOMINAIS","QTD_VOTOS_BRANCOS",
"QTD_VOTOS_NULOS","QTD_VOTOS_LEGENDA","QTD_VOTOS_ANULADOS_APU_SEP",
"QTD_SECOES_TOT","QTD_SECOES_ANULADAS","QTD_SECOES_SEM_FUNCION",
"QTD_ZONAS_ELEITORAIS","QTD_JUNTAS_APURADORAS")
# Column names for the per-candidate vote files (VOTACAO_CANDIDATO_*).
labels.candidato <- c("DATA_GERACAO", "HORA_GERACAO", "ANO_ELEICAO", "NUM_TURNO", "DESCRICAO_ELEICAO", "SIGLA_UF", "SIGLA_UE",
"CODIGO_CARGO","NUMERO CAND", "SQ_CANDIDATO", "NOME_CANDIDATO",
"NOME_URNA_CANDIDATO","DESCRICAO_CARGO","COD_SIT_CAND_SUPERIOR",
"DESC_SIT_CAND_SUPERIOR", "CODIGO_SIT_CANDIDATO",
"DESC_SIT_CANDIDATO", "CODIGO_SIT_CAND_TOT", "DESC_SIT_CAND_TOT",
"NUMERO_PARTIDO","SIGLA_PARTIDO", "NOME_PARTIDO",
"SEQUENCIAL_LEGENDA", "NOME_COLIGACAO", "COMPOSICAO_LEGENDA",
"TOTAL_VOTOS")
# File-name templates: "ANO" and "UNIDF" are placeholders replaced below
# with the election year and the federation-unit code, respectively.
arquivo.partido <- "VOTACAO_PARTIDO_UF_ANO_UNIDF.txt"
arquivo.detalhe <- "DETALHE_VOTACAO_UF_ANO_UNIDF.txt"
arquivo.cand <- "VOTACAO_CANDIDATO_UF_ANO_UNIDF.txt"
### Per-year data loading -------------------------------------------------
# The six yearly sections were near-identical copy/paste blocks; they are
# consolidated into two helpers. Two defects in the original are fixed:
#   * `setwd()` was called with no argument before every year, which errors
#     at runtime ("argument 'dir' is missing"); the helpers instead take a
#     `dir` argument (default "." keeps relative paths working). The
#     original setwd() calls suggest the yearly files may live in different
#     directories -- pass `dir` per year accordingly.
#   * The candidate files were built from the detail UF list instead of the
#     candidate UF list, so in 1945 the extra territories ("Fernando de
#     Noronha", "Iguaçu", "Ponta Porã") were silently never read. The
#     candidate-specific list is now used.

# Build per-UF file names from a template (placeholders "ANO" and "UNIDF")
# and read all files into a single data frame.
read_year_files <- function(template, year, ufs, labels, dir = ".") {
  fname <- str_replace(template, "ANO", as.character(year))
  files <- str_replace(fname, "UNIDF", ufs)  # str_replace vectorises over `ufs`
  map_df(file.path(dir, files), read_csv2,
         col_names = labels, locale = locale(encoding = "latin1"))
}

# Read the three vote files (party, detail, candidate) for one year.
load_year <- function(year, ufs, ufs.cand = ufs, dir = ".") {
  list(part = read_year_files(arquivo.partido, year, ufs, labels.partido, dir),
       deta = read_year_files(arquivo.detalhe, year, ufs, labels.detalhe, dir),
       cand = read_year_files(arquivo.cand, year, ufs.cand, labels.candidato, dir))
}

### 1945 (candidate files also exist for three extra territories)
ufs.1945 <- c("AC", "AL", "AM", "BA", "CE", "DF", "ES", "GO", "MA", "MG", "MT",
              "PA", "PB", "PE", "PI", "PR", "RJ", "RN", "RS", "SC", "SE", "SP")
d <- load_year(1945, ufs.1945,
               ufs.cand = c(ufs.1945, "Fernando de Noronha", "Iguaçu", "Ponta Porã"))
votPart45 <- d$part; votDeta45 <- d$deta; votCand45 <- d$cand
### 1947
ufs.1947 <- c("AL", "AM", "AP", "BA", "CE", "DF", "ES", "GO", "GP", "MA", "MG",
              "MT", "PA", "PB", "PE", "PI", "PR", "RB", "RJ", "RN", "RS", "SC",
              "SE", "SP")
d <- load_year(1947, ufs.1947)
votPart47 <- d$part; votDeta47 <- d$deta; votCand47 <- d$cand
### 1950
# NOTE(review): "PB" appears twice in the original list (its files were read
# twice); possibly "RB" was intended -- confirm before changing. Preserved
# as-is to keep behaviour identical.
ufs.1950 <- c("AC", "AL", "AM", "AP", "BA", "CE", "DF", "ES", "GO", "GP", "MA",
              "MG", "MT", "PA", "PB", "PE", "PI", "PR", "PB", "RJ", "RN", "RS",
              "SC", "SE", "SP")
d <- load_year(1950, ufs.1950)
votPart50 <- d$part; votDeta50 <- d$deta; votCand50 <- d$cand
### 1954 (same UF list as 1950 in the original)
ufs.1954 <- ufs.1950
d <- load_year(1954, ufs.1954)
votPart54 <- d$part; votDeta54 <- d$deta; votCand54 <- d$cand
### 1958
# NOTE(review): duplicate "PB" again -- see 1950 note.
ufs.1958 <- c("AC", "AL", "AM", "AP", "BA", "CE", "DF", "ES", "GO", "MA", "MG",
              "MT", "PA", "PB", "PE", "PI", "PR", "PB", "RB", "RJ", "RN", "RO",
              "RS", "SC", "SE", "SP")
d <- load_year(1958, ufs.1958)
votPart58 <- d$part; votDeta58 <- d$deta; votCand58 <- d$cand
### 1962
# NOTE(review): duplicate "PB" again -- see 1950 note.
ufs.1962 <- c("AC", "AL", "AM", "AP", "BA", "CE", "ES", "GB", "GO", "MA", "MG",
              "MT", "PA", "PB", "PE", "PI", "PR", "PB", "RJ", "RN", "RO", "RS",
              "SC", "SE", "SP")
d <- load_year(1962, ufs.1962)
votPart62 <- d$part; votDeta62 <- d$deta; votCand62 <- d$cand
# Drop loading helpers and intermediates; only the votPart/votDeta/votCand
# per-year tables remain.
rm(read_year_files, load_year, d,
   ufs.1945, ufs.1947, ufs.1950, ufs.1954, ufs.1958, ufs.1962,
   labels.partido, labels.detalhe, labels.candidato,
   arquivo.cand, arquivo.partido, arquivo.detalhe)
### Combine the per-year tables -------------------------------------------
# bind_rows() is variadic, so each set of six yearly tables can be stacked
# in a single call instead of five pairwise calls.
### votPartANO
votPart <- bind_rows(votPart45, votPart47, votPart50, votPart54, votPart58, votPart62)
rm(votPart45, votPart47, votPart50, votPart54, votPart58, votPart62)
### votCandANO
votCand <- bind_rows(votCand45, votCand47, votCand50, votCand54, votCand58, votCand62)
rm(votCand45, votCand47, votCand50, votCand54, votCand58, votCand62)
### votDetaANO
votDeta <- bind_rows(votDeta45, votDeta47, votDeta50, votDeta54, votDeta58, votDeta62)
rm(votDeta45, votDeta47, votDeta50, votDeta54, votDeta58, votDeta62)
### Analyses
#### Expansion of electoral participation
# Eligible (registered) voters over time.
votDeta %>% filter(DESCRICAO_CARGO =="DEPUTADO FEDERAL")%>% group_by(ANO_ELEICAO) %>% summarise(APTOS = sum(QTD_APTOS), COMP = sum(QTD_COMPARECIMENTO)) %>%
ggplot(., aes(x = ANO_ELEICAO, y = APTOS)) + geom_line(color = 'orange', size = 1) + theme_wsj() + scale_x_continuous(breaks = c(1945, 1947, seq(1950,1962,4)))+
ggtitle(label = "Quantidade de eleitores aptos para votar", subtitle = "Eleições para Deputados Federais")
# Eligible voters by electoral unit (UE).
votDeta %>% filter(DESCRICAO_CARGO =="DEPUTADO FEDERAL")%>% group_by(ANO_ELEICAO, SIGLA_UE) %>% summarise(APTOS = sum(QTD_APTOS), COMP = sum(QTD_COMPARECIMENTO)) %>%
ggplot(., aes(x = ANO_ELEICAO, y = APTOS)) + geom_line(aes(color = SIGLA_UE), size = 1) + theme_wsj() + scale_x_continuous(breaks = c(1945, 1947, seq(1950,1962,4)))+
ggtitle(label = "Quantidade de eleitores aptos para votar por UE", subtitle = "Eleições para Deputados Federais")
# Turnout: attendance / eligible voters.
votDeta %>% filter(DESCRICAO_CARGO =="DEPUTADO FEDERAL")%>% group_by(ANO_ELEICAO) %>% summarise(CompAptos = sum(QTD_COMPARECIMENTO)/sum(QTD_APTOS)) %>%
ggplot(., aes(x = ANO_ELEICAO, y = CompAptos)) + geom_line(color = 'orange', size = 1) + theme_wsj() + scale_x_continuous(breaks = c(1945, 1947, seq(1950,1962,4)))+
ggtitle(label = "Proporção Comparecimento / Aptos", subtitle = "Eleições para Deputados Federais")
# Turnout by electoral unit (UE).
votDeta %>% filter(DESCRICAO_CARGO =="DEPUTADO FEDERAL")%>% group_by(ANO_ELEICAO, SIGLA_UE) %>% summarise(CompAptos = sum(QTD_COMPARECIMENTO)/sum(QTD_APTOS)) %>%
ggplot(., aes(x = ANO_ELEICAO, y = CompAptos)) + geom_line(aes(color = SIGLA_UE), size = 1) + theme_wsj() + scale_x_continuous(breaks = c(1945, 1947, seq(1950,1962,4)))+
ggtitle(label = "Proporção Comparecimento / Aptos por UE", subtitle = "Eleições para Deputados Federais")
# Abstentions over time.
votDeta %>% filter(DESCRICAO_CARGO =="DEPUTADO FEDERAL")%>% group_by(ANO_ELEICAO) %>% summarise(ABST = sum(QTD_ABSTENCOES), COMP = sum(QTD_COMPARECIMENTO)) %>%
ggplot(., aes(x = ANO_ELEICAO, y = ABST)) + geom_line(color = 'orange', size = 1) + theme_wsj() + scale_x_continuous(breaks = c(1945, 1947, seq(1950,1962,4)))+
ggtitle(label = "Quantidade de abstenções", subtitle = "Eleições para Deputados Federais")
# Abstentions by electoral unit (UE).
# BUG FIX: the original grouped only by ANO_ELEICAO while mapping
# colour = SIGLA_UE; summarise() drops SIGLA_UE in that case and the plot
# errors. Group by SIGLA_UE as well, matching the other per-UE plots, and
# add "por UE" to the title so the two abstention plots are distinguishable.
votDeta %>% filter(DESCRICAO_CARGO =="DEPUTADO FEDERAL")%>% group_by(ANO_ELEICAO, SIGLA_UE) %>% summarise(ABST = sum(QTD_ABSTENCOES), COMP = sum(QTD_COMPARECIMENTO)) %>%
ggplot(., aes(x = ANO_ELEICAO, y = ABST)) + geom_line(aes(color = SIGLA_UE), size = 1) + theme_wsj() + scale_x_continuous(breaks = c(1945, 1947, seq(1950,1962,4)))+
ggtitle(label = "Quantidade de abstenções por UE", subtitle = "Eleições para Deputados Federais")
# Votes received per party (UDN, PSD, PTB): nominal + legend votes.
votPart %>% filter(DESCRICAO_CARGO == "DEPUTADO FEDERAL", SIGLA_PARTIDO %in% c("UDN", "PSD", "PTB")) %>% group_by(ANO_ELEICAO, SIGLA_PARTIDO) %>% summarise(TOTAL_RECEBIDOS = sum(QTDE_VOTOS_NOMINAIS) + sum(QTDE_VOTOS_LEGENDA)) %>%
ggplot(., aes(x = ANO_ELEICAO, y = TOTAL_RECEBIDOS))+ geom_line(aes(color = SIGLA_PARTIDO)) +theme_wsj() +scale_x_continuous(breaks = c(1945, 1947, seq(1950,1962,4)))+
ggtitle(label = "Quantidade de votos recebidos por partido", subtitle = "Eleições para Deputados Federais")
# Who was elected for the PSD in 1945? (top 5 candidates by votes)
votCand %>% filter(DESCRICAO_CARGO == "DEPUTADO FEDERAL", SIGLA_PARTIDO == "PSD",
ANO_ELEICAO == 1945, DESC_SIT_CAND_TOT == "ELEITO", TOTAL_VOTOS != 0) %>%
arrange(desc(TOTAL_VOTOS)) %>% .[1:5,] %>%
ggplot(., aes(x = NOME_CANDIDATO, TOTAL_VOTOS)) +geom_bar(stat = 'identity') +theme_wsj()+
theme(axis.text.x=element_text(angle=45,hjust=1))+ggtitle(label = "Quantidade de votos PSD")
# Who was elected for PSD and UDN in 1945? (author's note: "did not work!")
votCand %>% filter(DESCRICAO_CARGO == "DEPUTADO FEDERAL", SIGLA_PARTIDO %in% c("PSD", "UDN"),
ANO_ELEICAO == 1945, DESC_SIT_CAND_TOT == "ELEITO", TOTAL_VOTOS != 0) %>%
arrange(desc(TOTAL_VOTOS)) %>% .[1:20,] %>%
ggplot(., aes(x = NOME_CANDIDATO, TOTAL_VOTOS)) +geom_bar(stat = 'identity') +theme_wsj()+
theme(axis.text.x=element_text(angle=45,hjust=1))+ggtitle(label = "Quantidade de votos PSD") +
facet_wrap(~SIGLA_PARTIDO)
# How did the PCB perform against the other parties? (total votes, 1945
# presidential election)
votCand %>% filter(DESCRICAO_CARGO == "PRESIDENTE", ANO_ELEICAO == 1945, SIGLA_PARTIDO %in% c("PCB", "PSD", "UDN")) %>%
ggplot(aes(x = SIGLA_UF, y = TOTAL_VOTOS)) + geom_bar(stat = "identity", aes(fill = SIGLA_PARTIDO), position = "dodge")+
theme_wsj()+scale_fill_brewer(palette="Dark2")+ggtitle(label = "Quantidade de voto recebidos na eleição presidencial", subtitle = "Eleição de 1945")+
theme(legend.position = "bottom")
# How did the PCB perform against the other parties? (vote share per state)
votCand %>%
filter(DESCRICAO_CARGO == "PRESIDENTE",
ANO_ELEICAO == 1945,
SIGLA_PARTIDO %in% c("PCB", "PSD", "UDN")) %>%
group_by(SIGLA_UF, SIGLA_PARTIDO) %>%
summarise(TOTAL_VOTOS = sum(TOTAL_VOTOS)) %>%
ungroup() %>%
group_by(SIGLA_UF) %>%
mutate(TOTAL_REC = sum(TOTAL_VOTOS),
PORC = TOTAL_VOTOS/TOTAL_REC) %>%
ggplot(aes(x = SIGLA_UF, y = PORC)) + geom_bar(stat = "identity", aes(fill = SIGLA_PARTIDO), position = "dodge",width = 0.85)+
theme_wsj()+scale_fill_brewer(palette="Dark2")+ggtitle(label = "Porcentagem de votos recebidos\nna eleição presidencial", subtitle = "Eleição de 1945")+
theme(legend.position = "bottom")+scale_y_continuous(breaks = seq(.1, 1, .1), limits = c(0, 1))
|
dc857d0c8e1d9cb223170dcc0d546aa0e47c5e50
|
e4901ac0d0866b2a3ad4f14474e941e79e23f3ee
|
/RandomForests.R
|
df6c0313d1133dbd6ce520f15ca1d3a1b2b6d44d
|
[] |
no_license
|
MarianneLawless/Random-Forests-algorithm-R
|
03383c5f3a11739b09f6144f794f6ff101383191
|
74cd9c7c0d7d20236af89ccbcfe118f7f8a288eb
|
refs/heads/master
| 2023-03-16T15:03:46.621507
| 2020-01-18T15:01:15
| 2020-01-18T15:01:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,580
|
r
|
RandomForests.R
|
#Random Forest is created by aggregating trees
#Can be used for classification or regression
#Can deal with large number of features
#Avoids overfitting
#It deals with only parameters
# Download Data
# NOTE(review): hardcoded absolute path ties this script to one machine;
# consider a project-relative path.
setwd("C:/Users/Alexander/OneDrive - National College of Ireland/4th Year/Semester 2/Web Mining/CA2/BS-updated")
BikeDayCleaned<-read.csv("BikeDayCleaned.csv")
# NOTE(review): BikeHoursCleaned is loaded but never used below.
BikeHoursCleaned<-read.csv("BikeHourCleaned.csv")
#checking data structure
str(BikeDayCleaned)
#data partition: 70/30 train/test split with a fixed seed for reproducibility
set.seed(123)
ind<-sample(2, nrow(BikeDayCleaned), replace=TRUE, prob=c(0.7,0.3))
train<-BikeDayCleaned[ind==1,]
test<-BikeDayCleaned[ind==2,]
#random forest: regress daily rental count on weather/calendar features
library(randomForest)
set.seed(222)
rf<-randomForest(count ~ season + year+ month+holiday + weekday+workingday+ temp +weather + atemp + humidity+windspeed, data=train)
print(rf)
#checking rf attributes
attributes(rf)
#prediction and actual data (evaluated on the TRAINING set)
library(caret)
prediction1<-predict(rf,train)
head(prediction1)
head(train$count)
#Graph
plot(prediction1)
#Get error metrics on the training data
RMSE(prediction1, train$count) # Root Mean Squared Error (caret::RMSE)
#load library to use MAPE() function
library(MLmetrics)
MAPE(prediction1, train$count) #Mean Absolute Percentage Error
MAE(prediction1, train$count) #Mean Average Error
#prediction with test data
# NOTE(review): prediction2 is never compared against test$count below.
prediction2<-predict(rf,test)
#error rate of random forest
plot(rf)
#tune mtry (predictor columns 2:15; column 8 of that subset is the response)
train2<-train[,2:15]
tuned<-tuneRF(train2[,-8], train2[,8],
stepFactor = 0.5,
plot=TRUE,
ntreeTry = 300,
trace=TRUE,
improve=0.05)
|
7a681bb3fb4dbfdf6f77d2325665be6628094b3e
|
b4a07b5123b9eb6fd25ac001c24b9a84a78d5683
|
/data/performance.R
|
246bbbfd40b7d9e7d51a96ca6e8f8925fe442064
|
[] |
no_license
|
o19s/TREC_run_explorer_app
|
889bfc37d869e58c1326c45641426a01da18fdd8
|
454b2fd67615325bd975c31a6a0c66a5310995fa
|
refs/heads/main
| 2023-02-05T23:26:19.068849
| 2020-12-21T19:47:32
| 2020-12-21T19:47:32
| 323,372,920
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,124
|
r
|
performance.R
|
library(tidyverse)
# Read per-topic trec_eval output files for every run, keep the per-topic
# nDCG@5 rows, and tidy into (run, topic, val).
# NOTE(review): column meanings (X1 = metric, X2 = topic, X3 = value) are
# inferred from the filters below -- confirm against the trec_eval format.
us <- dir("trec2020newstrackbackgroundlinkingresults/", "trec_eval", full.names = T) %>%
set_names(gsub(".*results\\//(.*)\\..*", "\\1", .)) %>%
map_df(~read_tsv(., col_names = F), .id = "run") %>%
filter(!is.na(X2)) %>% # overall stats
filter(X1 == "ndcg_cut_5") %>%
select(-X1) %>%
mutate(X3 = as.numeric(X3),
run = gsub("tune_ners_embed", "mlt_tune_ners_sbert", run)) %>%
rename(
topic = X2,
val = X3
) %>%
mutate(run = gsub("mlt_", "", run) %>%
factor(levels = c("base", "tune", "ners", "embed")))
# Cache the tidy data for the shiny run explorer app.
saveRDS(us, "run_explorer/data/osc_run_explorer_data.RDS")
# Per-run mean nDCG@5, used as labels on the plot.
us %>%
group_by(run) %>%
summarise(y = mean(val),
lbl = round(y, 3)) -> lbls
# Beeswarm of per-topic scores with mean crossbars and mean labels.
ggplot(us, aes(run, val, color = run)) +
ggbeeswarm::geom_quasirandom(size = 4, alpha = .5, width = .15) +
stat_summary(geom = "crossbar", fun.data = mean_cl_normal, show.legend = F) +
geom_label(data = lbls, aes(y = lbl, label = lbl), size = 5, show.legend = F) +
scale_color_brewer(palette = "Dark2", name = NULL) +
theme(axis.text.x = element_blank()) +
labs(y = "nDCG@5")
|
a8d40e2427098072891d69148b1aa9db5ab42ee0
|
e1d25753f7e5d445abd805d52181c6e920756a08
|
/TFL/shinyapps/global.R
|
e08edf4b3d2744eddfb428d89547bb27509d3998
|
[] |
no_license
|
boriscooper/nginx-rshiny
|
02af54adbc284d75dea8549c243eba8490513ced
|
1c4d3f6705ddce60eed59d1adff2868a7840ca3d
|
refs/heads/master
| 2020-05-18T11:31:35.923300
| 2019-05-01T15:18:06
| 2019-05-01T15:18:06
| 184,381,814
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,483
|
r
|
global.R
|
## Start up database
## LIBRARY PACKAGES
library(sf)
library(dplyr)
library(data.table)
library(ggplot2)
library(shiny)
library(shinydashboard)
library(leaflet)
library(geojsonsf)
library(rgdal)
##-------------------------------------------------------
# Directories for the shiny app and its data files.
base_dir <- "/srv/shiny-server"
data_dir <- file.path(base_dir, "data")
## Bus stop data
# Initial map view (centre and zoom level).
lat <- 51.5
lon <- 0.0
zoom <- 10
##------------------------------------------------------
# Coordinate reference systems: British National Grid and WGS84 lat/long.
ukgrid <- "+init=epsg:27700"
latlong <- "+init=epsg:4326"
# Load the bus-stop table; the raw CSV ends with an empty row.
bus_stopsDT <- fread(file = file.path(data_dir, "bus-stops-10-06-15.csv"))
# NOTE(review): `1:nrow(x)-1` parses as (1:n)-1, i.e. 0:(n-1); data.table
# ignores the 0 index so this happens to drop the last row, but
# 1:(nrow(x)-1) would state the intent unambiguously.
bus_stopsDT <- bus_stopsDT[1:nrow(bus_stopsDT)-1, ] ## remove empty last row
coordsDT <- bus_stopsDT[, .(Location_Easting, Location_Northing)]
##-------------------------------------------------------
## Add lon/lat coordinates from the bus-stop table to a route-sequence table.
## Returns a data.table with one row per (route, run, sequence) stop, keyed
## by Route/Run/Sequence, with Run recoded to "Out"/"Return".
## Fix: the original passed `on = "Stop_Code_LBSL"` to merge(); merge() has
## no `on` argument (that is data.table's `[` join syntax), so it was
## silently swallowed by `...` and the join only worked because both tables
## happened to be keyed. The explicit `by =` argument is used instead.
new_sequence <- function(bus_stopsDT, sequenceDT){
  setkey(sequenceDT, Stop_Code_LBSL)
  merged <- merge(sequenceDT, bus_stopsDT, by = "Stop_Code_LBSL")
  merged <- merged[, .(Stop_Code_LBSL, Route, Run, Sequence, Heading.x, Stop_Name.x, lon, lat)]
  # Recode the run indicator: 1 = outbound leg, anything else = return leg.
  merged[, Run := ifelse(Run == 1, "Out", "Return")]
  setnames(merged, old = names(merged),
           new = c("Stop_Code_LBSL", "Route", "Run", "Sequence", "Heading", "Stop_Name", "lon", "lat"))
  setkey(merged, Route, Run, Sequence)
  merged
}
##-------------------------------------------------------
## function to select route data table
## Filter a sequence table to a single route and direction.
## NOTE(review): after new_sequence() the Run column holds "Out"/"Return",
## so the default `direction = 1` matches nothing on such tables; callers
## appear expected to pass "Out" or "Return" -- confirm intended default.
route_sequence <- function(sequenceDT, route_number, direction = 1){
  result <- sequenceDT[Route == route_number & Run == direction]
  result
}
##---------------------------------------------------
# Create the SpatialPointsDataFrame
# Build a spatial points object in the British National Grid CRS, reproject
# it to WGS84, and attach lon/lat columns back onto the bus-stop table.
bus_stops_SP <- SpatialPointsDataFrame(coords = coordsDT, data = bus_stopsDT[, .(Stop_Code_LBSL, Bus_Stop_Code, Stop_Name)],
proj4string = CRS(ukgrid))
### Convert
bus_stops_SP_LL <- spTransform(bus_stops_SP, CRS(latlong))
bus_stopsDT[, lon := coordinates(bus_stops_SP_LL)[,1]][, lat := coordinates(bus_stops_SP_LL)[,2]]
setkey(bus_stopsDT, Stop_Code_LBSL)
##--------------------------------------------------------
## Bus stop sequence examples
# Example route sequence, joined with coordinates via new_sequence().
bus_stop_sequenceDT <- fread(file = file.path(data_dir, "stop-sequences-example.csv"))
new_bus_stop_sequenceDT <-new_sequence(bus_stopsDT, bus_stop_sequenceDT)
##-----------------------------------------------------------
|
f22fe19ba0704dd5e6ada57f3170e9559c9db2a7
|
3d9f876272743b98299b08d9b435ffc9c50cebb7
|
/R/bi_open.R
|
f4ceb2f07a4b2e7a1dd45b8c8e0f4321dd2322c7
|
[] |
no_license
|
tyler-abbot/RBi
|
4342ed0a38421821de813399afc39223ec724251
|
fc71463b66235b7f44deb41628a94b211708d9bc
|
refs/heads/master
| 2021-05-03T11:27:54.067395
| 2016-10-04T10:23:28
| 2016-10-04T10:23:28
| 69,964,424
| 0
| 0
| null | 2016-10-04T12:48:55
| 2016-10-04T12:48:55
| null |
UTF-8
|
R
| false
| false
| 941
|
r
|
bi_open.R
|
#' @rdname bi_open
#' @name bi_open
#' @title Bi open
#' @description
#' This function opens a NetCDF file.
#' The file can be specified as a string to the filepath, in which
#' case a NetCDF connection is opened, or directly as a NetCDF connection.
#'
#' @param read either a path to a NetCDF file, or a NetCDF connection created using \code{nc_open}, or a \code{\link{libbi}} object from which to read the output
#' @return open NetCDF connection
#' @importFrom ncdf4 nc_open
bi_open <- function(read)
{
  ## Fixes: is.character() instead of typeof(read) == "character", and
  ## inherits() instead of class(read) == "..." -- class() can return a
  ## vector of length > 1, which errors in if() on R >= 4.2 and misses
  ## subclasses.
  if (is.character(read)) {
    nc <- nc_open(tools::file_path_as_absolute(read))
  } else if (inherits(read, "ncdf4")) {
    nc <- read
  } else if (inherits(read, "libbi")) {
    ## A libbi object only has an output file once it has been run.
    if (!read$run_flag) {
      stop("The libbi object should be run first")
    }
    nc <- nc_open(tools::file_path_as_absolute(read$result$output_file_name))
  } else {
    stop("'read' must be a string, ncdf4 or libbi object.")
  }
  return(nc)
}
|
4b3aaf2cd17369b0c6f26bec312498ad40fc3916
|
2d9fb03feb8626c67ba5d3f1a0815710b621c5f6
|
/man/activity_specialization.Rd
|
4c1fa74ceb38814601a5bdcab7fa596802256772
|
[] |
no_license
|
bbrewington/edeaR
|
4c8916bad4c54521764574770ae941983363dc0a
|
02b31d133b5cec68caa6e0c5fa446a6a6275d462
|
refs/heads/master
| 2021-01-19T18:32:49.442081
| 2016-08-27T17:31:36
| 2016-08-27T17:31:36
| 66,726,375
| 0
| 0
| null | 2016-08-27T17:17:51
| 2016-08-27T17:17:51
| null |
UTF-8
|
R
| false
| true
| 552
|
rd
|
activity_specialization.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/activity_specialization.R
\name{activity_specialization}
\alias{activity_specialization}
\title{Metric: Activity Specialization}
\usage{
activity_specialization(eventlog, level_of_analysis)
}
\arguments{
\item{eventlog}{The event log to be used. An object of class
\code{eventlog}.}
\item{level_of_analysis}{At which level the analysis of coverage should be performed: case, activity}
}
\description{
Analyses whether activities are specialize in by specific resources
}
|
b7a7d470c2e74761e37067ea3742f6fd4a0a9c95
|
71bb8a2619c414a153297d2dfd2a5089c53c7f9e
|
/Operadores-em-R.R
|
010d898b521df8947766ca25bae88978f20684b6
|
[] |
no_license
|
BrunoVollin/learning-R
|
b1c5d4eb998ee060177ba1ab12fbd85d9ff82108
|
ecc75bd3358e9746a12335a78d0a6b683841b1f0
|
refs/heads/master
| 2023-03-10T09:40:42.968448
| 2021-02-26T16:29:54
| 2021-02-26T16:29:54
| 342,619,948
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 348
|
r
|
Operadores-em-R.R
|
###----- Operators in R -----###
setwd("C:/Users/bruno/Documents/Operadores-em-R.R")
getwd()
## Arithmetic operators
4 + 5       # addition
7 - 4       # subtraction
2 * 8       # multiplication
3 / 3       # division
3^2         # exponentiation
3**2        # exponentiation (alternative spelling)
16 %% 3     # modulo (remainder)
### Relational operators
## variables used below
x <- 7
y <- 5
x > 8
x < 8
x <= 8
x >= 8
## Logical operators
(x == 8) & (x > y)   # and (element-wise)
(x == 8) | (x > y)   # or (element-wise)
print(!(x < 8))      # not
|
6753e1afc201c51f54c2cf043c1965ccf4bcc485
|
8522b1f802fe496ae0c5bcc2b6ca1fee3a5e036a
|
/multiplot2.R
|
4f90dfa008ca6e94eeb44c15bef3319a2a5dd079
|
[] |
no_license
|
Neksta1/GMean
|
32084c018b926816850aebe6ea7f11dffe4edff6
|
0ac28f82bb1f1de01939b3cd67fcbad46d2d432f
|
refs/heads/master
| 2020-04-06T03:42:21.500455
| 2015-02-27T12:20:47
| 2015-02-27T12:20:47
| 30,014,561
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,066
|
r
|
multiplot2.R
|
```{r}
# Simulate 100 ADC (apparent diffusion coefficient) values and build a list
# of grey mono-exponential decay curves (one per value) to overlay as a
# background fan on the plot below.
# NOTE(review): S01 (signal at b = 0) must be defined elsewhere in the
# document -- confirm before running this chunk in isolation.
adccoefs <- rnorm(100, 0.001, sd = 0.0002)
adclines <-
alply(as.matrix(adccoefs), 1, function(adccoef) {
stat_function(fun=function(x){S01*exp(-x*adccoef[1])}, colour="grey")
})
```
```{r}
# Plot the simulated ADC fan plus four signal models (sb1, sb2, sbf, sbr --
# defined elsewhere in the document, as is `params`) as signal intensity
# against b-value, on a log10 y scale.
p <- ggplot(data = params)
p + adclines +
layer (stat = "function",
fun = sb1,
size = 1,
alpha = 0.8,
mapping = aes (color="sb1")
) +
layer (stat = "function",
fun = sb2,
size = 1,
alpha = 0.8,
mapping = aes (color="sb2")
) +
layer (stat = "function",
fun = sbf,
size = 1,
alpha = 0.8,
mapping = aes (color="sb3")
) +
layer (stat = "function",
fun = sbr,
size = 1,
alpha = 0.8,
mapping = aes (color="sb4")
) +
xlim(0,1000) +
scale_y_log10() +
xlab("b-value") +
ylab("Signal Intensity")
```
|
6518bd0e36975aee7c4887fb575a0c68b076fb99
|
bb8b96319d963e6b788af9a54ec05c0eed14c624
|
/scripts/tema1/11-binning-data.R
|
b8bf914f880564612fe67f2572e8694f2ef120e2
|
[
"MIT"
] |
permissive
|
diegogonda/r-course
|
26c065827180dae58dabe17a8a71f0d3097703ab
|
8f9d2102133bf1a6eb5d431ec06b9c408e5358b3
|
refs/heads/master
| 2021-08-18T02:53:55.972659
| 2018-12-02T17:08:26
| 2018-12-02T17:08:26
| 146,853,749
| 0
| 0
|
MIT
| 2018-08-31T06:54:24
| 2018-08-31T06:54:24
| null |
UTF-8
|
R
| false
| false
| 1,815
|
r
|
11-binning-data.R
|
# Binning data: label people as Low/Average/High income with cut(), then
# build indicator (dummy) variables for categorical columns.
students <- read.csv("../data/tema1/data-conversion.csv")
# We want to label people by income as low, average and high. Create a
# vector of breakpoints for cut(); Inf makes the outer bins open-ended.
bp <- c(-Inf, 10000, 31000, Inf)
names <- c("Low", "Average", "High")
# Income.cat ("cat" = categorical): new column with the labels Low, Average
# or High depending on income.
students$Income.cat <- cut(students$Income, breaks = bp, labels = names)
# Unlike the previous call, without labels cut() uses the numeric interval
# as the level name, e.g. "(-Inf,1e+04]".
students$Income.cat2 <- cut(students$Income, breaks = bp)
# When the exact cut points do not matter and we just want N equal-width
# bins (4 here):
students$Income.cat3 <- cut(students$Income,
breaks = 4, # number of bins we want
# dropping `labels` would show where R placed the cuts
labels = c("Level 1", "Level 2",
"Level 3", "Level 4")
)
#dummy variables
# Indicator variables, e.g. for linear regression: categorical columns are
# expanded into numeric 0/1 columns.
students <- read.csv("../data/tema1/data-conversion.csv")
install.packages("dummies")
library(dummies)
# Dummy-encode every categorical column.
students.dummy <- dummy.data.frame(students, sep = ".")
names(students.dummy)
# Dummy-encode a single variable.
dummy(students$State, sep=".")
# Create dummy variables only for the named columns.
dummy.data.frame(students, names = c("State", "Gender"), sep = ".")
|
44a00995cb410add1ad77bfecaa9760a151fe60f
|
d43aa427f215e63d54526b85dce14ab957b028e2
|
/3 DATA WRANGLING/Data Wrangling Exercise 2 - Dealing with missing values/exercise.R
|
697e247336b99e572ea583316ac6c7ddea628403
|
[] |
no_license
|
akoukoullis/Springboard-Foundations-of-Data-Science
|
1a218ab971e1a9dd53b3a896b87e1cf49c218600
|
3a108ef187b1007eec3f0789c76f419f07dcdb0d
|
refs/heads/master
| 2021-01-12T15:50:45.882995
| 2017-02-18T10:52:42
| 2017-02-18T10:52:42
| 71,885,881
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,079
|
r
|
exercise.R
|
# Foundations of Data Science
# Data Wrangling Exercise 2: Dealing with missing values
# Author: Anthony Koukoullis
# Read a CSV file into a tbl_df, returning FALSE if the read fails.
# Fix: the original error handler called print("msg: ", e); print() does not
# concatenate -- the condition object lands in print.default's `digits`
# argument and the failure reason is lost. Use message() with
# conditionMessage() so the reason is reported, and drop the redundant
# result/return wrapper (tryCatch already returns a value).
load_data <- function(filename){
  tryCatch({
    tbl_df(read.csv(filename, stringsAsFactors = FALSE))
  }, error = function(e){
    message("Unfortunately the data couldn't be loaded: ", conditionMessage(e))
    FALSE
  })
}
# Write `table` to `filename` as CSV (quoted fields, no row names).
# NOTE(review): the error message says "couldn't be loaded" but this
# function saves; also print("msg: ", e) discards the condition object --
# both worth fixing alongside the other handlers in this file.
save_data <- function(table, filename){
  tryCatch({
    write.csv(table, file = filename, quote = TRUE, row.names = FALSE)
  }, error = function(e){
    print("Unfortunately the data couldn't be loaded: ", e)
  })
}
# Drop rows where pclass is NA (blank/partial rows in the raw CSV).
# Returns the filtered table, or FALSE on error.
# NOTE(review): the error message refers to the "embarked" column --
# copy/paste from port_of_embarkation; the handler also discards `e`.
clean_empty_rows <- function(table){
  result <- tryCatch({
    #table <- table[-which(is.na(table$pclass)),]
    table <- subset(table, !is.na(pclass))
    return(table)
  }, error = function(e){
    print("Unfortunately the embarked column couldn't be populated: ", e)
    return(FALSE)
  })
  return(result)
}
# Fill empty embarkation codes with "S" (Southampton, the most common port).
# Returns the updated table, or FALSE on error.
port_of_embarkation <- function(table){
  result <- tryCatch({
    table[which(table$embarked == ""), "embarked"] <- "S"
    return(table)
  }, error = function(e){
    print("Unfortunately the embarked column couldn't be populated: ", e)
    return(FALSE)
  })
  return(result)
}
# Impute missing ages with the mean of the observed ages.
# Fixes: the original computed the mean via
#   colMeans(subset(table, is.numeric(age) & !is.na(age))[, "age"])
# where is.numeric(age) is a scalar recycled across every row -- it worked
# only by accident; mean(..., na.rm = TRUE) states the intent directly.
# The error handler also referred to the "embarked" column (copy/paste) and
# discarded the condition; it now names the age column and reports the error.
age <- function(table){
  result <- tryCatch({
    mean_age <- mean(table$age, na.rm = TRUE)
    table[is.na(table$age), "age"] <- mean_age
    return(table)
  }, error = function(e){
    message("Unfortunately the age column couldn't be populated: ", conditionMessage(e))
    return(FALSE)
  })
  return(result)
}
# Replace missing/empty lifeboat codes with the literal string "NA".
# Fixes: this function was defined twice back-to-back with identical bodies;
# the duplicate definition is removed. The error handler also referred to
# the "embarked" column (copy/paste) and discarded the condition; it now
# names the boat column and reports the actual error.
lifeboat <- function(table){
  result <- tryCatch({
    table[is.na(table$boat) | table$boat == "", "boat"] <- "NA"
    return(table)
  }, error = function(e){
    message("Unfortunately the boat column couldn't be populated: ", conditionMessage(e))
    return(FALSE)
  })
  return(result)
}
# Add has_cabin_number: 1 if a cabin code is recorded, else 0.
# Fixes: the error message referred to the "embarked" column (copy/paste
# from port_of_embarkation) and discarded the condition; it now names the
# cabin column and reports the error. The missing-cabin mask is computed
# once (its negation equals the original's De Morgan expansion).
cabin <- function(table){
  result <- tryCatch({
    no_cabin <- is.na(table$cabin) | table$cabin == ""
    table[no_cabin, "has_cabin_number"] <- 0
    table[!no_cabin, "has_cabin_number"] <- 1
    return(table)
  }, error = function(e){
    message("Unfortunately the cabin column couldn't be populated: ", conditionMessage(e))
    return(FALSE)
  })
  return(result)
}
# Top-level driver: install/load dependencies, read the raw Titanic CSV,
# apply the cleaning functions above, write the cleaned CSV, and open it in
# the viewer.
# NOTE(review): the outer error handler calls print("msg: ", e_global),
# which does not concatenate -- the condition is discarded; message() with
# conditionMessage() would report the actual failure.
tryCatch({
  # Install package dependencies if not already installed
  installed_packages <- installed.packages()
  if (is.element("dplyr", installed_packages[,"Package"]) == FALSE) { install.packages("dplyr") }
  if (is.element("tidyr", installed_packages[,"Package"]) == FALSE) { install.packages("tidyr") }
  library("dplyr")
  library("tidyr")
  # Set the full path of your RStudio working directory to the "local_working_dir" variable if necessary,
  # and uncomment the next two lines
  #local_working_dir <- "/Users/akoukoullis/Documents/Springboard\ Foundations\ of\ Data\ Science/Exercises/3\ DATA\ WRANGLING/Data\ Wrangling\ Exercise\ 2\ -\ Dealing\ with\ missing\ values"
  #setwd(local_working_dir)
  # set the name of the original dataset filename
  original_filename <- "titanic_original.csv"
  # set the name of the clean dataset filename
  clean_filename <- "titanic_clean.csv"
  # Load data from csv file
  titanic <- load_data(original_filename)
  # Functions to clean up the data
  titanic <- clean_empty_rows(titanic)
  titanic <- port_of_embarkation(titanic)
  titanic <- age(titanic)
  titanic <- lifeboat(titanic)
  titanic <- cabin(titanic)
  # Save clean data to different csv file
  save_data(titanic, clean_filename)
  # View clean data
  View(titanic)
}, error = function(e_global){
  print("Unfortunately the script didn't complete its execution: ", e_global)
})
|
0d5ab8f358b007bf66d49635800d103f6e2bd0a9
|
cfb444f0995fce5f55e784d1e832852a55d8f744
|
/man/plikert.Rd
|
ed60891451b1e823c73ff9ba5b50afddaa73e561
|
[
"MIT"
] |
permissive
|
debruine/faux
|
3a9dfc44da66e245a7b807220dd7e7d4ecfa1317
|
f2be305bdc6e68658207b4ad1cdcd2d4baa1abb4
|
refs/heads/master
| 2023-07-19T18:28:54.258681
| 2023-07-07T16:59:24
| 2023-07-07T16:59:24
| 163,506,566
| 87
| 15
|
NOASSERTION
| 2023-01-30T10:09:37
| 2018-12-29T11:43:04
|
R
|
UTF-8
|
R
| false
| true
| 860
|
rd
|
plikert.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distribution_convertors.R
\name{plikert}
\alias{plikert}
\title{Likert distribution function}
\usage{
plikert(q, prob, labels = names(prob))
}
\arguments{
\item{q}{the vector of quantiles}
\item{prob}{a vector of probabilities or counts; if named, the output is a factor}
\item{labels}{a vector of values; defaults to names(prob) or 1:length(prob). If numeric, the output is numeric}
}
\value{
a vector of the densities
}
\description{
Likert distribution function
}
\examples{
q <- 1:5
prob <- c(.1, .2, .4, .2, .1)
plikert(q, prob)
q <- c("A", "C", "B", "B")
prob <- c(A = 10, B = 20, C = 30)
plikert(q, prob)
# specify labels if prob not named and not 1:length(prob)
labels <- -2:2
q <- labels
prob <- rep(1, length(labels)) # uniform probability
plikert(q, prob, labels)
}
|
eecf0c163fbe2882b4449a74cbb99754f74033a2
|
0785924afd7709630008f3d21aac7ebfb811a5fe
|
/finance_examples.R
|
01614a61025888128a29126c13f4441667d34286
|
[] |
no_license
|
leeslatergv/Examples
|
c8de15730bf2d62de977f60a49bd6a80475ebf9e
|
0c8a47f2e4d445de4df9b70ee484cb3aaebdbce5
|
refs/heads/main
| 2023-08-24T19:53:07.228710
| 2021-07-25T22:33:48
| 2021-07-25T22:33:48
| 388,572,703
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,887
|
r
|
finance_examples.R
|
# New code
# Build yearly/monthly/weekly spend summary tables from a transaction data
# frame (columns: year, month, week, category, amount) and assign the
# requested wide tables into the calling environment.
#
# Fixes over the original:
#   * summary_function() was called on the global `data` object instead of
#     the `df` argument, so the function ignored its own input.
#   * `weekly_table` was built from the yearly summary (smr_y) instead of
#     the weekly one (smr_w).
#
# The yearly/monthly/weekly arguments accept "y"/"m"/"w" flags; as in the
# original, any of the three positions may carry any flag.
create_summary_tables <- function(df, yearly = "", monthly = "", weekly = "") {
  # Long summary: total cost per (period, category) plus a running total.
  summary_function <- function(df, ...) {
    dots <- enquos(...)
    df %>%
      dplyr::group_by(!!!dots, category) %>%
      dplyr::summarise(cost = sum(amount)) %>%
      dplyr::ungroup() %>%
      dplyr::arrange(!!!dots) %>%
      dplyr::mutate(cumulative_spend = cumsum(cost)) %>%
      as.data.frame()
  }
  smr_y <- summary_function(df, year)
  smr_m <- summary_function(df, month)
  smr_w <- summary_function(df, week)
  # Wide table: one column per category.
  table_function <- function(x) {
    x %>%
      dplyr::select(-cumulative_spend) %>%
      tidyr::pivot_wider(names_from = category, values_from = cost) %>%
      as.data.frame()
  }
  flags <- tolower(c(yearly, monthly, weekly))
  # `<<-` preserves the original contract of assigning the tables into the
  # calling environment rather than returning them.
  if ("y" %in% flags) {
    yearly_table <<- table_function(smr_y)
  }
  if ("m" %in% flags) {
    monthly_table <<- table_function(smr_m)
  }
  if ("w" %in% flags) {
    weekly_table <<- table_function(smr_w)
  }
}
create_summary_tables(data, "y", "m", "w")
# Old code
#create_summary_tables <- function(df, yearly = "", monthly = "", weekly = "") {
# smr_m <- df %>%
# ddply(.(month, category), summarise,
# cost = sum(amount)) %>%
# mutate(cumulative_spend = cumsum(cost))
# smr_w <- df %>%
# ddply(.(week, category), summarise,
# cost = sum(amount)) %>%
# mutate(cumulative_spend = cumsum(cost))
# smr_y <- df %>%
# ddply(.(year, category), summarise,
# cost = sum(amount)) %>%
# mutate(cumulative_spend = cumsum(cost))
# if (yearly == "Y" | yearly == "y") {
# # easy mode solution is for each if, to do
# # if (yearly == "Y" | yearly == "y") | yearly == "M" | yearly == "m" | yearly == "W" | yearly == "w" {
# yearly_table <<- smr_y %>%
# dplyr::select(-cumulative_spend) %>%
# tidyr::pivot_wider(names_from = category, values_from = cost) %>%
# as.data.frame()
# }
# if (monthly == "M" | monthly == "m") {
# monthly_table <<- smr_m %>%
# dplyr::select(-cumulative_spend) %>%
# tidyr::pivot_wider(names_from = category, values_from = cost) %>%
# as.data.frame()
# }
# if (weekly == "W" | weekly == "w") {
# weekly_table <<- smr_w %>%
# dplyr::select(-cumulative_spend) %>%
# tidyr::pivot_wider(names_from = category, values_from = cost) %>%
# as.data.frame()
# }
# }
create_summary_tables(data, "n", "n", "w")
|
9cfdb50d29c80799d28a5f1a8f4925dc1d207bc9
|
cea78c8386e4f72501280eb624b5242eb215b08c
|
/plot1.R
|
289d36e52fc81eb3ce03d5e5918c8edbf0c911b2
|
[] |
no_license
|
bionicturtle/ExData_Plotting1
|
883ba29c20e963f4c93d8b95c2d0c00aace37a92
|
2f0bd2c35ea2f07a0fa648526c90fc0cdfe443d9
|
refs/heads/master
| 2021-01-17T11:43:43.480391
| 2014-09-06T18:08:40
| 2014-09-06T18:08:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,373
|
r
|
plot1.R
|
plot1 <- function() {
    # Histogram of Global Active Power for 1-2 Feb 2007 (UCI household power
    # consumption data). No arguments; called for its side effect of writing
    # plot1.png to the working directory.
    #
    # Due to very large file size (~130 MB), the original dataset is not pushed to my github
    # Below I show the local code is here, which reads the large dataset, 2,075,259 observations (rows)
    # Then writes to a much smaller file. So, plots 1 to 4 read the smaller (fewer rows) file size of 292K
    # 1. epcData <- read.csv("household_power_consumption.txt",sep=";", stringsAsFactors=FALSE)
    # 2. subset feb 1st and 2nd, 2007 which is only 10,093 observations
    # 3. feb12 <- subset(epcData, Date=="1/2/2007" | Date=="2/2/2007")
    # 4. write.csv(feb12, "household_power_consumption_1_2_feb_2007.txt", row.names=FALSE)
    # read the subsetted text file
    feb12 <- read.csv("household_power_consumption_1_2_feb_2007.txt", stringsAsFactors=FALSE)
    # 'Date' field in original dataset is character mode
    # strptime() function converts the Date+Time char into a POSIXlt object
    feb12$date_time = strptime(paste(feb12$Date, feb12$Time), format = "%d/%m/%Y %H:%M:%S")
    # "?" missing markers become NA under as.numeric coercion
    feb12$Global_active_power <- as.numeric(feb12$Global_active_power)
    # Plot 1 on the screen device, then copy to a png device.
    # NOTE(review): dev.copy(png, ...) uses the default 480x480 size --
    # confirm that matches the expected output dimensions.
    hist(feb12$Global_active_power, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)", ylab="Frequency")
    dev.copy(png, file="plot1.png")
    dev.off()
}
|
5afe3176c629a4908e709e26df80c179a2201c3f
|
a2ccc1c1c2c06f9d533c8fb78234509fe61cd478
|
/bin/RF_200kSnps_populations.R
|
9545f3fa433f128d95ff5bcc973c90d35543ecf5
|
[
"MIT"
] |
permissive
|
lifebit-ai/siteqc
|
e6d8edb63e5677b444ab740075dbbf48a3fbcb0a
|
69425e964fd3a758085ecf854496614a0c3121e7
|
refs/heads/master
| 2023-01-31T23:44:21.731094
| 2020-11-10T14:43:57
| 2020-11-10T14:43:57
| 285,573,935
| 0
| 3
|
MIT
| 2020-11-30T12:09:33
| 2020-08-06T13:06:13
|
Shell
|
UTF-8
|
R
| false
| false
| 13,157
|
r
|
RF_200kSnps_populations.R
|
#module load lang/R/3.6.0-foss-2019a
lapply(c("data.table", "tidyverse", "magrittr"), library, character.only = T)
#Produce RF predicting the pops based on projected SNPs on 1000kgp3
#Read population labels
indiv_pop<-fread("/re_gecip/BRS/thanos/ethnicities_aggV2/1KGP3.sample_table")
super_pop<-fread("/re_gecip/BRS/thanos/ethnicities_aggV2/super_pop_codes.tsv")
phen<-merge(indiv_pop,super_pop,by="Population")
#Read plate_keys of 1KGP3 unrelated individuals
k1g<-fread("/re_gecip/BRS/thanos/ethnicities_aggV2/1KGP3_30K_unrel_autosomes.fam")[,1]
#Read 1KGP3 PCs and their projection to aggV2 with 200k SNPs
pcsk1g<-fread("/re_gecip/BRS/thanos/ethnicities_aggV2/aggV2_ethn_200Ksites/hwe10e-6_superpops_195ksnps_1KGP3samples_unrel.eigenvec")[,-2]
pcsgel<-fread("/re_gecip/BRS/thanos/ethnicities_aggV2/aggV2_ethn_200Ksites/autosomes_LD_pruned_1kgp3Intersect_ALL_hwe10e-6.proj.eigenvec")[,-c(2,23)]
#Set up sample names
colnames(k1g)[1]="Sample"
colnames(pcsk1g)[1]="Sample"
colnames(pcsk1g)[2:21]=paste0("PC",1:20)
colnames(pcsgel)[1]="Sample"
colnames(pcsgel)[2:21]=paste0("PC",1:20)
#Create training data
train_data<-merge(phen,k1g,by="Sample")
train_data<-merge(train_data,pcsk1g,by="Sample")
## RANDOM FOREST
#Train on subpops
library(randomForest)
set.seed(123)
#Lets see how many PCs and how many trees we should be using.
# Grid-search the number of PCs and number of trees for a randomForest
# ancestry classifier, scored by out-of-bag (OOB) error.
#
# pop : name of the label column to train on -- "Super_Population" for the
#       five super-populations, "Population" for the 26 sub-populations.
# Returns a data frame with one row per (ntrees, npcs) combination and the
# minimum OOB error reached by that model.
#
# NOTE(review): relies on the global `train_data` built earlier in the script
# (columns 1-5 are sample/label metadata; PCs start at column 6) -- confirm
# before reusing outside this script.
grid_search_parameters <- function(pop){
    # Function runs grid search for PCs and Ntrees as defined in the code below.
    # Supply pop as either Super_Population, or Population to train on super or sub pops
    # cross_df() expands the two vectors into their full Cartesian product.
    res <- list(ntrees = c(1,10,20,30,40,50,100,200,300,400,500,1000),
                npcs = c(1:20)) %>%
        cross_df() %>%
        mutate(err.rate = NA)
    for(i in 1:nrow(res)){
        # Define our variables for each run
        ntrees <- res$ntrees[i]
        npcs <- res$npcs[i]
        # First 5 columns are metadata; keep only the first `npcs` PC columns.
        rfdat=train_data[,1:(5+npcs)] %>% as_tibble()
        dat <- rfdat[,6:ncol(rfdat)]
        # !!pop (string) selects the label column; !!as.symbol(pop) reads it.
        y = select(rfdat, !!pop) %>%
            mutate(inpop = as.factor(!!as.symbol(pop)))
        # Train random forest algorithm on individual population labels
        fit_pop=randomForest(dat, y=y$inpop, ntree=ntrees)
        # Store the best OOB for each model: err.rate[,1] is the overall OOB
        # error after each tree; min() takes the best point along the run.
        res$err.rate[i] <- min(fit_pop$err.rate[,1])
    }
    return(res)
}
#Run search
pop_super <- grid_search_parameters(pop = "Super_Population")
pop_sub <- grid_search_parameters(pop = 'Population')
#The above searches suggest that the best performing models are
pop_super %>% arrange(err.rate)
pop_sub %>% arrange(err.rate)
###Based on the above, going to use the following
# For the sub-pop prediction, 20PCs, and 400 trees
# For the super-pop prediction, 8PCs, and 400
rfdat=train_data[,1:(5+20)] #the plus indicates the number of PCs included
#Train random forest algorithm on individual population labels
fit_pop=randomForest(rfdat[,6:ncol(rfdat)],
y=as.factor(rfdat$Population),
ntree=400,
keep.inbag = T)
rfdat=train_data[,1:(5+8)]
#Train random forest algorithm on individual population labels
fit_super=randomForest(rfdat[,6:ncol(rfdat)],
y=as.factor(rfdat$Super_Population),
ntree=500,
keep.inbag = T)
#Lets see how we do in our sub pop when we compress to super pops
#Take the confusion matrix, rename the variables and sum the matrix by groups
confmat <- fit_pop$confusion
#Drop the error col
confmat <- confmat[,-ncol(confmat)]
superlabs <- super_pop %>% select(Population, Super_Population)
labs <- colnames(confmat) %>%
tibble::enframe(name = NULL) %>%
left_join(superlabs, by=c('value'='Population'))
colnames(confmat) <- rownames(confmat) <- labs$Super_Population
tmp <- rowsum(confmat, row.names(confmat))
tmp <- tmp %>% t()
tmp <- rowsum(tmp, row.names(tmp))
adjusted_confmat <- t(tmp)
#Use fitted pop model to predict on projected GEL PCs
pred_GEL_pop = predict(fit_pop,pcsgel,type="prob")
pred_GEL_pop=data.frame(pcsgel$Sample,pred_GEL_pop)
#Use fitted model to predict on projected GEL PCs
pred_GEL_super = predict(fit_super,pcsgel,type="prob")
pred_GEL_super=data.frame(pcsgel$Sample,pred_GEL_super)
pop_prob<-list()
spops=unique(phen$Super_Population)
#pdf("singlepop_ancestry_vs_superanc.pdf",height=6,width=9)
#par(mfrow=c(2,3))
# Collapse sub-population assignment probabilities into super-population
# probabilities and merge them with the per-subpopulation probabilities.
#
# NOTE(review): the parameter `x` is never used; the function reads the
# globals `spops`, `super_pop`, `pred_GEL_pop`, `pred_GEL_super`, `pop_prob`
# and `pcsgel` instead. Its value is the merged `ancestries` data frame (last
# expression), but the visible script never calls it before
# `write.table(ancestries, ...)` -- confirm how `ancestries` is created.
assign_pops <- function(x){
    # For each super-population sum the probabilities of assignment to each
    # subpopulation for pop_prob, and get the super-pop predicted values for
    # super_prob.
    for (lpop in 1:length(spops)){
        # Columns of pred_GEL_pop belonging to this super-population.
        focal_pop=sort(match(unlist(super_pop[which(super_pop$Super_Population==spops[lpop]),1]),colnames(pred_GEL_pop)))
        # Sub-pop probabilities summed per sample (modifies the local copy of
        # pop_prob only, not the global).
        pop_prob[[lpop]]=rowSums(pred_GEL_pop[,focal_pop])
        super_prob=pred_GEL_super[,which(colnames(pred_GEL_super)==spops[lpop])]
        #plot(super_prob,pop_prob[[lpop]],xlab="Super pop trained probability",ylab="Sum prob of single pops",main=paste0(spops[lpop],",cor=",round(cor(super_prob,pop_prob[[lpop]]),2)),pch=20)
        #abline(0,1)
    }
    #dev.off()
    # Bind and format final tables
    superpops_probs=data.frame(pcsgel$Sample,do.call("cbind",pop_prob))
    colnames(superpops_probs)=c("Sample",spops)
    colnames(pred_GEL_pop)[1]="Sample"
    # merge individual pop probabilities and super-pop probabilities
    ancestries=merge(superpops_probs,pred_GEL_pop,by="Sample")
}
#write-out inferred ancestries
write.table(ancestries,"aggV2_ancestry_assignment_probs_1KGP3_200K.tsv",row.names=F,quote=F)
#################
###Further investigation
lapply(c('ggplot2','dplyr','magrittr','randomForest','data.table'),
library, as.character = T)
##### Checking to see what the ancestry assignment is for the HWE-2 + HWE-10 data based on varying thresholds of probability
# Fit a super-population random forest (6 PCs, 400 trees) on 1KGP3 reference
# samples and return the model together with the data used to fit it.
#
# pcsgel : projected GEL PCs (first column sample id, then 20 PCs).
# pcsk1g : 1KGP3 reference PCs (same layout).
# Returns a list: $model (randomForest fit), $rfdat (training labels + PCs),
# $pcsgel and $pcsk1g (the renamed inputs).
#
# NOTE(review): reads fixed research-environment paths via fread(); will only
# run inside that environment.
fit_model <- function(pcsgel, pcsk1g){
    # Read population labels
    indiv_pop<-fread("/re_gecip/BRS/thanos/ethnicities_aggV2/1KGP3.sample_table")
    super_pop<-fread("/re_gecip/BRS/thanos/ethnicities_aggV2/super_pop_codes.tsv")
    phen<-merge(indiv_pop,super_pop,by="Population")
    # Read plate_keys of 1KGP3 unrelated individuals
    k1g<-fread("/re_gecip/BRS/thanos/ethnicities_aggV2/1KGP3_30K_unrel_autosomes.fam")[,1]
    # Set up sample names: column 1 is the sample id, columns 2-21 are PC1-20.
    colnames(k1g)[1]="Sample"
    colnames(pcsk1g)[1]="Sample"
    colnames(pcsk1g)[2:21]=paste0("PC",1:20)
    colnames(pcsgel)[1]="Sample"
    colnames(pcsgel)[2:21]=paste0("PC",1:20)
    # Create training data: labels merged with reference PCs, restricted to
    # unrelated 1KGP3 samples.
    train_data<-merge(phen,k1g,by="Sample")
    train_data<-merge(train_data,pcsk1g,by="Sample")
    # First fit the model
    # Checking on super populations with 6 pcs and 400 trees
    rfdat=train_data[,1:(5+6)] #the plus indicates the number of PCs included
    # Train random forest algorithm on individual population labels
    fit_super=randomForest(rfdat[,6:ncol(rfdat)],
                           y=as.factor(rfdat$Super_Population),
                           ntree=400,
                           keep.inbag = T)
    # Bundle the fit and its inputs so callers can predict and inspect.
    l <- list()
    l$model <- fit_super
    l$rfdat <- train_data[,1:(5+6)]
    l$pcsgel <- pcsgel
    l$pcsk1g <- pcsk1g
    return(l)
}
pcsk1ghwe2<-fread("/re_gecip/BRS/thanos/ethnicities_aggV2/aggV2_ethn_200Ksites/hwe10e-2_superpops_195ksnps_1KGP3samples_unrel.eigenvec")[,-2]
pcsgelhwe2 <- fread("/re_gecip/BRS/thanos/ethnicities_aggV2/aggV2_ethn_200Ksites/autosomes_LD_pruned_1kgp3Intersect_ALL_hwe10e-2.proj.eigenvec")[,-c(2,23)]
pcsk1ghwe6<-fread("/re_gecip/BRS/thanos/ethnicities_aggV2/aggV2_ethn_200Ksites/hwe10e-6_superpops_195ksnps_1KGP3samples_unrel.eigenvec")[,-2]
pcsgelhwe6 <- fread("/re_gecip/BRS/thanos/ethnicities_aggV2/aggV2_ethn_200Ksites/autosomes_LD_pruned_1kgp3Intersect_ALL_hwe10e-6.proj.eigenvec")[,-c(2,23)]
modhwe2 <- fit_model(pcsgelhwe2, pcsk1ghwe2)
modhwe6 <- fit_model(pcsgelhwe6, pcsk1ghwe6)
#Save the models and associated datat
modhwe2 %>% saveRDS('/re_gecip/shared_allGeCIPs/drhodes/Aggregation_79k/out_actual/Ancestries/InvestigateHWE/hwe2_pcs6_trees400.RDS')
modhwe6 %>% saveRDS('/re_gecip/shared_allGeCIPs/drhodes/Aggregation_79k/out_actual/Ancestries//InvestigateHWE/hwe6_pcs6_trees400.RDS')
#Now check the labels
# Predict super-population membership probabilities for the GEL samples held
# in a fit_model() result.
#
# model : list from fit_model() ($model = randomForest fit, $pcsgel = PCs).
# Returns a tibble whose first column is the sample id. The final assignment
# is the (invisible) return value.
#
# NOTE: data.frame() deparses `model$pcsgel$Sample` into the column name
# "model.pcsgel.Sample"; downstream joins (e.g. by = 'model.pcsgel.Sample')
# depend on that exact name, so do not rename it here.
pred_labels <- function(model){
    pred_GEL_super = predict(model$model,model$pcsgel,type="prob")
    pred_GEL_super=data.frame(model$pcsgel$Sample,pred_GEL_super) %>% as_tibble()
}
#How are the ancestry assignments across them
# Annotate a table of per-population assignment probabilities with the
# maximum-probability label and hard-threshold indicator columns.
#
# labs  : data frame/tibble with one probability column per entry of `preds`.
# preds : names of the probability columns. Defaults to the five 1000G
#         super-population codes, as hard-coded in the original; now a
#         parameter so the helper also works for other label sets.
#
# Adds:
#   ethnmax : name of the column holding the row-wise maximum probability.
#   ethn0.8 : 1 if that maximum exceeds 0.8, else 0.
#   ethn0.5 : 1 if that maximum exceeds 0.5, else 0.
maxpop <- function(labs, preds = c('AFR','AMR','EAS','EUR','SAS')){
  prob <- labs[, preds]
  # Compute the row maximum once instead of three separate apply() passes.
  maxval <- apply(prob, 1, max)
  labs %<>%
    mutate(ethnmax = apply(prob, 1, function(x) names(x)[which.max(x)]),
           ethn0.8 = ifelse(maxval > 0.8, 1, 0),
           ethn0.5 = ifelse(maxval > 0.5, 1, 0))
  return(labs)
}
labhwe2 <- pred_labels(modhwe2) %>% maxpop()
labhwe6 <- pred_labels(modhwe6) %>% maxpop()
out <- left_join(labhwe2, labhwe6, by = 'model.pcsgel.Sample', suffix = c(".hwe2",".hwe6"))
out %>% fwrite('/re_gecip/shared_allGeCIPs/drhodes/Aggregation_79k/out_actual/Ancestries/InvestigateHWE/anc_probs_hwe2_hwe6_multiProbThreshold.tsv',
sep='\t')
count(out, ethn0.8, ethnmax) %>% filter(ethn0.8 ==1)
#Lets also just look at the concordance with the 30k data
#To do this I have to repeat the above but for the 30k
pcsk1g30<-fread("/re_gecip/BRS/thanos/ethnicities_aggV2/1KGP3_30K_unrel_autosomes.eigenvec")[,-2]
pcsgel30<-fread("/re_gecip/BRS/thanos/ethnicities_aggV2/aggV2_1KGP3_30K_projection.proj.eigenvec")[,-c(2,23)]
k30model <- fit_model(pcsgel30, pcsk1g30)
k30labs <- pred_labels(k30model) %>% maxpop()
tmp <- labhwe6 %>% left_join(k30labs, by='model.pcsgel.Sample', suffix = c('hwe6','k30'))
tmp %>% filter(ethnmaxhwe6 == ethnmaxk30 & ethn0.8hwe6 == ethn0.8k30)
#Compare the probabilities for the train/pred super vs train/pred sub
subsuper <- ancestries %>% select(Sample, AFR, SAS, EAS, EUR, AMR) %>%
left_join(pred_GEL_super, by=c('Sample'='pcsgel.Sample'),
suffix = c('.sub','.super'))
library(ggplot)
library(patchwork)
#Plot some things
df <- subsuper
plots <- lapply(c('AFR','AMR','EUR','SAS','EAS'), function(x) {
popsub <- paste0(x, '.sub')
popsuper <- paste0(x,'.super')
df %>%
ggplot(aes_string(popsub, popsuper)) +
geom_point(alpha = 0.3) +
geom_abline(intercept = 0, slope = 1) +
geom_vline(xintercept = 0.8, colour = 'red', linetype = 'dashed') +
geom_hline(yintercept = 0.8, colour = 'red', linetype = 'dashed') +
theme_minimal()
})
probs <- c('AFR','SAS','EAS','AMR','EUR')
subprobs <- paste0(probs,'.sub')
superprobs <- paste0(probs, '.super')
df %<>%
mutate(ethnmax_super = apply(.[,superprobs], 1,
function(x) names(x)[which.max(x)]))
df %<>%
mutate(ethn0.8_super =
ifelse(apply(.[,superprobs], 1, function(x) max(x)) > 0.8, 1, 0))
df %<>%
mutate(ethnmax_sub = apply(.[,subprobs], 1,
function(x) names(x)[which.max(x)]))
df %<>%
mutate(ethn0.8_sub =
ifelse(apply(.[,subprobs], 1, function(x) max(x)) > 0.8, 1, 0))
df %<>% mutate(matching = ifelse(
gsub('\\..*','',ethnmax_sub) ==
gsub('\\..*','',ethnmax_super), 1, 0 ))
df %>% count(matching) %>% mutate(perc = n/sum(n) *100)
#So we end up with a 95.6% match between our super and sub pop assignments.
#Lets look at those that don't match
df %>%
filter(matching == 0) %>%
ggplot(aes(EUR.sub, EUR.super)) +
geom_point(alpha = 0.3) +
xlim(c(0,1)) +
geom_abline(intercept = 0, slope = 1) +
geom_vline(xintercept = 0.8, colour = 'red', linetype = 'dashed') +
geom_hline(yintercept = 0.8, colour = 'red', linetype = 'dashed') +
theme_minimal()
#Now we need to bring the 30k SNPs and probabilities into this
k30 <- fread('aggV2_ancestry_assignment_probs_1KGP3_30K.tsv') %>% as_tibble()
#These are sub population based super pop estimates, let's compare the 30k snp sub pops agains 200k subpops
df2 <- df %>% select(Sample, AFR.sub, SAS.sub, EAS.sub, AMR.sub, EUR.sub) %>%
left_join(select(k30, AFR, SAS, EAS, AMR, EUR, Sample), by='Sample')
names(df2) <- c('sample',
paste0(probs, '.200ksub'), paste0(probs, '.30ksub'))
plots <- lapply(probs, function(x) {
popsub <- paste0(x, '.200ksub')
popsuper <- paste0(x,'.30ksub')
df2 %>%
ggplot(aes_string(popsub, popsuper)) +
geom_point(alpha = 0.3) +
geom_abline(intercept = 0, slope = 1) +
geom_vline(xintercept = 0.8, colour = 'red', linetype = 'dashed') +
geom_hline(yintercept = 0.8, colour = 'red', linetype = 'dashed') +
theme_minimal()
})
probs30k <- paste0(probs,'.30ksub')
probs200k <- paste0(probs, '.200ksub')
df2 %<>%
mutate(ethnmax_30k = apply(.[,probs30k], 1,
function(x) names(x)[which.max(x)]))
df2 %<>%
mutate(ethn0.8_30k =
ifelse(apply(.[,probs30k], 1, function(x) max(x)) > 0.8, 1, 0))
df2 %<>%
mutate(ethnmax_200k = apply(.[,probs200k], 1,
function(x) names(x)[which.max(x)]))
df2 %<>%
mutate(ethn0.8_200k =
ifelse(apply(.[,probs200k], 1, function(x) max(x)) > 0.8, 1, 0))
df2 %<>% mutate(matching = ifelse(
gsub('\\..*','',ethnmax_30k) ==
gsub('\\..*','',ethnmax_200k), 1, 0 ))
df2 %>% count(matching) %>% mutate(perc = n/sum(n) *100)
|
2d5cca52abe61040aa4a39551ac6e65574c092b0
|
a27d9b26dca0897fbe312fb672230698441cc5a6
|
/getXLSXTextColour.R
|
7968b0db646caa260d6292f502bb3103fd66ff75
|
[] |
no_license
|
seannyD/ConvertExcelTextCoding
|
4568cd5703c9f1c13a6cdf807236ffa31c5f3330
|
abd008405965f5be691c76f4660bde1a988e5b5b
|
refs/heads/master
| 2021-01-01T04:49:15.980085
| 2017-07-14T17:26:18
| 2017-07-14T17:26:18
| 97,256,725
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,719
|
r
|
getXLSXTextColour.R
|
# Load an excel file and convert a column to a category based on text colour.
library(xlsx)
# Hint: after making a CellStyle object called cellStyle, run names(cellStyle) to get a list of functions that can be run to get different properties of the font/background colour.
# Return the font colour of a spreadsheet cell.
# Colours are stored either as a full RGB hex value or as an indexed theme
# colour; try the RGB form first and fall back to the theme colour index.
getCellFontColour <- function(cell) {
  style <- getCellStyle(cell)
  font <- style$getFont()
  # getRgb() throws for indexed colours; treat that as "no RGB available".
  hex <- tryCatch(font$getRgb(), error = function(e) NULL)
  if (is.null(hex)) {
    return(style$getFont()$getThemeColor())
  }
  hex
}
# the file to convert
filename = "~/Desktop/TextColours.xlsx"
# the file to write the results to
outfilename = "~/Desktop/TextColours_withCategories.csv"
# the column you want to convert in the file.
columnToConvert = 2
# Load the workbook (we need to do it this way to get at the formatting options)
wb <- xlsx::loadWorkbook(filename)
# Get the sheets in the workbook
sheets <- getSheets(wb)
# Get the first sheet
sheet <- sheets[[1]]
# get rows in the sheet (the data for all rows)
rows <- getRows(sheet)
# get the data in the column to be converted
cells <- getCells(rows, colIndex =columnToConvert)
# Get the text colours in the column
colours = sapply(cells,getCellFontColour)
# cut out the first row (header)
colours = colours[2:length(colours)]
# Convert the colours to an integer
colour.categories = as.integer(as.factor(colours))
# Read in the data (again, but as a data frame)
wb2 = xlsx::read.xlsx(filename, sheetIndex=1, header=TRUE)
# Add the extra column of colours converted to an integer
wb2 = cbind(wb2, colour.categories)
# Write out the data to a new file
write.csv(wb2, file=outfilename)
|
266eb2004c8814b3e0fc9604aff7fd5c76aed36d
|
ea5df7e31a71a1eb8de97680c20cdf8a0dc8fb57
|
/run_analysis.R
|
76c1e807e2ae33f2c9e7e4213ac9a0f9bcaaea42
|
[] |
no_license
|
uredkar/datasciencecoursera
|
fe80464582b74ac73384eb57f434b1901453c24a
|
1d7ab122599e5b436417088fa95ef068f382799e
|
refs/heads/master
| 2020-05-30T18:50:28.125868
| 2014-04-19T17:54:33
| 2014-04-19T17:54:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,129
|
r
|
run_analysis.R
|
library(reshape2)
library(data.table)
# this is where the files are stored
setwd("C:/Sources/cousera/getdata-002/Assignment1/UCI HAR Dataset")
features = read.table("features.txt",sep= "")
activities = read.table("activity_labels.txt",sep= "")
# load test
x_test = read.table("test/X_test.txt",sep= "")
y_test = read.table("test/y_test.txt",sep= "")
subject_test = read.table("test/subject_test.txt",sep= "")
# buld the test dataset
x_test2 = cbind(y_test,subject_test,x_test)
# load train
x_train = read.table("train/x_train.txt",sep= "")
y_train = read.table("train/y_train.txt",sep= "")
subject_train = read.table("train/subject_train.txt",sep= "")
# buld the train dataset
y_train2 = cbind(y_train,subject_train,x_train)
#combine test and train dataset
alldt <- rbind(x_test2, y_train2)
# add Activities and Subject columns to all the features
colnames = c("Activities","Subject",as.character(features$V2))
# assign column name to the dataframe/dataset
names(alldt) = colnames
# convert the activity id to label
activitylabel = sapply(alldt$Activities,FUN = function(x ) activities$V2[x])
#assign activity label to common dataset
alldt$Activities = activitylabel
# clear a function to filter columns
# Decide whether a column name should be kept in the tidy dataset.
#
# s : a single column name.
# Returns TRUE for measurement columns whose name contains "mean" or "std",
# and for the "Activities" / "Subject" identifier columns; FALSE otherwise.
#
# BUG FIX: the original used unescaped regex patterns such as "*mean()*" and
# "*Activities *" (stray leading "*", unescaped "()", trailing space), which
# relied on accidental regex behaviour and likely failed to keep the
# identifier columns. The intent is matched explicitly here with fixed
# (literal) substring matching and exact identifier comparison.
matchMeanAndStd <- function(s)
{
  if (grepl("mean", s, fixed = TRUE))
  {
    return(TRUE)
  }
  if (grepl("std", s, fixed = TRUE))
  {
    return(TRUE)
  }
  if (s %in% c("Activities", "Subject"))
  {
    return(TRUE)
  }
  return(FALSE)
}
#use the above function to get columns that match the filer
matched = lapply(colnames,matchMeanAndStd)
# create the first data set which has the required columns
meanvarDFFirst = alldt[,colnames[which(c(unlist(matched)))] ]
# convert the first dataset to a table
meanvarSecond = as.data.table(meanvarDFFirst)
# do a group by Subject, Activities and get a mean i.e average for all columns
group = meanvarSecond[, lapply(.SD, mean), by = c("Subject","Activities")]
# save the tidy data table for export
write.table(group, file = "tidy.txt")
# check if saved correctly
tidy = read.table("tidy.txt")
|
83de65b17335b6e94588d9b9f389766149d67de8
|
984af1093a5185c475a24bd8fcb420906d770b2c
|
/hw2temp/question6.R
|
c930f25e7498db70179d9466f2213f7c1e238586
|
[] |
no_license
|
gracewindheim/gmwDataSci
|
1a4f2209394824a5276a3c81cd68f61d52fde9ff
|
9bb83231b14b0e56a7718f064ab0e816e31e7e93
|
refs/heads/master
| 2022-07-16T16:00:05.734336
| 2020-05-14T01:33:06
| 2020-05-14T01:33:06
| 235,359,264
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 519
|
r
|
question6.R
|
#question 6
#Grace Windheim
# Probability that at least two of `people` people share a birthday
# (365-day year, uniform birthdays, no leap day).
#
# people : non-positive values give probability 0.
# Prints the probability (matching the original behaviour for interactive
# use); print() returns its argument invisibly, so callers such as
# `x[i] <- birthday(i)` still receive the numeric value.
birthday = function(people) {
  n = 365
  if (people <= 0) {
    print(0)
  } else {
    # P(no shared birthday) = (365/365) * (364/365) * ... over `people` terms.
    # BUG FIX: the original computed perm / 365^people, which overflows to
    # Inf/Inf = NaN for people beyond ~150; the term-by-term ratio product is
    # numerically safe for any group size.
    cprob = prod((n - seq_len(people) + 1) / n)
    prob = 1 - cprob
    print(prob)
  }
}
# Create a plot of the shared-birthday probability for group sizes 1..100.
x = vapply(seq_len(100), birthday, numeric(1))
# Pair each group size with its probability and plot the curve.
r = data.frame(n = 1:100, Probability = x)
plot(r, main="Probability of two people in a room of n people sharing
a birthday")
|
5f0610385dc04e933440ae4aae4d273e6987015b
|
c9d7e1396064a7f5f59557ed90dd89dc88d7dc41
|
/R/scores_lnorm_discrete.R
|
522083b1a7ffc93bf7695f862ca447c777fe2a7f
|
[] |
no_license
|
HIDDA/forecasting
|
47c20ba21a35ccd2d259a58f81fdd53f13f07684
|
5dbfcbfa616a8f097adc71d071418e026eff3beb
|
refs/heads/master
| 2021-06-06T16:01:53.562693
| 2021-03-31T15:55:04
| 2021-03-31T15:57:06
| 103,663,438
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,106
|
r
|
scores_lnorm_discrete.R
|
################################################################################
### Proper Scoring Rules for *Discretized* Log-Normal Forecasts
###
### Copyright (C) 2019 Sebastian Meyer
###
### This file is part of the R package "HIDDA.forecasting",
### free software under the terms of the GNU General Public License, version 2,
### or (at your option) any later version, a copy of which is available at
### https://www.R-project.org/Licenses/.
################################################################################
##' Proper Scoring Rules for *Discretized* Log-Normal Forecasts
##'
##' Compute scores for discretized log-normal forecasts.
##' The function is vectorized and preserves the dimension of the input.
##'
##' @inheritParams scores_lnorm
##' @return scores for the predictions of the observations in `x` (maintaining
##' their dimensions).
##' @importFrom stats setNames plnorm qlnorm
##' @export
scores_lnorm_discrete <- function (x, meanlog, sdlog, which = c("dss", "logs"))
{
    ## Dispatch by name to the per-rule helpers ("dss_lnorm_discrete",
    ## "logs_lnorm_discrete"); setNames() keeps the short rule names on the
    ## result. alist() passes the arguments unevaluated so do.call resolves
    ## them in this function's frame (envir = environment()).
    scorelist <- lapply(X = setNames(paste0(which, "_lnorm_discrete"), nm = which),
                        FUN = do.call,
                        args = alist(y = x, meanlog = meanlog, sdlog = sdlog),
                        envir = environment()) # to resolve x, meanlog, sdlog
    ## Stack the per-rule score arrays along a new last dimension, preserving
    ## the dimensions of x.
    simplify2array(scorelist, higher = TRUE)
}
## log-score for a discretized log-normal forecast
## Log-score (negative log predictive probability) of observation y under a
## discretized LN(meanlog, sdlog) forecast. Vectorized; delegates to the
## numerically stable logdlnorm_discrete() rather than log(dlnorm_discrete()).
logs_lnorm_discrete <- function (y, meanlog = 0, sdlog = 1)
{
    -logdlnorm_discrete(y, meanlog, sdlog)
}
## compute (log-)probabilty of x according to discretized LN
## Probability (or log-probability) that a discretized log-normal variate
## equals x, i.e. the LN(meanlog, sdlog) mass of the interval
## [x - 0.5, x + 0.5]. With log = TRUE, delegates to the numerically stable
## log-space helper.
dlnorm_discrete <- function (x, meanlog = 0, sdlog = 1, log = FALSE)
{
    if (log)
        return(logdlnorm_discrete(x, meanlog, sdlog))
    upper <- plnorm(x + 0.5, meanlog, sdlog)
    lower <- plnorm(x - 0.5, meanlog, sdlog)
    upper - lower
}
## Log-probability of x under the discretized LN: computes
## log(P(X <= x+0.5) - P(X <= x-0.5)) entirely in log space to avoid
## catastrophic cancellation in the distribution tails.
logdlnorm_discrete <- function (x, meanlog = 0, sdlog = 1)
{
    ## compute log(1 - exp(x)), R translation of R_Log1_Exp from src/nmath/dpq.h
    ## (the -log(2) switch picks whichever of log(-expm1) / log1p(-exp) is
    ## more accurate for the given magnitude)
    log_1mexp <- function (x) {
        ifelse(x > -log(2), log(-expm1(x)), log1p(-exp(x)))
    }
    ## compute log (exp (logx) - exp (logy)), C version in src/nmath/pgamma.c
    logspace_sub <- function (logx, logy) {
        logx + log_1mexp(logy - logx)
    }
    ## subtract the two log-CDF values in log space
    logspace_sub(plnorm(x + 0.5, meanlog, sdlog, log.p = TRUE),
                 plnorm(x - 0.5, meanlog, sdlog, log.p = TRUE))
}
## compute Dawid-Sebastiani score for discretized LN
## Dawid-Sebastiani score (y - m)^2/v + log(v) for a discretized
## LN(meanlog, sdlog) forecast, using a moment approximation of the
## discretized distribution (exact only asymptotically; a warning is issued
## when the continuous mean/variance are too small for it to be reliable).
dss_lnorm_discrete <- function (y, meanlog = 0, sdlog = 1)
{
    ## mean is preserved by the discretization
    m <- exp(meanlog + sdlog^2/2)
    ## variance is increased by 1/12 through rounding
    ## (for reasonably large original mean and variance)
    ## see also https://stats.stackexchange.com/questions/209260
    v0 <- m^2 * expm1(sdlog^2)
    if (any(m < 3 | v0 < 0.25))
        warning("unsuitable approximation")
    v <- v0 + 1/12
    ## kept for reference: brute-force variance of the discretized
    ## distribution, used to validate the +1/12 approximation above
    ## v_approx <- mapply(FUN = function (m, s) {
    ##     xgrid <- floor(qlnorm(1e-12, m, s)) : ceiling(qlnorm(1-1e-12, m, s))
    ##     p <- dlnorm_discrete(xgrid, m, s)
    ##     sum(p * xgrid^2) - sum(p * xgrid)^2
    ## }, m = meanlog, s = sdlog, USE.NAMES = FALSE)
    (y - m)^2 / v + log(v)
}
|
587ac03637f3bb160397b1b7f9d5e29d0caea546
|
a9793f1bb4803bf57c0bf93978693b9ffd1afef4
|
/man/LIN3df.Rd
|
f533c2ce200e04b2436ce0f6e3aab4eebbcc795b
|
[] |
no_license
|
gsoutinho/survrec
|
632b42f857cb3429a416ea2b61642c3cfb6e7a5e
|
3c7bd0ff4a7d06ebb39a93aa700ccde46b5ff839
|
refs/heads/main
| 2023-08-16T05:54:22.941797
| 2021-10-20T13:42:08
| 2021-10-20T13:42:08
| 418,166,755
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,611
|
rd
|
LIN3df.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LIN3df.R
\name{LIN3df}
\alias{LIN3df}
\title{Lin's estimator for the general case of K gap times distribution function.}
\usage{
LIN3df(object, x, y, z)
}
\arguments{
\item{object}{An object of class multidf.}
\item{x}{The first time for obtaining estimates for the general case of distribution function.}
\item{y}{The second time for obtaining estimates for the general case of distribution function.}
\item{z}{The third time for obtaining estimates for the general case of distribution function.}
}
\value{
Vector with the Lin's estimates for the general case of the K gap times distribution function.
}
\description{
Provides estimates for the bivariate distribution function based on the extension of Lin's
estimator to the general case of K gap times.
}
\examples{
b4state <- multidf(time1=bladder5state$y1, event1=bladder5state$d1,
time2= bladder5state$y1+bladder5state$y2, event2=bladder5state$d2,
time=bladder5state$y1+bladder5state$y2+bladder5state$y3, status=bladder5state$d3)
head(b4state)[[1]]
LIN3df(b4state,x=13,y=20,z=40)
b4 <- multidf(time1=bladder4$t1, event1=bladder4$d1,
time2= bladder4$t2, event2=bladder4$d2,
time=bladder4$t3, status=bladder4$d3)
LIN3df(b4,x=13,y=20,z=40)
}
\references{
Lin, D. Y., Sun, W. and Ying, Z. (1999). Nonparametric estimation of the gap time distributions
for serial events with censored data, Biometrika 86, 59-70.
}
\seealso{
\code{\link{LDM3df}}, \code{\link{KMW3df}} and \code{\link{WCH3df}}.
}
\author{
Gustavo Soutinho and Luis Meira-Machado
}
|
800c04f39f7f681c36ded19779349f3a0c63d28b
|
13895420920703501ab66c28a3927089a2de042e
|
/R/cluster.plot.R
|
68682904cfd0e51cc41fb64ef8753e1f80a9a846
|
[] |
no_license
|
cran/psych
|
3349b3d562221bb8284c45a3cdd239f54c0348a7
|
ee72f0cc2aa7c85a844e3ef63c8629096f22c35d
|
refs/heads/master
| 2023-07-06T08:33:13.414758
| 2023-06-21T15:50:02
| 2023-06-21T15:50:02
| 17,698,795
| 43
| 42
| null | 2023-06-29T05:31:57
| 2014-03-13T05:54:20
|
R
|
UTF-8
|
R
| false
| false
| 4,279
|
r
|
cluster.plot.R
|
#revised Sept 16, 2013 to give control over the positon (pos) and size (cex) of the labels
#revised June 21, 2016 to allow naming of the points
"cluster.plot" <-
function(ic.results,cluster=NULL,cut = 0.0,labels=NULL, title="Cluster plot",pch=18,pos,show.points=TRUE,choose=NULL,...) {
if (!is.matrix(ic.results) ) {if (!is.null(class(ic.results)) ) {
if(inherits(ic.results[1],"kmeans")) { load <- t(ic.results$centers) } else {
load <-ic.results$loadings} }} else {load <- ic.results}
if(!is.null(choose)) load <- load[,choose,drop=FALSE]
nc <- dim(load)[2]
nvar <- dim(load)[1]
#defined locally, so as to be able to pass a parameter to it
"panel.points" <-
function (x, y, pch = par("pch"), ...)
{ ymin <- min(y)
ymax <- max(y)
xmin <- min(x)
xmax <- max(x)
ylim <- c(min(ymin,xmin),max(ymax,xmax))
xlim <- ylim
if(show.points) points(x, y, pch = pch,ylim = ylim, xlim= xlim,...)
text(x,y,vnames,...)
}
if(missing(pos)) pos <- rep(1,nvar) #this allows more control over plotting
ch.col=c("black","blue","red","gray","black","blue","red","gray")
if (is.null(cluster)) {
cluster <- rep(nc+1,nvar)
cluster <- apply( abs(load) ,1,which.max)
cluster[(apply(abs(load),1,max) < cut)] <- nc+1
}
if (nc > 2 ) {
vnames <- labels #global variable
pairs(load,pch = cluster+pch,col=ch.col[cluster],bg=ch.col[cluster],main=title,lower.panel=panel.points,upper.panel=panel.points,...) }
else {
if(show.points) { plot(load,pch = cluster+pch,col=ch.col[cluster],bg=ch.col[cluster],main=title,...) } else {
plot(load,pch = cluster+pch,col=ch.col[cluster],bg=ch.col[cluster],main=title,type="n",...)
pos=NULL}
abline(h=0)
abline(v=0)}
if(is.null(labels)) labels <- paste(1:nvar)
if(nc < 3) text(load,labels,pos=pos,...)
}
"factor.plot" <-
function(ic.results,cluster=NULL,cut = 0.0,labels=NULL, title,jiggle=FALSE,amount=.02,pch=18,pos,show.points=TRUE,...) { #deprecated
fa.plot(ic.results,cluster=cluster,cut=cut,labels=labels,title=title,jiggle=jiggle,amount=amount,pch=pch,pos=pos,show.points=show.points,...)
}
"fa.plot" <-
function(ic.results,cluster=NULL,cut = 0.0,labels=NULL, title,jiggle=FALSE,amount=.02,pch=18,pos,show.points=TRUE,choose=NULL,main=NULL,...) {
if(missing(title) ) { title="Plot"
if (length(class(ic.results)) >1 ) {if (inherits(ic.results, "fa")) {title = "Factor Analysis"} else {
if (inherits(ic.results,"principal")) {title = "Principal Component Analysis"}
} }
}
if(missing(main)) {main<- title} else {title <- main} #getting rid of confusion from years ago
if (!is.matrix(ic.results)) {
if (!is.null(class(ic.results))) {
if(inherits(ic.results, "kmeans")) { load <- t(ic.results$centers) } else {
load <-ic.results$loadings} }} else {load <- ic.results}
if(is.null(colnames(load))) colnames(load) <- paste("F",1:ncol(load),sep="")
if(!is.null(choose)) load <- load[,choose,drop=FALSE]
nc <- dim(load)[2]
nvar <- dim(load)[1]
if(missing(pos)) pos <- rep(1,nvar) #this allows more control over plotting
ch.col=c("black","blue","red","gray","black","blue","red","gray")
if (is.null(cluster)) {
cluster <- rep(nc+1,nvar)
cluster <- apply( abs(load) ,1,which.max)
cluster[(apply(abs(load),1,max) < cut)] <- nc+1
}
#define this function inside the bigger function so that vnames is globabl to it
"panel.points" <-
function (x, y, pch = par("pch"), ...)
{ ymin <- min(y)
ymax <- max(y)
xmin <- min(x)
xmax <- max(x)
ylim <- c(min(ymin,xmin),max(ymax,xmax))
xlim <- ylim
if(show.points) points(x, y, pch = pch,ylim = ylim, xlim= xlim,...)
text(x,y,vnames,...)
}
if(jiggle) load <- jitter(load,amount=amount)
if (nc > 2 ) {
vnames <- labels #
pairs(load,pch = cluster+pch,col=ch.col[cluster],bg=ch.col[cluster],lower.panel=panel.points,upper.panel = panel.points,main=title,...) }
else {
if(show.points) { plot(load,pch = cluster+pch,col=ch.col[cluster],bg=ch.col[cluster],main=title,...) } else {
plot(load,pch = cluster+pch,col=ch.col[cluster],bg=ch.col[cluster],main=title,type="n",...)
pos=NULL}
abline(h=0)
abline(v=0)
if(is.null(labels)) labels <- paste(1:nvar)
text(load,labels,pos=pos,...)}
}
|
3cf0ccc52b06fa9f952193f329614ea649e027e6
|
f582870743276a16a61cde20a8018fdd199c764a
|
/NFL-ML.R
|
de9a99508bbd0d6b521c4374c7c341be34fca2b3
|
[] |
no_license
|
jordanodonnell138/NFL-ML
|
d636e1ce1b0187b0906cb3cb76defbda7669c705
|
8eb87fe95183efe9d765de3a290dc940d3aa6d0c
|
refs/heads/master
| 2020-04-14T04:12:59.820225
| 2017-03-14T03:51:19
| 2017-03-14T03:51:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,575
|
r
|
NFL-ML.R
|
#lets try to build a general model for all teams. Lets start with a middle of the road
#team. Maybe that will give better predictability for ALL teams when compared to
#training a model for only one team.
#last season, the Atlanta falcons went 8-8. They also have a points scored and
#points against that is only -6. The lowest of any team in the NFL last season.
#PCR has given the best sort of results. So lets drag, drop, and analyze.
rm(list = ls())
library(ISLR)
library(MASS)
library(boot)
library(tree)
library(glmnet)
library(leaps)
library(gam)
library(pls)
library(nflscrapR)
library(caret)
library(kernlab)
library(e1071)
library(randomForest)
library(SparseM)
# Reproducible setup and data load --------------------------------------------
set.seed(49)
setwd("C:/Users/Nathan/Downloads/NFL")

# 2015 NFL play-by-play data, one row per play.
NFL <- data.frame(read.csv('NFLPlaybyPlay2015.csv',stringsAsFactors = FALSE))

# FIX: dropped attach()/detach() (column-masking footgun) in favour of explicit
# NFL$ references. which() is retained so rows where the condition is NA are
# excluded, exactly as before.
team.all <- NFL[which(NFL$posteam == "DEN" | NFL$DefensiveTeam == "DEN"), ]
team.off <- which(NFL$posteam == "DEN")
team.Def <- which(NFL$DefensiveTeam == "DEN")
NFL.team <- team.all
NFL.team.off <- NFL[team.off, ]

# Offensive feature columns used to model the possessing team's score.
team.score.offense <- NFL.team.off[c('yrdln','yrdline100','ydsnet',
                                     'Yards.Gained','Touchdown','sp',
                                     'AbsScoreDiff','Drive',
                                     'TimeSecs','PosTeamScore','GoalToGo')]
print(cov(team.score.offense))
#end = which(qtr == 4 & TimeSecs == 0) - 1
#pdf('Histdrive.pdf')
#histogram(NFL$Drive)
#dev.off()
#var = rep(0,464)
#var[1:232] = NFL$PosTeamScore[end]
#var[233:464] = NFL$DefTeamScore[end]
#jpeg('Pointdistribution.jpeg')
#densityplot(var,xlab = "Actual score of the game")
#dev.off()
#############################################################
# Model fitting on the Denver offensive features ------------------------------
# Random-search repeated CV (3 folds, 2 repeats) used to pick the random
# forest's mtry.
ctrl = trainControl(method = "repeatedcv",
number = 3,
repeats = 2,
search = "random")
team.tree =train(PosTeamScore~.,data = team.score.offense,
method = "rf",
tuneLength = 15,
trControl = ctrl)
#Look like mtry is 9!
# NOTE(review): team.treefit (below) and team.svmpred are computed but never
# used again in this script (their print statement is commented out).
team.treefit = tree(PosTeamScore~.,data = team.score.offense)
# Random forest refit at the tuned mtry = 9. All MSEs printed in this section
# are IN-SAMPLE: predictions are made on the same data the models were fit on.
team.rf = randomForest(PosTeamScore~.,data = team.score.offense,
mtry = 9,ntree = 500)
team.rfpred = predict(team.rf,newdata = team.score.offense)
print(mean((team.rfpred - team.score.offense$PosTeamScore)^2))
# Radial-kernel SVM, followed by a grid search over epsilon and cost.
team.svm = svm(PosTeamScore~.,data = team.score.offense,kernel = "radial")
team.svmpred = predict(team.svm,newdata = team.score.offense)
team.tune = tune(svm,PosTeamScore~.,data = team.score.offense,
ranges = list(epsilon = seq(.05,.4,0.050),
cost = seq(from = 1, to = 40,by = 1)))
#We now have the best SVM model with our parameters.
#Now we can use the best model to make predictions.
team.tunebest = team.tune$best.model
team.tunepred = predict(team.tunebest,newdata = team.score.offense)
#print(mean((team.svmpred - team.score.offense$PosTeamScore)^2))
print(mean((team.tunepred - team.score.offense$PosTeamScore)^2))
# NOTE(review): for method = "boot", caret's trainControl takes the resample
# count from `number`; `repeats` applies to "repeatedcv" -- confirm intent.
ctrl = trainControl(method = "boot",
repeats = 10)
# NOTE(review): despite its name, team.plsfit is trained with method = "rpart"
# (a decision tree), not PLS. The plsr() call below is the actual PLS fit.
team.plsfit = train(PosTeamScore~.,data = team.score.offense,
method = "rpart",
tuneLength = 10,trControl = ctrl)
team.trainplspred = predict(team.plsfit,newdata = team.score.offense)
print(mean((team.trainplspred - team.score.offense$PosTeamScore)^2))
pls.fit = plsr(PosTeamScore~.,data = team.score.offense,ncomp = 10,
validation = "CV",scale = TRUE)
# Out-of-sample check: pull the 2016 season (through week 12) and score the
# fitted models against it for an "average MSE" across the season so far.
NFL2016 <- nflscrapR::season_play_by_play(2016, Weeks = 12)

# BUG FIX: the original filtered on the undefined object `NEWNFL`; the plays
# with a missing `down` must come from the data just pulled, NFL2016.
nadown <- which(is.na(NFL2016$down))
# Guard: `x[-integer(0), ]` would drop EVERY row, so only subset when there is
# at least one play to remove.
NFL.na <- if (length(nadown) > 0) NFL2016[-nadown, ] else NFL2016

NFL.svmpred <- predict(team.tunebest, newdata = NFL.na)
NFL.rfpred <- predict(team.rf, newdata = NFL.na)
NFL.plspred <- predict(team.plsfit, newdata = NFL.na)
print(mean((NFL.svmpred - NFL.na$PosTeamScore)^2))
print(mean((NFL.rfpred - NFL.na$PosTeamScore)^2))
print(mean((NFL.plspred - NFL.na$PosTeamScore)^2))
#NFL.offense = NFL.na[c('yrdln','yrdline100','ydsnet',
# 'Yards.Gained','Touchdown','sp',
# 'AbsScoreDiff','Drive',
# 'TimeSecs','PosTeamScore','GoalToGo')]
#NFL.pred = predict(team.tunebest,newdata = NFL.offense)
#print(mean((NFL.pred - NFL.na$PosTeamScore)^2))
# GAME ID for Miami versus SF November 27,2016
# END SCORE 31-24 MIAMI
# nflscrapR::game_play_by_play(2016112706)
# Game ID for Dallas versus SF October 2, 2016
# END SCORE 24-17 Dallas
# nflscrapR::game_play_by_play(2016100211) -> SF2016
# 2015 Superbowl(2016020700)
# 2013 Superbowl(2014020200)
# 2012 Superbowl(2013020300)
# 2016112001
# 2017 Superbowl(2017020500)
NFLGame <- function(gameid){
  # Fetch the play-by-play for one game and, for each of the two teams,
  # compare the score predictions from the three models fitted earlier in this
  # script (globals `team.tunebest` = tuned SVM, `team.rf` = random forest,
  # `team.plsfit` = caret fit) against the actual running score.
  #
  # Side effects: prints per-model in-game MSEs and the final score, and opens
  # one new device per team with an actual-vs-predicted plot.
  team.game <- nflscrapR::game_play_by_play(gameid)

  # Helper: report and plot predictions for one team's offensive plays.
  # (Refactors the original copy-pasted TEAM 1 / TEAM 2 sections; also fixes
  # the "mean squarederror" typo in the TEAM 1 PLS message.)
  report_team <- function(team.abbr, plot.title) {
    game.predictme <- team.game[which(team.game$posteam == team.abbr), ]
    svm.pred  <- predict(team.tunebest, newdata = game.predictme)
    tree.pred <- predict(team.rf, newdata = game.predictme)
    pls.pred  <- predict(team.plsfit, newdata = game.predictme)

    print(team.abbr)
    print('SVM of team in this game mean squared error')
    print(mean((game.predictme$PosTeamScore - svm.pred)^2))
    print('Tree model of team in this game mean squared error')
    print(mean((game.predictme$PosTeamScore - tree.pred)^2))
    print('PLS of team in this game mean squared error')
    dev.new()
    print(mean((game.predictme$PosTeamScore - pls.pred)^2))

    # Actual score trajectory as a line, model predictions as points.
    plot(game.predictme$PosTeamScore, type = 'l', lwd = 3)
    points(svm.pred, col = "green", lwd = 3)
    points(tree.pred, col = "red", lwd = 3)
    points(pls.pred, col = "blue", lwd = 3)
    legend('topleft', c("Actual score", "SVM", "Tree", "PLS"),
           lty = c(1, 3, 3, 3), lwd = c(3, 3, 3, 3),
           col = c("Black", "green", "red", "blue"))
    title(plot.title)

    print('Actual end score')
    print(max(na.omit(game.predictme$PosTeamScore)))
    print('~~~~~~~~~~~~~~~~~~~~')
  }

  teams <- unique(team.game$posteam)
  report_team(teams[1], 'TEAM 1 SCORE PREDICTION')
  report_team(teams[2], 'TEAM 2 SCORE PREDICTION')
}
|
575771bfd7e98c4327325bf1da7aa4ea5d3d1ceb
|
2f384b7e511afa94a1fe48a43ae295fea258d2fa
|
/plot3.R
|
4c07dcc79db0b11ce3d10b08fb4e24fcaf74d118
|
[] |
no_license
|
saran00us/ExData_Plotting1
|
1df648b833e047fd26dcb1103849bdf9184fa404
|
65667e68a1fa991c7088439d4ce7944f80c13acd
|
refs/heads/master
| 2021-01-18T05:21:28.793061
| 2015-09-11T22:33:05
| 2015-09-11T22:33:05
| 42,327,165
| 0
| 0
| null | 2015-09-11T19:05:56
| 2015-09-11T19:05:55
| null |
UTF-8
|
R
| false
| false
| 1,167
|
r
|
plot3.R
|
## plot3.R -- plot the three energy sub-metering series for 2007-02-01 and
## 2007-02-02 from the UCI household power consumption data, writing plot3.png.
setwd("/Users/saran/explore")
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
## Skip the (large) download when the archive is already on disk.
if (!file.exists("household_power_consumption.zip")) {
  download.file(fileUrl,destfile="household_power_consumption.zip",method="curl")
}
unzip(zipfile="./household_power_consumption.zip")
data <- read.table("./household_power_consumption.txt", header=TRUE, sep=";",stringsAsFactors=FALSE, dec=".")
## Keep only the two target dates (raw file uses d/m/yyyy).
twodaydata <- data[data$Date %in% c("1/2/2007","2/2/2007"),]
## FIX: removed the unused Globalactivepower variable (never referenced below).
Submetering1 <- as.numeric(twodaydata$Sub_metering_1)
Submetering2 <- as.numeric(twodaydata$Sub_metering_2)
Submetering3 <- as.numeric(twodaydata$Sub_metering_3)
## Combine date and time into POSIXlt timestamps for the x axis.
datetime <- strptime(paste(twodaydata$Date,twodaydata$Time, sep=" "),"%d/%m/%Y %H:%M:%S")
## Write the graph into the file.
png("plot3.png")
plot(datetime,Submetering1,type="l",xlab="",ylab="Energy sub metering")
lines(datetime,Submetering2,type="l",col="red")
lines(datetime,Submetering3,type="l",col="blue")
legend("topright",border="black",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lwd=3,
       col=c("black","red","blue"))
dev.off()
|
2ba5687d37007b043fddfad1fd3937a77236dc16
|
805326933defe8e1e7bae2b9f2bc0c36c84fff8d
|
/108-1-exam4.R
|
34b16bf6284d5f2d91e864afcf20224e966d64bc
|
[] |
no_license
|
astalee812/NTPU_108_R
|
39096402d2f2243ce85186a517ef4abd7786ebda
|
e6d8f24bac37f1f0b75c71882d4d889edc278325
|
refs/heads/master
| 2020-06-18T17:40:51.830590
| 2019-09-07T09:51:24
| 2019-09-07T09:51:24
| 196,385,231
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,757
|
r
|
108-1-exam4.R
|
# 1 # Exploratory look at the built-in ChickWeight data
# (chick weight over time under four diets).
head(ChickWeight)
summary(ChickWeight)
# Weight against time, plus their correlation.
plot(ChickWeight$Time,ChickWeight$weight,xlab = 'time',ylab = 'weight')
cor(ChickWeight$weight,ChickWeight$Time)
# Diet is a factor, so this formula plot draws side-by-side boxplots.
plot(ChickWeight$weight~ChickWeight$Diet,xlab = 'diet',ylab = 'weight')
# 2x2 panel of weight histograms, one per diet.
# NOTE(review): par() is changed without saving/restoring the old settings,
# so later single plots in this session inherit the 2x2 layout.
par(mfrow = c(2, 2))
hist(ChickWeight$weight[ChickWeight$Diet=='1'],breaks = 10,main = 'Histogram: Weight for Diet 1', xlab = "weight for diet1")
hist(ChickWeight$weight[ChickWeight$Diet=='2'],breaks = 10,main = 'Histogram: Weight for Diet 2', xlab = "weight for diet2")
hist(ChickWeight$weight[ChickWeight$Diet=='3'],breaks = 10,main = 'Histogram: Weight for Diet 3', xlab = "weight for diet3")
hist(ChickWeight$weight[ChickWeight$Diet=='4'],breaks = 10,main = 'Histogram: Weight for Diet 4', xlab = "weight for diet4")
# 2-1 # Heatmaps of score columns 5-11 read from a local file
# (requires the `fields` package for tim.colors).
score <- read.table("C:/Users/ASUS/Documents/GitHub/NTPU_108_R/data/score1032.txt",header=TRUE)
library(fields)
# Side-bar colours: one per row (rc) and one per selected column (cc).
rc <- tim.colors(nrow(score))
cc <- rainbow(ncol(score[,c(5:11)]), start = 0, end = .3)
heatmap(as.matrix(score[,c(5:11)]), scale = "none", RowSideColors = rc, ColSideColors = cc, main = "heatmap")
# Same data with explicit row/column dendrogram reordering enabled.
heatmap(as.matrix(score[,c(5:11)]), scale = "none", Rowv = T, Colv = T, RowSideColors = rc, ColSideColors = cc, main = "heatmap")
# 3 # Set up a regular (x, y) grid for evaluating a bivariate density:
# x runs over [-1, 3], y over [-2, 2], both with unit spacing.
x <- seq(from = -1, to = 3, by = 1)
y <- seq(from = -2, to = 2, by = 1)
# All combinations: x varies slowest (repeated blockwise), y cycles fastest.
xy <- data.frame(
  x = rep(x, each = length(y)),
  y = rep(y, times = length(x))
)
# Bivariate normal density f(x, y) for means (a, b), variances (c, d) and
# correlation e; vectorized over x and y.
#
# BUG FIX: the original added the cross term with a plus sign and divided it
# by the product of the MEANS (a*b). The standard density subtracts
# 2*e*(x-a)*(y-b) scaled by the product of the standard deviations sqrt(c*d).
my.dbvn <- function(x,y,a,b,c,d,e){
  # Quadratic form of the exponent.
  q <- (((x - a)^2) / c) + (((y - b)^2) / d) -
    (2 * e * (x - a) * (y - b)) / sqrt(c * d)
  fxy <- (1 / (2 * pi * sqrt(c) * sqrt(d) * sqrt(1 - (e ^ 2)))) *
    exp(-q / (2 * (1 - (e ^ 2))))
  fxy
}
# Evaluate the density at every grid point (printed, not stored).
my.dbvn(xy$x,xy$y,1,0,2,0.5,0.6)
# NOTE(review): the density column below is built from `x` and `y`, the
# length-5 axis vectors, so only 5 values are computed and recycled over the
# 25-row grid -- presumably xy$x / xy$y were intended; confirm.
xy <- data.frame(x=rep(x, each=length(y)), y=rep(y, length(x)),my.dbvn(x,y,1,0,2,0.5,0.6))
my.dbvn(xy$x,xy$y,1,0,2,0.5,0.6)
# Stray scratch computation (evaluates to 0.25); has no effect on the above.
4^(-1)
|
a9fcc15e0211bd5a26822155e98d58e67ff0e60b
|
c08b43bdb1e649fa998e1a8c48b848cf52fe27a0
|
/R/summarystatscorrelationbayesianpairs.R
|
db7f598eea2938ba5e4aba0db658f4f13976fde3
|
[] |
no_license
|
TimKDJ/jaspSummaryStatistics
|
019a57908278c95798f61803f42ec5f6f7b6fc80
|
e269c303fe1134446e4087590ed4c36be74adf8b
|
refs/heads/master
| 2023-05-27T22:19:55.580206
| 2021-06-17T12:29:46
| 2021-06-17T12:29:46
| 271,536,630
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,719
|
r
|
summarystatscorrelationbayesianpairs.R
|
#
# Copyright (C) 2013-2020 University of Amsterdam
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
SummaryStatsCorrelationBayesianPairs <- function(jaspResults, dataset=NULL, options, ...) {
  # Entry point for the summary-statistics Bayesian correlation analysis:
  # builds (or reuses) the results container, computes the model, and renders
  # the main table plus any requested inferential plots.
  # TODO(Alexander): When we allow for other test points, add a check like this
  #
  # .checkErrors.summarystats.binomial(options)

  # The analysis can only run once a sample size has been entered.
  ready <- options[["n"]] > 0

  container <- .getContainerCorSumStats(jaspResults)
  model <- .computeModelCorSumStats(container, options, ready)

  # The main results table is always produced.
  .createTableCorSumStats(container, model, options)

  # Plots only when at least one of them was requested.
  wantsPlots <- options[["plotPriorPosterior"]] || options[["plotBfRobustness"]]
  if (wantsPlots)
    .createPlotsCorSumStats(container, model, options)
}
# Execute Bayesian binomial test ----
.computeModelCorSumStats <- function(correlationContainer, options, ready) {
  # Compute (or fetch from cache) the Bayesian correlation results from the
  # summary statistics in `options`. Returns NULL until the input is ready.
  cached <- correlationContainer[["corModel"]]
  if (!is.null(cached))
    return(cached$object)

  if (!ready)
    return(NULL)

  # Which observed-statistic option applies depends on the method.
  statObs <- switch(options[["method"]],
                    pearson  = options[["rObs"]],
                    kendall  = options[["tauObs"]],
                    spearman = options[["rhoSObs"]])

  corResults <- bstats::bcor.testSumStat(n = options[["n"]], stat = statObs,
                                         alternative = options[["alternative"]],
                                         method = options[["method"]],
                                         ciValue = options[["ciValue"]],
                                         kappa = options[["kappa"]])
  pValue <- bstats::pValueFromCor(n = options[["n"]], stat = statObs,
                                  method = options[["method"]])

  # Merge the frequentist p-value into the Bayesian results list.
  results <- modifyList(corResults, pValue)

  # Propagate any computation error onto the container.
  if (!is.null(results[["error"]]))
    correlationContainer$setError(results[["error"]])

  # Cache the results; invalidated when the credible-interval width changes.
  correlationContainer[["corModel"]] <- createJaspState(results)
  correlationContainer[["corModel"]]$dependOn("ciValue")

  return(results)
}
.getContainerCorSumStats <- function(jaspResults) {
  # Lazily create the top-level container shared by the table and the plots.
  if (is.null(jaspResults[["correlationContainer"]])) {
    container <- createJaspContainer()
    # Everything inside is invalidated when any input statistic changes.
    container$dependOn(c("n", "method", "rObs", "tauObs",
                         "rhoSObs", "kappa"))
    jaspResults[["correlationContainer"]] <- container
  }
  return(jaspResults[["correlationContainer"]])
}
.createTableCorSumStats <- function(correlationContainer, corModel, options) {
  # Fill the main results table with either the model results or a
  # placeholder row. Skips when the table already exists for this state.
  if (!is.null(correlationContainer[["corBayesTable"]]))
    return()

  corBayesTable <- .createTableMarkupCorSumStats(options)

  # FIX: removed unused local `errorMessage` (assigned but never read).
  if (is.null(corModel) || correlationContainer$getError()) {
    # Not ready, or the model errored: show a placeholder row that still
    # echoes the observed statistic the user entered.
    statObs <- switch(options[["method"]],
                      pearson = options[["rObs"]],
                      kendall = options[["tauObs"]],
                      spearman = options[["rhoSObs"]])
    emptyIshRow <- list("n"=".", "stat"=statObs, "bf"=".", "p"=".")
    if (options[["ci"]])
      emptyIshRow <- modifyList(emptyIshRow, list("upperCi"=".", "lowerCi"="."))
    corBayesTable$addRows(emptyIshRow)
  } else {
    itemList <- c("n", "stat", "bf", "p")
    if (options[["ci"]])
      itemList <- c(itemList, "lowerCi", "upperCi")

    # Extract the one- or two-sided results matching the chosen alternative.
    sidedObject <- bstats::getSidedObject(corModel, alternative=options[["alternative"]])
    rowResult <- sidedObject[itemList]

    # Results are stored as BF10; convert to the requested display scale.
    if (options[["bayesFactorType"]] == "BF01")
      rowResult[["bf"]] <- 1/rowResult[["bf"]]
    else if (options[["bayesFactorType"]] == "LogBF10")
      rowResult[["bf"]] <- log(rowResult[["bf"]])

    corBayesTable$setData(rowResult)
  }

  correlationContainer[["corBayesTable"]] <- corBayesTable
}
.createTableMarkupCorSumStats <- function(options){
  # Build the (still empty) results table: title, citation, one-sided
  # footnote, and column layout. Rows are filled in by .createTableCorSumStats.
  corBayesTable <- createJaspTable(title=jaspRegression::.getCorTableTitle(options[["test"]], bayes=TRUE))
  corBayesTable$showSpecifiedColumnsOnly <- TRUE
  corBayesTable$position <- 1
  corBayesTable$dependOn(c("bayesFactorType", "ci", "ciValue", "alternative"))
  corBayesTable$addCitation(jaspRegression::.getCorCitations(options[["method"]], bayes=TRUE))

  # One-sided analyses get an explanatory footnote.
  alternative <- options[["alternative"]]
  if (alternative %in% c("greater", "less"))
    corBayesTable$addFootnote(jaspRegression::.getBfTableSidedFootnote(alternative=alternative, analysis="correlation"))

  bfTitle <- jaspRegression::.getBfTitle(options[["bayesFactorType"]], alternative)

  # The header of the observed-statistic column depends on the method.
  statName <- switch(options[["method"]],
                     pearson = gettext("r"),
                     kendall = gettext("tau"),
                     spearman = gettext("rho")
  )

  corBayesTable$addColumnInfo(name = "n", title = gettext("n"), type = "integer")
  corBayesTable$addColumnInfo(name = "stat", title = statName, type = "number")
  corBayesTable$addColumnInfo(name = "bf", title = bfTitle, type = "number")
  corBayesTable$addColumnInfo(name = "p", title = gettext("p"), type = "number")

  # Optional credible-interval columns under one shared over-title.
  if (options[["ci"]]) {
    overTitle <- gettextf("%s%% Credible interval", options[["ciValue"]] * 100)
    corBayesTable$addColumnInfo(name="lowerCi", overtitle=overTitle, type="number", title="Lower")
    corBayesTable$addColumnInfo(name="upperCi", overtitle=overTitle, type="number", title="Upper")
  }
  return(corBayesTable)
}
# Prior and Posterior plot ----
# Create the requested inferential plots (prior/posterior, BF robustness)
# inside a shared "Inferential Plots" container. Plots are created lazily:
# an existing plotObject is reused, and on error/missing model an empty
# placeholder plot remains.
.createPlotsCorSumStats <- function(correlationContainer, corModel, options) {
# a. Get plot container -----
#
plotContainer <- correlationContainer[["plotContainer"]]
if (is.null(plotContainer)) {
plotContainer <- createJaspContainer(title=gettext("Inferential Plots"))
plotContainer$dependOn("alternative")
plotContainer$position <- 2
correlationContainer[["plotContainer"]] <- plotContainer
}
# b. Define dependencies for the plots -----
# For plotPriorPosterior
#
bfPlotPriorPosteriorDependencies <- c("plotPriorPosteriorAddTestingInfo", "plotPriorPosteriorAddEstimationInfo", "plotPriorPosterior")
# The CI option only matters when estimation info is shown on the plot.
if (options[["plotPriorPosteriorAddEstimationInfo"]])
bfPlotPriorPosteriorDependencies <- c(bfPlotPriorPosteriorDependencies, "ciValue")
# For plotBfRobustness
#
bfPlotRobustnessDependencies <- c("plotBfRobustnessAddInfo", "plotBfRobustness")
if (options[["plotBfRobustnessAddInfo"]])
bfPlotRobustnessDependencies <- c(bfPlotRobustnessDependencies, "bayesFactorType")
plotItemDependencies <- list(
"plotPriorPosterior" = bfPlotPriorPosteriorDependencies,
"plotBfRobustness" = bfPlotRobustnessDependencies
)
plotItems <- jaspRegression::.getCorPlotItems(options, sumStat=TRUE)
# NOTE(review): `alternative` is assigned here but not used in this function.
alternative <- options[["alternative"]]
# c. Per plotItem add plot ------
#
for (i in seq_along(plotItems)) {
item <- plotItems[i]
jaspPlotResult <- plotContainer[[item]]
plotResult <- jaspPlotResult$plotObject
# d. Check if plot is in there ------
#
if (is.null(plotResult)) {
itemTitle <- jaspRegression::.bfPlotTitles[[item]]
jaspPlotResult <- createJaspPlot(title=itemTitle, width=530, height=400)
jaspPlotResult$dependOn(options = plotItemDependencies[[item]])
jaspPlotResult$position <- i
plotContainer[[item]] <- jaspPlotResult
# No model (or an error): leave the empty placeholder plot in place.
if (correlationContainer$getError() || is.null(corModel))
next
# NOTE(review): `plot` shadows graphics::plot here; it is only assigned
# for the two item kinds above, which is assumed to be all that
# .getCorPlotItems can return -- confirm.
if (item == "plotPriorPosterior")
plot <- jaspRegression::.drawPosteriorPlotCorBayes(correlationContainer, corModel, options, methodItems=options[["method"]], purpose="sumStat")
else if (item == "plotBfRobustness")
plot <- jaspRegression::.drawBfRobustnessPlotCorBayes(corModel, options, options[["method"]])
.checkAndSetPlotCorBayes(plot, jaspPlotResult)
}
}
}
.checkAndSetPlotCorBayes <- function(triedPlot, jaspPlotResult) {
  # The plot-drawing helpers can return a try-error, an error message string,
  # or an actual plot object; route each case onto the jasp plot result.
  failedWithCondition <- isTryError(triedPlot)
  if (failedWithCondition) {
    jaspPlotResult$setError(.extractErrorMessage(triedPlot))
  } else if (is.character(triedPlot)) {
    # A bare character result is itself the error message.
    jaspPlotResult$setError(triedPlot)
  } else {
    jaspPlotResult$plotObject <- triedPlot
  }
}
|
ff771fd91fc30d54b42c368c12c3d22bedc63b61
|
5ed44176b4e3716a44565d118283223c07b791a3
|
/R/AAA_TSP-package.R
|
a0d9a76ab4cfd3b556362a5b52aead2e81a5501c
|
[] |
no_license
|
mhahsler/TSP
|
d94ead22a9d3e3b44829477ff474ce458b857623
|
f274bf7098570943674f0e7ef3a281cda78a040e
|
refs/heads/master
| 2023-08-09T03:27:48.714780
| 2023-07-21T14:32:25
| 2023-07-21T14:32:25
| 43,990,993
| 66
| 17
| null | 2020-01-23T18:53:26
| 2015-10-10T02:54:22
|
R
|
UTF-8
|
R
| false
| false
| 1,101
|
r
|
AAA_TSP-package.R
|
#' @title `r packageDescription("TSP")$Package`: `r packageDescription("TSP")$Title`
#'
#' @description Basic infrastructure and some algorithms for the traveling salesperson problem (also traveling salesman problem; TSP). The package provides some simple algorithms and an interface to the Concorde TSP solver and its implementation of the Chained-Lin-Kernighan heuristic. The code for [Concorde](https://www.math.uwaterloo.ca/tsp/concorde/) itself is not included in the package and has to be obtained separately.
#'
#' @references Michael Hahsler and Kurt Hornik. TSP -- Infrastructure for the traveling salesperson problem. Journal of Statistical Software, 23(2):1--21, December 2007. \doi{10.18637/jss.v023.i02}
#'
#' @section Key functions:
#' - [solve_TSP()]
#'
#' @author Michael Hahsler
#' @docType package
#' @name TSP-package
#'
#' @importFrom stats as.dist dist
#' @importFrom utils read.table write.table head tail
#' @importFrom grDevices gray.colors
#' @importFrom graphics image.default plot polygon
#' @importFrom foreach foreach "%dopar%"
#' @useDynLib TSP, .registration=TRUE
NULL
|
de3ea82af5692e5195d5e10e3e82f4baf255daef
|
98b52689fed8aa8ecc3f1d6dc000adb7970f691f
|
/figures/figure-3.R
|
3cb8c3c523a360bd90aefd3b68fae22ecd4bc225
|
[] |
no_license
|
rsankowski/sankowski-et-al-microglia
|
73114ec0dff912ee2f9b9cb99bad4e7e62b07db2
|
d6c932feb4a2278993fa471a46e85be7398e206d
|
refs/heads/master
| 2021-08-29T14:01:00.386117
| 2021-08-16T11:26:59
| 2021-08-16T11:26:59
| 154,642,566
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 28
|
r
|
figure-3.R
|
#figure 3 plotting functions
|
2ce3fb03c76ba1d107e33c7b24e2fd800a117702
|
33e75096778794d0c3d8254933211e2ed155056f
|
/man/marginal.rcate.Rd
|
ab0d894355f41240c13daf5930570520f36c5521
|
[] |
no_license
|
changwn/RCATE
|
75224db7efe9d94391aec3c0822510d7495e992b
|
bc5d199592629b09eb65f635d024f7d04b249035
|
refs/heads/master
| 2022-12-05T22:01:19.306974
| 2020-08-23T13:02:08
| 2020-08-23T13:02:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 520
|
rd
|
marginal.rcate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/marginal.R
\name{marginal.rcate}
\alias{marginal.rcate}
\title{Marginal treatment effect plot.}
\usage{
marginal.rcate(object, variable.name = NULL, ...)
}
\arguments{
\item{object}{"rcate.ml", "rcate.rf" or "rcate.am" object.}
\item{variable.name}{the name of interested variable. Default is the name of the
first continuous variable.}
\item{...}{other.}
}
\description{
\code{marginal.rcate} Returns the marginal treatment effect plot.
}
|
4250b06bcefa4fc3b54cb38371c0b7ad0984a33a
|
0f0fc5307324e209d8f039e006db6d7bcbbe7ee9
|
/RFinance/man/addDataFrameToSQLiteDB.Rd
|
3a3106986ad46896cb1297184d112dbd9186a936
|
[] |
no_license
|
rootfs-analytics/RFinance
|
169eb6ec79778755321f1736eae5f13741771530
|
ec3c90e08c8d4418c5f4f3f7d1eea08ccc619f8b
|
refs/heads/master
| 2021-05-26T20:46:26.670261
| 2012-11-11T23:12:06
| 2012-11-11T23:12:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 536
|
rd
|
addDataFrameToSQLiteDB.Rd
|
\name{addDataFrameToSQLiteDB}
\alias{addDataFrameToSQLiteDB}
\title{
Add Data Frame to DB
}
\description{
Append a data frame to an existing SQLite table
}
\usage{
addDataFrameToSQLiteDB(con, tablename, df)
}
\arguments{
\item{con}{
Connection to the db
}
\item{tablename}{
Name of the table to add to
}
\item{df}{
The data frame to add
}
}
\details{
Adds the data frame to the SQLite table
}
\value{
}
\references{
}
\author{
Jeffrey Wong
}
\note{
}
\seealso{
SQLiteQuery
}
\examples{
}
|
6281155ef2ccb78c35c0c9e740ac0fa55d9998b3
|
a0830531052bd2330932c3a2c9750326cf8304fc
|
/vmstools/man/overlapPolygons.Rd
|
52b148702f18fdfceec5acc97cd4bcbd0ad48b22
|
[] |
no_license
|
mcruf/vmstools
|
17d9c8f0c875c2a107cfd21ada94977d532c882d
|
093bf8666cdab26d74da229f1412e93716173970
|
refs/heads/master
| 2021-05-29T20:57:18.053843
| 2015-06-11T09:49:20
| 2015-06-11T09:49:20
| 139,850,057
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,832
|
rd
|
overlapPolygons.Rd
|
\name{overlapPolygons}
\alias{overlapPolygons}
\title{
Calculate the surface area that polygons have in common
}
\description{
Calculate the surface area that 2 or more polygons have in common
}
\usage{
overlapPolygons(pol1 = NULL, pol2 = NULL, projection = "LL", zone = NULL)
}
\arguments{
\item{pol1}{
Polygon number 1. Can be of class 'data.frame' with first column Longitude and
second column Latitude. Can be of class 'PolySet' and can be of class 'SpatialPolygons'.
}
\item{pol2}{
Polygon number 2. Can be of class 'data.frame' with first column Longitude and
second column Latitude. Can be of class 'PolySet' and can be of class 'SpatialPolygons'.
}
\item{projection}{
Optional projection attribute to add (often "LL" for Longitude-Latitude).
}
\item{zone}{
Optional zone attribute to add.
}
}
\value{
Returns a data.frame with overlap in km2 (or non-projected units) with PID
referring to the combination of the two polygon sets.
}
\author{
Katell Hamon, Niels Hintzen
}
\seealso{
\code{\link{lonLat2SpatialPolygons}}, \code{\link{as.PolySet}}, \code{\link{surface}}
}
\examples{
#- Test with data.frame polygons
pol1 <- data.frame(cbind(c(2,3,3,2),c(54,54,55,55)))
pol2 <- data.frame(cbind(c(2,3,3,2),c(55,55,54.5,54.5)))
overlapPolygons(pol1,pol2)
#- Test with SpatialPolygons
pol1 <- lonLat2SpatialPolygons(SI_LONG=c(2,3,3,2),SI_LATI=c(54,54,55,55))
pol2 <- lonLat2SpatialPolygons(SI_LONG=c(2,3,3,2),SI_LATI=c(54.5,54.5,55,55))
overlapPolygons(pol1,pol2)
surface(pol1)@polygons[[1]]@Polygons[[1]]@area
#- Test with PolySet polygons
pol1 <- as.PolySet(data.frame(PID=rep(1,4),POS=1:4,X=c(2,3,3,2),Y=c(54,54,55,55)))
pol2 <- as.PolySet(data.frame(PID=rep(1,4),POS=1:4,X=c(2,3,3,2),Y=c(54.5,54.5,55,55)))
overlapPolygons(pol1,pol2)
#- Test with multiple polygons
data(tacsat)
pols1 <- cbind(s1=rep(2,length(seq(49,63,2))),s2=c(seq(49,63,2)))
pols2 <- cbind(s1=tacsat$SI_LONG[seq(2,nrow(tacsat),length.out=5)],s2=tacsat$SI_LATI[seq(2,nrow(tacsat),length.out=5)])
resx <- 1; resy <- 0.5
sPols1 <- lonLat2SpatialPolygons(lst=lapply(as.list(1:nrow(pols1)),
function(x){data.frame(SI_LONG=c(pols1[x,"s1"]-resx/2,rep(pols1[x,"s1"]+resx/2,2),pols1[x,"s1"]-resx/2),
SI_LATI=c(rep(pols1[x,"s2"]-resy/2,2),rep(pols1[x,"s2"]+resy/2,2)))}))
sPols2 <- lonLat2SpatialPolygons(lst=lapply(as.list(1:nrow(pols2)),
function(x){data.frame(SI_LONG=c(pols2[x,"s1"]-resx/2,rep(pols2[x,"s1"]+resx/2,2),pols2[x,"s1"]-resx/2),
SI_LATI=c(rep(pols2[x,"s2"]-resy/2,2),rep(pols2[x,"s2"]+resy/2,2)))}))
overlapPolygons(sPols1,sPols2)
}
|
f1f230919ff1ab9199d82b27a56be2b2d4dc99e3
|
b96e92d86bd142159e4674c59c6fbaf730049802
|
/man/vd_multiple_csv.Rd
|
244c2dd63578ed320a016b2d8fbf9c75cbe9de50
|
[] |
no_license
|
trinker/valiData
|
0ac536b9ed0435ff27f61973d949e9036fc8c1ac
|
59caaa67acaafb2508e90281812997464766d6f1
|
refs/heads/master
| 2022-06-09T05:59:46.696388
| 2022-05-12T18:25:54
| 2022-05-12T18:25:54
| 74,035,459
| 0
| 1
| null | 2016-11-17T14:37:24
| 2016-11-17T14:37:24
| null |
UTF-8
|
R
| false
| true
| 646
|
rd
|
vd_multiple_csv.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vd_multiple_csv.R
\name{vd_multiple_csv}
\alias{vd_multiple_csv}
\title{Validate Which Folders Contain Multiple CSVs}
\usage{
vd_multiple_csv(path, ...)
}
\arguments{
\item{path}{path to project directory}
\item{\dots}{ignored.}
}
\value{
Returns a list of validation results.
}
\description{
Validate Which Folders Contain Multiple CSVs
}
\examples{
dir_name <- file.path(tempdir(), "delete_me")
dir.create(dir_name)
dir(dir_name)
lapply(1:2, function(i) {
write.csv(mtcars, file = file.path(dir_name, sprintf('file_\%s.csv', i)))
})
vd_multiple_csv(dir_name)
}
|
188a0ae7caf4ca9d3e87f7aa4abf310ebe55458e
|
e22df57d0598d9e52dccc2b2a5f2386fb99afca8
|
/man/textmodel_wordfish.Rd
|
2b78863169d44eb2fa298d40c5a8d9a902657e30
|
[] |
no_license
|
pjsio/quanteda
|
8098b23135fe414ccd408192d2cf5bd3d6a05ef3
|
647e69abb6184057f22452329ae6daf82bc991c3
|
refs/heads/master
| 2020-12-30T18:30:20.116664
| 2015-10-27T09:48:57
| 2015-10-27T09:48:57
| 41,432,391
| 0
| 0
| null | 2015-08-26T15:06:39
| 2015-08-26T15:06:39
| null |
UTF-8
|
R
| false
| false
| 2,973
|
rd
|
textmodel_wordfish.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/textmodel-wordfish.R
\docType{methods}
\name{textmodel_wordfish}
\alias{print.textmodel_wordfish_fitted}
\alias{show,textmodel_wordfish_fitted-method}
\alias{show,textmodel_wordfish_predicted-method}
\alias{textmodel_wordfish}
\title{wordfish text model}
\usage{
textmodel_wordfish(data, dir = c(1, 2), priors = c(Inf, Inf, 3, 1),
tol = c(1e-06, 1e-08))
\method{print}{textmodel_wordfish_fitted}(x, n = 30L, ...)
\S4method{show}{textmodel_wordfish_fitted}(object)
\S4method{show}{textmodel_wordfish_predicted}(object)
}
\arguments{
\item{data}{the dfm on which the model will be fit}
\item{dir}{set global identification by specifying the indexes for a pair of
documents such that \eqn{\hat{\theta}_{dir[1]} < \hat{\theta}_{dir[2]}}.}
\item{priors}{priors for \eqn{\theta_i}, \eqn{\alpha_i}, \eqn{\psi_j}, and
\eqn{\beta_j} where \eqn{i} indexes documents and \eqn{j} indexes features}
\item{tol}{a pair of numeric convergence tolerances for the iterative estimation (default \code{c(1e-06, 1e-08)})}
\item{x}{for print method, the object to be printed}
\item{n}{max rows of dfm to print}
\item{...}{additional arguments passed to other functions}
\item{object}{wordfish fitted or predicted object to be shown}
}
\value{
An object of class textmodel_fitted_wordfish. This is a list
containing: \item{dir}{global identification of the dimension}
\item{theta}{estimated document positions} \item{alpha}{estimated document
fixed effects} \item{beta}{estimated feature marginal effects}
\item{psi}{estimated word fixed effects} \item{docs}{document labels}
\item{features}{feature labels} \item{sigma}{regularization parameter for
betas in Poisson form} \item{ll}{log likelihood at convergence}
\item{se.theta}{standard errors for theta-hats} \item{data}{dfm to which
the model was fit}
}
\description{
Estimate Slapin and Proksch's (2008) "wordfish" Poisson scaling model of
one-dimensional document positions using conditional maximum likelihood.
}
\details{
The returns match those of Will Lowe's R implementation of
\code{wordfish} (see the austin package), except that here we have renamed \code{words} to
be \code{features}. (This return list may change.) We have also followed the practice begun with
Slapin and Proksch's early implementation of the model that used a regularization parameter of
se\eqn{(\sigma) = 3}, through the third element in \code{priors}.
}
\examples{
ie2010dfm <- dfm(ie2010Corpus, verbose=FALSE)
wfmodel <- textmodel_wordfish(LBGexample, dir = c(6,5))
wfmodel
\dontrun{if (require(austin)) {
wfmodelAustin <- wordfish(quanteda::as.wfm(LBGexample), dir = c(6,5))
cor(wfmodel@theta, wfmodelAustin$theta)
}}
}
\author{
Benjamin Lauderdale and Kenneth Benoit
}
\references{
Jonathan Slapin and Sven-Oliver Proksch. 2008. "A Scaling Model
for Estimating Time-Series Party Positions from Texts." \emph{American
Journal of Political Science} 52(3):705-722.
}
|
765f4b61daae371a5bd7378089490caa9833a048
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/MissMech/R/OrderMissing.R
|
23ec22f2d8ccfa1261701b79def573dfa9901883
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,878
|
r
|
OrderMissing.R
|
OrderMissing <- function(data, del.lesscases = 0)
{
  # Reorder the rows of `data` so that cases sharing the same missing-data
  # pattern are contiguous. Case order in the output tracks the original data.
  #
  # Args:
  #   data          matrix or data frame (coerced to matrix).
  #   del.lesscases drop patterns with fewer than this many cases (via
  #                 DelLessData) when > 0.
  #
  # Returns an object of class "orderpattern" with components:
  #   data      - rows regrouped by pattern
  #   patused   - one row per pattern (1 = observed, NA = missing)
  #   patcnt    - number of cases per pattern; spatcnt - cumulative counts
  #   g         - number of distinct patterns
  #   caseorder - original row indices in the new order
  #   removedcases - filled in by DelLessData when del.lesscases > 0
  y <- data
  if (is.data.frame(y)) {
    y <- as.matrix(y)
  }
  if (!is.matrix(y))
  {
    cat("Warning data is not a matrix or data frame")
    stop("")
  }
  if (length(y) == 0)
  {
    cat("Warning: data is empty")
    # BUG FIX: the original used a bare `return` (which evaluates the
    # `return` function object and does NOT exit); execution then fell
    # through with n = 0 and corrupted the 1:n indexing below.
    return(NULL)
  }
  names <- colnames(y)
  n <- nrow(y)
  pp <- ncol(y)
  yfinal <- c()
  patused <- c()
  patcnt <- c()
  caseorder <- c()
  removedcases <- c()
  ordertemp <- c(1:n)
  ntemp <- n
  done <- FALSE
  yatone <- FALSE
  # Repeatedly peel off every row that shares the first remaining row's
  # missingness pattern until no rows are left.
  while (!done)
  {
    pattemp <- is.na(y[1, ])
    indin <- c()
    indout <- c()
    done <- TRUE
    for (i in 1:ntemp)
    {
      if (all(is.na(y[i, ]) == pattemp))
      {
        indout <- c(indout, i)
      } else {
        indin <- c(indin, i)
        done <- FALSE
      }
    }
    # A single leftover row would be dropped to a vector by y[indin, ],
    # so flag it and handle the final pattern explicitly below.
    if (length(indin) == 1) yatone <- TRUE
    yfinal <- rbind(yfinal, y[indout, ])
    y <- y[indin, ]
    caseorder <- c(caseorder, ordertemp[indout])
    ordertemp <- ordertemp[indin]
    patcnt <- c(patcnt, length(indout))
    patused <- rbind(patused, pattemp)
    if (yatone)
    {
      # y is now a plain vector holding the last remaining case.
      pattemp <- is.na(y)
      yfinal <- rbind(yfinal, matrix(y, ncol = pp))
      y <- c()
      indin <- c()
      indout <- c(1)
      caseorder <- c(caseorder, ordertemp[indout])
      ordertemp <- ordertemp[indin]
      patcnt <- c(patcnt, length(indout))
      patused <- rbind(patused, pattemp)
      done <- TRUE
    }
    if (!done) ntemp <- nrow(y)
  }
  caseorder <- c(caseorder, ordertemp)
  # Encode patterns as 1 = observed, NA = missing.
  patused <- ifelse(patused, NA, 1)
  rownames(patused) <- NULL
  colnames(patused) <- names
  spatcnt <- cumsum(patcnt)
  dataorder <- list(data = yfinal, patused = patused, patcnt = patcnt,
                    spatcnt = spatcnt, g = length(patcnt),
                    caseorder = caseorder, removedcases = removedcases)
  dataorder$call <- match.call()
  class(dataorder) <- "orderpattern"
  if (del.lesscases > 0)
  {
    dataorder <- DelLessData(dataorder, del.lesscases)
  }
  # Re-coerce patused to a matrix (DelLessData may have altered its shape).
  dataorder$patused <- matrix(dataorder$patused, ncol = pp)
  colnames(dataorder$patused) <- names
  dataorder
}
#Order <- function(x, ...) UseMethod("Order")
#Order.default <- function(x, ...) {
# temp <- OrderMissing(x)
# temp$call <- match.call()
# class(temp) <- "ordered"
# temp
#}
# print() method for "orderpattern" objects: shows the originating call,
# the number of distinct missingness patterns, and a table of patterns
# with their case counts.
print.orderpattern <- function(x, ...) {
  cat("Call:\n")
  print(x$call)
  # BUG FIX: corrected "Ptterns" / "Pttern" typos in the output messages
  cat("\nNumber of Patterns: ", x$g, "\n")
  cat("\nPattern used:\n")
  ni <- x$patcnt
  disp.patt <- cbind(x$patused, ni)
  colnames(disp.patt)[ncol(disp.patt)] <- "Number of cases"
  # do.NULL = FALSE generates "group.1", "group.2", ... labels
  rownames(disp.patt) <- rownames(disp.patt, do.NULL = FALSE, prefix = "group.")
  print(disp.patt, print.gap = 3)
}
# summary() method for "orderpattern" objects: summarizes the reordered
# data matrix stored in the object.
summary.orderpattern <- function(object, ...) {
  reordered <- object$data
  summary(reordered)
}
|
ea4d7ef74a2925e5a658a040df82df65471ff37c
|
8dc78f2d9755faec3452760a61e717c3f426f6c2
|
/network_construction_scale_free.R
|
bbc040e542013690538de1c3aad9ef2114401351
|
[] |
no_license
|
Kedong/supplynetworkresilience
|
fdcbc28d21dc508af92d8f8d9ffb0e16167059c8
|
a8f91124f43432c8d67983389420d2e4621c16c1
|
refs/heads/master
| 2021-08-28T09:49:05.318204
| 2017-12-11T22:35:16
| 2017-12-11T22:35:16
| 109,743,880
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,432
|
r
|
network_construction_scale_free.R
|
## This function constructs a directed supply network under preferential attachment
## Inputs: tier (focal=0), number of suppliers in each tier, starting with 1 as the focal
## Inputs: initial connection m0, if allow same tier connection (0 or 1),
## Inputs: m is # edges at each time, size of tier -- can be a vector and non-integer
## Inputs: power is by default 1 -- the power law alpha, 1=linear
## Inputs: zero.appeal -- The ‘attractiveness’ of the vertices with no adjacent edges, default=0
## Inputs: use_in_degree is by default 0 -- if 1, preferential attachment is based on in-degree only (Price, 1976)
## if same_tier == 0, m[1] is automatically transformed to be 1
## if m is a number, then avg edge is m; if vector, then avg tier edge is m; if matrix, strict integer
## if transpass-tier connection is allowed, the probability of connection to further tiers
## (starting from tier-2-to-0) is "trans_tier"^tier-diff
## if transpass-tier is allowed, the tier-k supplier must have at least one connection to tier-k-1
## if transpass-tier is allowed, start status matters -- open triad is 1 (default, flexible) and 2 (forced)
## while closed triad is 0
## number of suppliers in tier 1 should be at least 2
## Output: igraph object
## Developed by Kedong Chen
## Last update: 12/3/2017
# Construct a directed supply network via preferential attachment
# (Barabasi-Albert style). See the header comments above for the full
# parameter semantics; briefly:
#   tier          - number of supplier tiers below the focal firm (tier 0)
#   sup_num_tier  - node counts per tier, starting with the focal firm
#   m             - avg edges per new node (scalar or per-tier vector)
#   sigma_m       - sd of the normal draw around m for each new node
#   power         - preferential-attachment exponent (1 = linear)
#   zero.appeal   - baseline attractiveness of zero-degree vertices
#   use_in_degree - if 1, attachment uses in-degree only (Price, 1976)
#   same_tier     - if 1, same-tier and transpass-tier links are allowed
#   trans_tier    - per-tier-gap multiplier on transpass-link probability
#   start_status  - initial triad shape when same_tier==1 (0 closed,
#                   1 flexible, 2 forced open)
#   seed          - RNG seed for reproducibility
# Returns: list(sn_ba = <igraph object>) with vertex name/tier/color set.
network_construction_ba = function (tier, sup_num_tier, m, sigma_m=0.1, power=1, zero.appeal=0, use_in_degree=0, same_tier=0, trans_tier=1, start_status=1, seed){
  require(igraph)
  set.seed(seed)
  # m must be a scalar or supply one value per tier
  if (length(m)!=1 & length(m)!=tier) {
    cat("Wrong length of m \n")
    return()
  }
  # adjacency matrix
  adj_m = matrix(0, sum(sup_num_tier), sum(sup_num_tier))
  tier_start_index = cumsum(sup_num_tier) # cumulative sum standing for the end index of tier
  if (same_tier==0) { # strict tiered supply network
    # Tier-1 suppliers all connect to the focal firm (node 1)
    adj_m[(1+tier_start_index[1]):tier_start_index[2],1] = 1 # implying m[1]=1
    # from tier 2, start the preferential attachment with edges m
    for (i in 2:tier){
      # assumption: # edges m at each tier follows normal distribution with mean m[i] sd sigma
      if (length(m)!=1) {
        size_temp = round(rnorm(n = sup_num_tier[i+1], mean = m[i], sd = sigma_m), digits=0)
      } else {
        size_temp = round(rnorm(n = sup_num_tier[i+1], mean = m, sd = sigma_m), digits=0)
      }
      for (j in 1:sup_num_tier[i+1]) {
        colsum = colSums(adj_m) # for in-degree
        rowcolsum = rowSums(adj_m) + colsum
        # Attachment probability restricted to nodes of the previous tier
        if (use_in_degree) {
          prob_temp = colsum[(1+tier_start_index[i-1]):tier_start_index[i]]^power + zero.appeal
        } else {
          prob_temp = rowcolsum[(1+tier_start_index[i-1]):tier_start_index[i]]^power + zero.appeal
        }
        # Normalize to probabilities; fall back to uniform when all zero
        if (sum(prob_temp)!=0) {
          prob_pref_attach = prob_temp / sum(prob_temp)
        } else {
          prob_pref_attach = rep(1/length(prob_temp), length(prob_temp))
        }
        # assumption: preferential attachment is on both incoming & outgoing degrees...
        # (+1e-10 keeps sample() from rejecting all-zero weight vectors)
        adj_m[j+tier_start_index[i],(1+tier_start_index[i-1]):tier_start_index[i]][sample(c(1:sup_num_tier[i]),
          size=size_temp[j], replace=F, prob=prob_pref_attach+1e-10)] = 1
      }
    }
    sn_ba = graph_from_adjacency_matrix(adj_m)
  } else {
    # tier-1 special, starting from open triad (flexible)
    adj_m[2:3,1] = 1
    if (length(m)!=1) {
      size_temp = round(rnorm(sup_num_tier[2], mean = m[1], sd = sigma_m), digits=0)
    } else {
      size_temp = round(rnorm(sup_num_tier[2], mean = m, sd = sigma_m), digits=0)
    }
    # Close the triad only if either seed supplier draws more than one edge
    if (size_temp[1]>1 | size_temp[2]>1) {
      adj_m[3,2] = 1
    }
    if (start_status == 0) { # forced closed triad
      adj_m[3,2] = 1
    }
    if (start_status == 2) { # forced open triad
      adj_m[3,2] = 0
    }
    if (sup_num_tier[2]>2) {
      # we assume tier-k supplier MUST connect to at least one tier-k-1 supplier
      for (j in 3:sup_num_tier[2]) {
        colsum = colSums(adj_m)
        rowcolsum = rowSums(adj_m) + colsum
        if (use_in_degree) {
          prob_temp = colsum[1:j]^power + zero.appeal
        } else {
          prob_temp = rowcolsum[1:j]^power + zero.appeal
        }
        prob_pref_attach = prob_temp / sum(prob_temp)
        # Part I. the "must" portion -- connect to one tier-k-1
        # Part II. the "optional" portion -- may not need to connect if m[1] is 1
        if (size_temp[j]>1) {
          selected_temp = sample(c(1:j), size=size_temp[j], replace=F, prob=prob_pref_attach)
          if (1 %in% selected_temp) {
            adj_m[1+j,selected_temp] = 1
          } else {
            # Force the link to the focal firm, then draw the rest
            adj_m[1+j,1] = 1
            if (sum(prob_pref_attach[2:j])==0) {
              new_prob = rep(1/(j-1), j-1)
            } else {
              new_prob = prob_pref_attach[2:j]
            }
            adj_m[1+j,sample(c(2:j), size=size_temp[j]-1, replace=F, prob=new_prob)] = 1
          }
        } else {
          adj_m[1+j,1] = 1
        }
      }
    }
    # now begin tier-2 and so on...
    # prob_trans_index holds tier-gap exponents for the transpass penalty
    prob_trans_index = c(0,0)
    for (i in 2:tier){
      if (length(m)!=1) {
        size_temp = round(rnorm(n = sup_num_tier[i+1], mean = m[i], sd = sigma_m), digits=0)
      } else {
        size_temp = round(rnorm(n = sup_num_tier[i+1], mean = m, sd = sigma_m), digits=0)
      }
      prob_trans_index = c(i-1,prob_trans_index)
      for (j in 1:sup_num_tier[i+1]) {
        colsum = colSums(adj_m)
        rowcolsum = rowSums(adj_m) + colsum
        if (use_in_degree) {
          prob_temp = colsum[1:(tier_start_index[i]+j-1)]^power + zero.appeal
        } else {
          prob_temp = rowcolsum[1:(tier_start_index[i]+j-1)]^power + zero.appeal
        }
        # consider the reduced prob for transpass-connection
        trans_prob = rep(trans_tier^prob_trans_index, times=sup_num_tier[1:length(trans_tier^prob_trans_index)])
        prob_pref_attach = prob_temp/sum(prob_temp)*trans_prob[1:(tier_start_index[i]+j-1)]
        # Part I. the "must" portion -- connect to one tier-k-1
        # Part II. the "optional" portion -- may not need to connect if m[1] is 1
        if (size_temp[j]>1) {
          selected_temp = sample(c(1:(tier_start_index[i]+j-1)), size=size_temp[j], replace=F, prob=prob_pref_attach)
          if (sum(c((1+tier_start_index[i-1]):tier_start_index[i]) %in% selected_temp)>0) { # if any previous tier is selected...
            adj_m[j+tier_start_index[i],selected_temp] = 1
          } else {
            # tier-k-1 pick one
            if (sum(prob_pref_attach[c((1+tier_start_index[i-1]):tier_start_index[i])])==0) {
              new_prob = rep(1/(sup_num_tier[i]),sup_num_tier[i])
            } else {
              new_prob = prob_pref_attach[c((1+tier_start_index[i-1]):tier_start_index[i])]
            }
            tier_k_minus_1_picked = sample(c((1+tier_start_index[i-1]):tier_start_index[i]), size=1, replace=F, prob=new_prob)
            adj_m[j+tier_start_index[i], tier_k_minus_1_picked] = 1
            # all nodes pick one
            # delete that picked node first
            delete_that_node = c(1:(tier_start_index[i]+j-1))[-match(tier_k_minus_1_picked, c(1:(tier_start_index[i]+j-1)))]
            other_picked = sample(delete_that_node, size=size_temp[j]-1, replace=F, prob=prob_pref_attach[-match(tier_k_minus_1_picked, c(1:(tier_start_index[i]+j-1)))])
            adj_m[j+tier_start_index[i],other_picked] = 1
          }
        } else {
          selected_temp = sample(c(1:(tier_start_index[i]+j-1)), size=size_temp[j], replace=F, prob=prob_pref_attach)
          if (sum(c((1+tier_start_index[i-1]):tier_start_index[i]) %in% selected_temp)>0) {
            adj_m[j+tier_start_index[i], selected_temp] = 1
          } else {
            if (sum(prob_pref_attach[c((1+tier_start_index[i-1]):tier_start_index[i])])==0) {
              new_prob = rep(1/(sup_num_tier[i]),sup_num_tier[i])
            } else {
              new_prob = prob_pref_attach[c((1+tier_start_index[i-1]):tier_start_index[i])]
            }
            tier_k_minus_1_picked = sample(c((1+tier_start_index[i-1]):tier_start_index[i]), size=size_temp[j], replace=F, prob=new_prob)
            adj_m[j+tier_start_index[i],tier_k_minus_1_picked] = 1
          }
        }
      }
    }
    sn_ba = graph_from_adjacency_matrix(adj_m)
  }
  # Vertex attributes: v0 is the focal firm; tier/color label each level
  V(sn_ba)$name = paste0("v",0:(sum(sup_num_tier)-1))
  V(sn_ba)$tier = rep(c(0:tier),sup_num_tier)
  V(sn_ba)$color = 1+V(sn_ba)$tier
  return(list(sn_ba=sn_ba))
}
|
ed3edcf40fd5864761c430e92b9ec589d8ac7271
|
6927be295792e510cb2f5e1cc500fa24dffb9755
|
/2주차/문제2.R
|
d681cc03deeb727a496c84e150d912622db89fbf
|
[] |
no_license
|
jiin124/R
|
0ece0f0ae85b131800a3f1015d329d2b1a3bcc82
|
2c8d3bb3d55bda6aee9139a680b7765877e24f92
|
refs/heads/main
| 2023-07-18T13:56:16.077549
| 2021-08-30T17:22:41
| 2021-08-30T17:22:41
| 344,785,924
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 217
|
r
|
문제2.R
|
# Build a 4x5 matrix column-wise and explore basic subsetting operations.
vals <- c(1, 5, 0, 2, 5, 3, 4, 5, 2, 4, 3, 0, 6, 7, 3, 6, 7, 7, 3, 5)
m <- matrix(vals, nrow = 4, ncol = 5)
m
# Transpose of m.
t(m)
# First row kept as a 1x5 matrix (drop = FALSE prevents collapse to a vector).
r1 <- m[1, , drop = FALSE]
r1
# Third column kept as a 4x1 matrix.
c3 <- m[, 3, drop = FALSE]
c3
# Columns 2 and 4 bound side by side.
c4 <- cbind(m[, 2], m[, 4])
c4
# Entries 2..5 of row 1 reshaped into a 2x2 matrix (filled column-wise).
m1 <- matrix(m[1, 2:5], nrow = 2, ncol = 2)
m1
# Row means, then column means.
apply(m, 1, mean)
apply(m, 2, mean)
|
c4f52d27d2465eec88389ee8513b6fa3fc4409bf
|
6a782946ca5fa43ec97f15012799c4f01ecab16c
|
/Module 2/Lecture:HW/bioinfo_first.R
|
6704b9585c4793f634040fd560520903f1e5d913
|
[] |
no_license
|
htphan16/Bioinformatics-Spring-2019
|
5231beebffba503d79eacc9c87f575bb2b1258aa
|
836c0f5202e20c21702afb7901d3411a2cb05e97
|
refs/heads/master
| 2020-04-29T01:19:37.068559
| 2019-06-03T15:21:08
| 2019-06-03T15:21:08
| 175,726,493
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,049
|
r
|
bioinfo_first.R
|
# Exploratory OTU (operational taxonomic unit) community analysis:
# reads an OTU-by-sample count table, derives presence/absence and
# relative-abundance matrices, computes sample/OTU distances with vegan,
# and visualizes with PCoA, hierarchical clustering, and a heatmap.
# Get current working directory
getwd()
# Set new working directory
# setwd('~/Desktop/')
# Rows are OTUs (row.names = 1), columns are samples -- tab-separated
data <- as.matrix(read.table('Table1.txt', header=TRUE, sep='\t', row.names = 1))
data
dim(data)
# Total counts per OTU across all samples
oturichness <- rowSums(data)
oturichness
rowSums(data[1:2,])
colSums(data)
# Presence-absence of OTUs in each sample
# (logical > 0 matrix coerced to 0/1 by multiplying by 1)
dataPA <- (data>0)*1
dataPA
(data>0)
TRUE*1
# species richness
rich <- colSums(dataPA)
rich
# relative abundance
# Each sample column divided by its column total
dataREL <- data
dataREL[,1] = data[,1]/sum(data[,1])
dataREL[,2] = data[,2]/sum(data[,2])
dataREL[,3] = data[,3]/sum(data[,3])
dataREL[,4] = data[,4]/sum(data[,4])
dataREL
# Same computation as dataREL, written as a loop (teaching comparison)
dataREL2 <- data
for (i in 1:4) {
  dataREL2[,i] <- data[,i]/sum(data[,i])
}
dataREL2
colSums(dataREL2)
# transpose matrix
t(dataREL2)
# Distance among samples, and among OTUs
library(vegan)
# First parameter must be the rows of data among which we calculate distance
# Distance among samples in terms of presence-absence
samplePA.dist <- vegdist(t(dataPA), method='jaccard')
samplePA.dist
# Distance among OTUs in terms of presence-absence
otuPA.dist <- vegdist(dataPA, method='jaccard')
otuPA.dist
# Distance among samples in terms of relative abundance
sampleREL.dist <- vegdist(t(dataREL), method='bray')
sampleREL.dist
# Distance among OTUs in terms of relative abundance
otuREL.dist <- vegdist(dataREL, method='bray')
otuREL.dist
# Principal coordinates analysis
samplePA.pcoa <- cmdscale(samplePA.dist)
samplePA.pcoa
sampleREL.pcoa <- cmdscale(sampleREL.dist)
sampleREL.pcoa
# Hierarchical clustering of the sample distance matrices
samplePA.clust <- hclust(samplePA.dist)
samplePA.clust
sampleREL.clust <- hclust(sampleREL.dist)
sampleREL.clust
plot(samplePA.pcoa[,1], samplePA.pcoa[,2])
plot(sampleREL.pcoa[,1], sampleREL.pcoa[,2])
# quartz()
# 2x2 panel: PCoA (PA), PCoA (REL), dendrogram (PA), dendrogram (REL)
par(mfrow=c(2,2))
plot(samplePA.pcoa[,1], samplePA.pcoa[,2], cex=0)
text(samplePA.pcoa[,1], samplePA.pcoa[,2], seq(1,4), cex=1)
plot(sampleREL.pcoa[,1], sampleREL.pcoa[,2], cex=0)
text(sampleREL.pcoa[,1], sampleREL.pcoa[,2], seq(1,4), cex=1)
plot(samplePA.clust)
plot(sampleREL.clust)
heatmap(dataREL, scale='none', labCol=c('S1', 'S2', 'S3', 'S4'))
|
307b55a3290a5e6154f94b2aff6826ad7bd3efc2
|
e2465ed21b79e20648ff98d99d98d3546000c44f
|
/RCode/helper_functions/get_forecast_data.R
|
0f7556c5631a5ccff8ac86fed2a56bfe1c4587ba
|
[] |
no_license
|
GLEON/Bayes_forecast_WG
|
7b58fa482a6e6165ee0e11cb3c53c932c93db8bb
|
14f61371c22c8cdc1a00cadfa37e92273eec6e78
|
refs/heads/master
| 2023-05-21T22:01:08.370135
| 2020-04-09T16:07:58
| 2020-04-09T16:07:58
| 250,070,619
| 2
| 11
| null | 2023-01-24T02:58:49
| 2020-03-25T19:21:20
|
R
|
UTF-8
|
R
| false
| false
| 571
|
r
|
get_forecast_data.R
|
#Title: Reading in appropriate data files for hindcasts for each model
#Author: Mary Lofton
#Date: 03MAR20

# Load observed and forecast Gloeotrichia count data for a given model.
#
# Args:
#   model_name: character scalar naming the model variant.
# Returns rows 7:8 of the log-transformed forecast matrix for the three
# seasonal model variants (matching the original's return of the last
# assignment), or NULL for any other model name.
get_forecast_data <- function(model_name){
  seasonal_models <- c("Seasonal_RandomWalk", "Seasonal_RandomWalk_Obs_error", "Seasonal_AR")
  # BUG FIX: the original condition used a single `=` for the third
  # comparison (`model_name = "Seasonal_AR"`), which is a syntax error in
  # R; the ==/| chain is replaced by a membership test.
  if (model_name %in% seasonal_models) {
    # +0.003 offset avoids log(0) for weeks with zero counts
    y <- log(as.matrix(read_csv("./Datasets/Sunapee/SummarizedData/Midge_year_by_week_totalperL_22JUL19.csv"))+0.003)
    # NOTE(review): `y` is computed but never returned or used here --
    # presumably intended for the caller; confirm and return it if needed.
    forecast_y <- log(as.matrix(read_csv("./Datasets/Sunapee/SummarizedData/Midge_year_by_week_totalperL_forecast_05OCT19.csv"))+0.003)
    # Keep only the two forecast rows of interest
    forecast_y <- forecast_y[7:8,]
  }
}
|
7e5b443f318cfa8e6bd44e996b140f0c2793c060
|
bf88ed37a6e7769a73fe2e80c95132578de04a97
|
/superbowlsquares/dashboard/SuperBowlBoxes/app.R
|
6e2900faf973d10f7b29043ebd5f17632928d43c
|
[] |
no_license
|
mikemaieli/projects
|
82785a4f8e6b50252ddb34b77ec76edb83fb6394
|
a73a8092eb51480bda22868c680ddc5e8a8989d5
|
refs/heads/main
| 2023-07-14T19:05:48.762651
| 2023-07-03T12:53:47
| 2023-07-03T12:53:47
| 300,467,230
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,898
|
r
|
app.R
|
# Shiny dashboard: historical Super Bowl "squares" box winners.
# Shows how often each (AFC last digit, NFC last digit) combination wins,
# filterable by year range and quarter.
# load packages
library(shiny)
library(tidyverse)
library(hrbrthemes)  # NOTE(review): loaded but not obviously used below -- confirm
# load and mutate data
# Pulls quarter-by-quarter Super Bowl scores from GitHub; requires
# network access at app start-up
score_data <- read_csv("https://raw.githubusercontent.com/mikemaieli/superbowlsquares/master/superbowlscores.csv")
# build UI
ui <- fluidPage(
  # Application title
  titlePanel("Historical Super Bowl Box Winners"),
  ("The table below shows how often each combination of super bowl boxes win over the past 50 years. **Dashboard is a work in progress developed by Mike Maieli**"),
  br(),
  br(),
  br(),
  # Heatmap of winning digit combinations (rendered by the server)
  plotOutput("heatmap"),
  br(),
  hr(),
  # Sidebar with a slider input for number of bins
  fluidRow(
    column(6,
           h4("Filter The Data"),
           # Year range filter; sep = "" suppresses thousands separators
           sliderInput("yearInput",
                       "Year",
                       min = 1960,
                       max = 2020,
                       value = c(1967, 2019),
                       sep = "",
                       ticks = 10),
    ),
    column(6,
           h4("Choose the year"),
           # Quarter filter; "all" disables the quarter restriction
           radioButtons("quarterInput", "Quarter",
                        choices = c("All" = "all",
                                    "1st quarter" = "1",
                                    "2nd quarter" = "2",
                                    "3rd quarter" = "3",
                                    "4th quarter" = "4",
                                    "Overtime" = "5"),
                        selected = "all"),
    )
  )
)
# build server
server <- function(input, output, session) { digit_counts <- reactive({
if (input$quarterInput == "all") {
score_data %>%
mutate(afc_digit = afc_total_score %% 10, nfc_digit = nfc_total_score %% 10) %>%
select(year, superbowl, quarter, afc_digit, nfc_digit) %>%
mutate_all(as.character) %>%
filter(year >= input$yearInput[1],
year <= input$yearInput[2]) %>%
group_by(afc_digit, nfc_digit) %>%
summarize(occurances = n())
} else {
score_data %>%
mutate(afc_digit = afc_total_score %% 10, nfc_digit = nfc_total_score %% 10) %>%
select(year, superbowl, quarter, afc_digit, nfc_digit) %>%
mutate_all(as.character) %>%
filter(year >= input$yearInput[1],
year <= input$yearInput[2],
quarter == input$quarterInput) %>%
group_by(afc_digit, nfc_digit) %>%
summarize(occurances = n())
}})
# build output
output$heatmap <- renderPlot({
ggplot(digit_counts(), aes( x = afc_digit,
y = nfc_digit)) +
geom_tile(aes(fill = occurances), color = "black") +
geom_text(aes(label = scales::percent((occurances/sum(digit_counts()$occurances)))),
color = "white",
size = 6,
fontface = "bold") +
scale_fill_gradient(low = "cadetblue", high = "darkslategray", na.value = "white") +
scale_x_discrete(position = "top",
limits = c("0","1","2","3","4","5","6","7","8","9")) +
scale_y_discrete(limits = rev(c("0","1","2","3","4","5","6","7","8","9"))) +
labs(x = "AFC",
y = "NFC") +
theme_minimal() +
theme(panel.grid.major = element_blank(),
legend.position = "none",
axis.text = element_text(size = 16),
axis.title.y = element_text(margin = margin(t = 0, r = 20, b = 0, l = 0),
size = 16,
face = "bold"),
axis.title.x = element_text(margin = margin(t = 0, r = 0, b = 0, l = 0),
size = 16,
face = "bold")) +
geom_vline(xintercept = c(.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,10.5), color = "black", size = .3) +
geom_hline(yintercept = c(.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,10.5), color = "black", size = .3)
})}
# create app
shinyApp(ui = ui, server = server)
|
c0979f70ef95465c4f404ec0f0764eab4a28735f
|
cb44bd2723a9e83fcd79f319e4d5303fd3298178
|
/cachematrix.R
|
216d2b527e1b80879b17e11d431bbb2f91152a36
|
[] |
no_license
|
kingofharts/ProgrammingAssignment2
|
356d7118c0d6d049ace2cee2789f7cf9a84dfc2a
|
98c5ec562e1f099b6ded191cb7e220ca4033cfbd
|
refs/heads/master
| 2021-01-17T07:58:00.418939
| 2014-04-21T21:45:59
| 2014-04-21T21:45:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,831
|
r
|
cachematrix.R
|
## Reply to Peer Assessed 2nd Programming Assignment for R Programming
## nicholas.paul.hartman
## Create list of functions to set, get, invert, and retrieve the
## inversion of a Matrix
## Create a cache-enabled matrix wrapper: a list of four closures that
## share a matrix `x` and a lazily computed inverse in their enclosing
## environment. Setting a new matrix invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Replace the stored matrix and drop any stale inverse
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(solve) {
    cached_inverse <<- solve
  }
  getinverse <- function() {
    cached_inverse
  }
  # Expose the four accessors as a named list
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Return cached - or cache if uncached and then return -
## inversion of a matrix
## `x` must be a cache object as produced by makeCacheMatrix.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result
    mat <- x$get()
    inverse <- solve(mat, ...)
    x$setinverse(inverse)
    return(inverse)
  }
  # Cache hit: announce and reuse the stored inverse
  message("getting cached inverse")
  cached
}
|
865a6afd8c9f2b1db6e53914fc24aeea8fdddf7e
|
3cd8f6adb931d82ceea55cd58a746f4a0d5ec1fa
|
/Week 4/Programming Assignment 3/rankall.R
|
a193a6fcab7b2b1493f9dd4b213d905b49126171
|
[] |
no_license
|
krozic/JHU-R
|
c1a415fd9d9fb083cc21e146c4f7810e1c36e3af
|
564efc456aaea27803a4b88626fd7427079788b2
|
refs/heads/master
| 2022-07-03T04:04:16.161522
| 2020-05-10T22:22:05
| 2020-05-10T22:22:05
| 262,083,418
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,563
|
r
|
rankall.R
|
# Rank hospitals in every state by 30-day mortality for a given outcome.
#
# Args:
#   outcome: one of "heart attack", "heart failure", "pneumonia".
#   num:     rank to report per state: "best", "worst", or an integer.
# Returns a data frame with columns `hospital` and `state`, one row per
# state (hospital is NA when the requested rank exceeds the number of
# hospitals with data in that state). Ties are broken alphabetically by
# hospital name; rows with missing outcome data are dropped.
rankall <- function(outcome, num = "best") {
    outcome_data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
    outcomes <- c("heart attack", "heart failure", "pneumonia")
    # Validate the outcome before using it to index the column lookup
    if (!(outcome %in% outcomes)) {
        stop("invalid outcome")
    }
    # BUG FIX: the original checked `state %in% outcome_data$State` here,
    # but `state` is undefined until the loop below, so every call failed
    # with "object 'state' not found"; rankall covers all states, so no
    # state argument exists to validate.
    col_vals <- c(11, 17, 23)
    names(col_vals) <- outcomes
    col_num <- as.numeric(col_vals[outcome])
    all <- data.frame()
    states <- sort(unique(outcome_data$State))
    for (state in states) {
        state_data <- as.data.frame(split(outcome_data, outcome_data$State)[state])
        # Order by outcome value, then hospital name; NAs dropped
        ordered_data <- state_data[order(as.numeric(state_data[, col_num]), state_data[, 2], na.last = NA),]
        max_rank <- length(ordered_data[, col_num])
        if (num == "best") {
            hospital <- ordered_data[1, 2]
        } else if (num == "worst") {
            hospital <- ordered_data[max_rank, 2]
        } else if (num > max_rank) {
            hospital <- NA
        } else {
            hospital <- ordered_data[num, 2]
        }
        all <- rbind(all, c(hospital, state))
    }
    rownames(all) <- states
    colnames(all) <- c("hospital", "state")
    all
}
|
421d49b20e187969efc1d38ca31faf7f412d5538
|
0c6404d89e67b07beaf5e77dcb05d93846a237cd
|
/Customer Segmentation/MKTProject1-Team4.R
|
02cfeae19b974a0a0f4d4ef2f1e7b4df8be61a17
|
[] |
no_license
|
nbansal2020/Marketing
|
a4769f0fbd36a304dcaf916b444823066a2fdb6c
|
85df71375257bf1bee9a1372314087a3480c5288
|
refs/heads/main
| 2023-03-29T21:24:05.018353
| 2021-04-13T02:12:45
| 2021-04-13T02:12:45
| 341,675,155
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,372
|
r
|
MKTProject1-Team4.R
|
library(readr)
library(dplyr)
library(plotly)
library(lubridate)
library(tidyverse)
library(cluster) #need for knn
library(factoextra) #need for knn
library(GGally)
library(plotly)
# IMPORTING DATA ----------------------------------------------------------
# Grocery retail data: product catalog and line-level transactions
product <- read_csv("product_table.csv")
transaction <- read_csv("transaction_table.csv")
# No NAs in the data files
# NOTE(review): these is.na() results are printed/discarded, not checked
# programmatically -- they are interactive sanity checks only
is.na(product)
is.na(transaction$tran_prod_sale_amt)
is.na(transaction)
# DATA CLEANING -----------------------------------------------------------
# Examining the structure of data - all the variable types
str(product)
# convert IDs into factors from double/integer
product$prod_id <- as.factor(product$prod_id)
product$subcategory_id <- as.factor(product$subcategory_id)
product$category_id <- as.factor(product$category_id)
str(transaction)
transaction$cust_id <- as.factor(transaction$cust_id)
transaction$tran_id <- as.factor(transaction$tran_id)
transaction$store_id <- as.factor(transaction$store_id)
transaction$prod_id <- as.factor(transaction$prod_id)
# Remove transactions that contain "bags" as these are the bags purchased during check out and do not really help our marketing strategy
transaction <- subset(transaction, prod_id != 999231999)
# Look for transaction amount paid that has negative values. It looks like the discount amount is larger than the purchase amount. This could be a return that was discounted to return the initial money back. There are a total of 8 transactions that satisfy this. We need to match them back to their original purchase transactions and remove those as well so there are no discrepancies.
neg_trans_return <- transaction[transaction$tran_prod_paid_amt < 0,]
transaction <- subset(transaction, tran_prod_paid_amt > 0)
# Removing all of the original transactions to avoid double counting transactions and projecting higher sales than actual.
# BUG FIX: the original evaluated these subsets without assigning the
# result back, so no rows were ever removed. Each filter is now assigned
# back to `transaction`, the duplicated first filter is collapsed to one,
# and the date literals are quoted (the original compared tran_dt against
# the unquoted arithmetic expression 2016-03-26, i.e. the number 1987).
transaction <- transaction[!(transaction$cust_id == 93409897 & transaction$prod_id == 357541011 & transaction$prod_unit_price == 0.55 & transaction$store_id == 340),]
transaction <- transaction[!(transaction$cust_id == 73479594 & transaction$prod_id == 999241421 & transaction$prod_unit_price == 16.90),]
transaction <- transaction[!(transaction$cust_id == 40099908 & transaction$prod_id == 999250092 & transaction$prod_unit_price == 1.59 & transaction$store_id == 344),]
transaction <- transaction[!(transaction$cust_id == 51749812 & transaction$prod_id == 999264989 & transaction$prod_unit_price == 0.30 & transaction$store_id == 325),]
transaction <- transaction[!(transaction$cust_id == 42509966 & transaction$prod_id == 999295518 & transaction$prod_unit_price == 3.59 & transaction$store_id == 984 & transaction$tran_dt == "2016-03-26"),]
transaction <- transaction[!(transaction$cust_id == 16339676 & transaction$prod_id == 999436833 & transaction$prod_unit_price == 5.49 & transaction$store_id == 576 & transaction$tran_dt == "2016-03-26"),]
transaction <- transaction[!(transaction$cust_id == 7869780 & transaction$prod_id == 999476721 & transaction$prod_unit_price == 3.29 & transaction$store_id == 988),]
# The transaction ID in the dataset does not uniquely identify each transaction due to its large number format. So we will create a new column that will concatenate the customer id, store id, and transaction date columns. We will do this under the assumption that each customer only visits a store once during the day and this will allow us to group together all the products purchased into a single transaction.
transaction$new_tran_id <- paste(transaction$cust_id, transaction$store_id, transaction$tran_dt, sep="-")
View(transaction)
# We need to check whether each transaction amount checks out, We can do this by subtracting the discount amount from the total amount and verifying if this value equals the amount paid.
transaction$verify_total_amt <- transaction$tran_prod_sale_amt + transaction$tran_prod_discount_amt
# NOTE(review): identical() returns a single TRUE/FALSE for the whole
# pair of columns, which is then recycled into every row of verify_total
# -- this is not a per-row check, and exact float equality may fail;
# consider abs(a - b) < tol per row instead.
transaction$verify_total <- identical(transaction[['verify_total_amt']],transaction[['tran_prod_paid_amt']])
subset(transaction, verify_total == TRUE) # All values add up
# Extract day of the week from to see what days are most popular
transaction$day <- weekdays(as.Date(transaction$tran_dt))
# Transaction-line counts per weekday, busiest first
pop_day <- transaction %>%
  group_by(day) %>%
  count() %>%
  arrange(desc(n))
pop_day #Saturday is the busiest day followed by friday, mondays are the lowest
ggplot(transaction, aes(day)) + geom_bar()
# Amount of money spent on any given day of the week (aggregate)
amount_per_day <- transaction %>%
  group_by(day) %>%
  summarise(total = sum(tran_prod_paid_amt))
amount_per_day #Mondays are the slowest days so this is useful as we can create some incentive for people on Mondays
# DATA EXPLORATION --------------------------------------------------------
# Interactive summaries by customer, store, and product; results are
# printed and a few subsets of interest are captured for later reference.
## Customers
# Which customer has the most transactions?
transaction %>%
  group_by(cust_id, store_id) %>%
  count(new_tran_id) %>%
  arrange(desc(n)) #customer 92619600 shopped at store 543 - 334 times
# Which customer has spent the most?
transaction %>%
  group_by(cust_id) %>%
  summarize_at("tran_prod_paid_amt", sum) %>%
  arrange(desc(tran_prod_paid_amt)) #customer 96879682 has spent the most on transactions - 14020 euros
# Which customer has spent the most per store?
transaction %>%
  group_by(cust_id, store_id) %>%
  summarize_at("tran_prod_paid_amt", sum) %>%
  arrange(desc(tran_prod_paid_amt)) #customer 13489682 has spent the most (13339 euros) at store 695.
# Which customer has bought the most products?
transaction %>%
  group_by(cust_id, prod_id) %>%
  count() %>%
  arrange(desc(n)) #customer 72999968 and 72999968 bought the most products - 688 and 674 units of bread respectively
# Other popular products among customers with high transactions are - beverage mixers, beer, bread, frozen bread, coffee, and milk.
popular_prods <- subset(product, prod_id == 999251927 | prod_id == 999951864 | prod_id == 999746519 | prod_id == 999478576 | prod_id == 999192126 | prod_id == 999742491 | prod_id == 999305477)
# Which customers buy the most using coupons/offers?
transaction %>%
  group_by(cust_id) %>%
  summarize_at("tran_prod_offer_cts", sum) %>%
  arrange(desc(tran_prod_offer_cts)) # we can look at these customers and see what they usually buy since they use the most offers
cust_most_offers <- subset(transaction, cust_id == 73979986 | cust_id == 18239665 | cust_id == 80579664)
## Store
# Which store has made the highest revenue?
transaction %>%
  group_by(store_id) %>%
  summarize_at("tran_prod_paid_amt", sum) %>%
  arrange(desc(tran_prod_paid_amt)) #store 342 has made the highest revenue (784655 euros) combined of all customers
# What is the average discount per store?
discount <- transaction %>%
  group_by(store_id) %>%
  summarize_at("tran_prod_discount_amt", sum) %>%
  arrange(desc(tran_prod_discount_amt))
median(discount$tran_prod_discount_amt) # Median discount per store of $23092
discount_per_prod <- transaction %>%
  group_by(store_id, prod_id) %>%
  summarize_at("tran_prod_discount_amt", sum) %>%
  arrange(desc(tran_prod_discount_amt))
median(discount_per_prod$tran_prod_discount_amt) # Median discount per product in store is 0.79
# Which store gives out the most offers?
transaction %>%
  group_by(store_id) %>%
  summarize_at("tran_prod_offer_cts", sum) %>%
  arrange(desc(tran_prod_offer_cts)) #store 349, 342, 345, 344 have given the most offers
store_most_offers_prods <- subset(transaction, store_id %in% c(349, 342, 345, 344, 343, 346, 341, 347, 588, 157, 994, 335, 321, 331, 525, 572, 307, 315, 332, 988, 395, 996, 627, 348, 673))
store_most_offers_prods %>%
  group_by(prod_id) %>%
  count(prod_id) %>%
  arrange(desc(n)) # This will give us the most popular products sold at the stores that give out the most offers
# carrots, banana, milk, onion, citrus fruit, mineral water, onion, zucchini
pop_prods_offers <- subset(product, prod_id %in% c(999956795, 999361204, 999953571, 999680491, 999712725, 999401500, 999951863, 999957158))
## Products
# What are the products that are most bought?
transaction %>%
  group_by(prod_id) %>%
  count() %>%
  arrange(desc(n))
pop_prod <- subset(product, prod_id == 999956795 | prod_id == 999361204 | prod_id == 999951863 | prod_id == 999746519 |prod_id == 999401500 |prod_id == 999712725 |prod_id == 999749894 |prod_id == 999953571 |prod_id == 999356553 | prod_id == 999957158)
View(pop_prod) # Most bought products are - sugar, carrots, mineral water, onion, drinks, fresh pork, milk, citrus, banana, and zucchini
# Which is the most expensive product and what is the distribution of product prices in the dataset? Histogram
# NOTE(review): this pipe prints the grouped tibble; print(max(unit_prod_price))
# does not compute a per-product max (and unit_prod_price is likely a typo
# for prod_unit_price) -- verify the intended computation.
transaction %>%
  group_by(prod_id) %>%
  print(max(unit_prod_price))
ggplot(transaction, aes(prod_unit_price)) +
  geom_histogram(binwidth=1) +
  xlim(0,25) #No item sold for $0 so nothing is completely free
# What are the products that are most bought using offers/coupons?
transaction %>%
  group_by(prod_id) %>%
  summarize_at("tran_prod_offer_cts", sum) %>%
  arrange(desc(tran_prod_offer_cts))
#Carrot, coffee, banana, oil, tomato, onion, milk, green bean, zucchini are most bought using coupons
prod_most_coupons <- subset(product, prod_id == 999956795 | prod_id == 999361204 | prod_id == 999746519 | prod_id == 999951863 | prod_id == 999712725 | prod_id == 999967197 | prod_id == 999957158 | prod_id == 999957157 | prod_id == 999421692 | prod_id == 999626930)
View(prod_most_coupons)
# Which products generate the highest revenue?
transaction %>%
  group_by(prod_id) %>%
  summarise(high_rev = sum(tran_prod_paid_amt)) %>%
  arrange(desc(high_rev))
high_rev_prod = subset(product, prod_id == 999749469 | prod_id == 999956795 | prod_id == 999749894 | prod_id == 999455829 | prod_id == 999649801 | prod_id == 999747259 | prod_id == 999557956 | prod_id == 999955966 | prod_id == 999749460 | prod_id == 999696393)
View(high_rev_prod)
# KMEANS CLUSTERING -------------------------------------
# Customer
# Variable 1: How many units of a product did a person purchase
prod_purch <- transaction %>%
group_by(cust_id, prod_unit) %>%
summarise(num_prod = sum(tran_prod_sale_qty)) %>%
arrange(desc(num_prod))
View(prod_purch)
# Pivoting the table so we have counts separated from KG
prod_purch <- prod_purch %>%
pivot_wider(names_from = prod_unit, values_from = num_prod)
View(prod_purch)
# Variable 2: How much did each person spend
amount_spent <- transaction %>%
group_by(cust_id) %>%
summarise(amt_spent = sum(tran_prod_paid_amt)) %>%
arrange(desc(amt_spent))
View(amount_spent)
# Variable 3: How many items did they buy in 1 transaction
num_products <- transaction %>%
group_by(cust_id) %>%
count() %>%
arrange(desc(n))
View(num_products)
# Variable 4: How many coupons did they use
num_coupons <- transaction %>%
group_by(cust_id) %>%
summarize_at("tran_prod_offer_cts", sum) %>%
arrange(desc(tran_prod_offer_cts))
View(num_coupons)
# Variable 5: How much of a discount are they getting
discount_total <- transaction %>%
group_by(cust_id) %>%
summarise(discount = sum(tran_prod_discount_amt)) %>%
arrange(desc(discount))
View(discount_total)
# Making a dataframe of all the variables we want to include in our Customer Clustering
knn_df <- data.frame(prod_purch, amount_spent, num_products, num_coupons, discount_total)
knn_df <- subset(knn_df, select = c("cust_id", "CT", "KG", "amt_spent", "n", "tran_prod_offer_cts", "discount"))
View(knn_df)
# Scaling the data as knn is sensitive to this
knn_df_scaled <- scale(knn_df[,2:7])
View(knn_df_scaled)
# First iteration of the clustering
k1 <- kmeans(knn_df_scaled, centers = 2, nstart = 25)
str(k1)
fviz_cluster(k1, data= knn_df_scaled)
k1
# Finding the ideal number of clusters we should have
##Elbow Method
wss <- function(k) {
kmeans(knn_df_scaled, k, nstart = 10 )$tot.withinss
}
k.values <- 1:15
wss_values <- map_dbl(k.values, wss)
plot(k.values, wss_values,
type="b", pch = 19, frame = FALSE,
xlab="Number of clusters K",
ylab="Total within-clusters sum of squares")
fviz_nbclust(knn_df_scaled, kmeans, method = "wss")
##Average Silhouette Method
avg_sil <- function(k) {
km.res <- kmeans(knn_df_scaled, centers = k, nstart = 25)
ss <- silhouette(km.res$cluster, dist(knn_df_scaled))
mean(ss[, 3])
}
# Silhouette width is undefined for k = 1, so start at k = 2.
k.values <- 2:15
avg_sil_values <- map_dbl(k.values, avg_sil)
plot(k.values, avg_sil_values,
       type = "b", pch = 19, frame = FALSE,
       xlab = "Number of clusters K",
       ylab = "Average Silhouettes")
fviz_nbclust(knn_df_scaled, kmeans, method = "silhouette")
# Final KMeans Clustering: k = 4 chosen from the elbow/silhouette
# diagnostics above; plot title below is consistent with centers = 4.
final <- kmeans(knn_df_scaled, centers = 4, nstart = 25)
str(final)
final
# Plotting the final clusters for customer segments
customer_kmeans_plot <- fviz_cluster(final, geom="point", data=knn_df_scaled) + ggtitle("Customer Clusters, k=4")
customer_kmeans_plot
# Mapping clusters back to original data -- relies on knn_df and
# knn_df_scaled having the same row order, which holds because the scaled
# matrix was built directly from knn_df with no row drops.
knn_df$clustering <- final$cluster
View(knn_df)
# Creating individual datasets for each cluster to further understand their needs
cluster1 <- subset(knn_df, clustering==1)
cluster2 <- subset(knn_df, clustering==2)
cluster3 <- subset(knn_df, clustering==3)
cluster4 <- subset(knn_df, clustering==4)
##Did a sanity check to make sure every customer was only segmented once
# Mapping back to transactions table (all raw transactions per segment)
c1 <- subset(transaction, cust_id %in% cluster1$cust_id)
c2 <- subset(transaction, cust_id %in% cluster2$cust_id)
c3 <- subset(transaction, cust_id %in% cluster3$cust_id)
c4 <- subset(transaction, cust_id %in% cluster4$cust_id)
# Summary of clusters (analyst's interpretation of each segment)
summary(cluster1) #buying fewer things in count but more amount in KG with high discount amounts
summary(cluster2) #more quantity but with a lot of offers but total discount amount is low
summary(cluster3) # very average purchasing behavior
summary(cluster4) # High amounts purchased but rest of the behavior is average
# Interpreting clusters via a standardized parallel-coordinates plot
knn_df$clustering <- as.factor(knn_df$clustering)
(p <- ggparcoord(data=knn_df, columns=c(2:7), groupColumn="clustering", scale="std"))
# KMEANS CLUSTERING ----------------------------------
# Store-level segmentation, mirroring the customer analysis above.
# Variable 1: Number of transactions per store (column `n`).
num_shopping <- transaction %>%
  group_by(store_id) %>%
  count() %>%
  arrange(desc(n))
View(num_shopping)
# Variable 2: How many units are they selling, split by unit type.
units <- transaction %>%
  group_by(store_id, prod_unit) %>%
  summarise(num_prod = sum(tran_prod_sale_qty)) %>%
  arrange(desc(num_prod))
View(units)
# Pivoting the table so we have counts (CT) separated from weight (KG)
units <- units %>%
  pivot_wider(names_from = prod_unit, values_from = num_prod)
View(units)
# Variable 3: How much revenue are they generating
revenue <- transaction %>%
  group_by(store_id) %>%
  summarise(high_rev = sum(tran_prod_paid_amt)) %>%
  arrange(desc(high_rev))
View(revenue)
# Variable 4: How many discounts do they offer
discount <- transaction %>%
  group_by(store_id) %>%
  summarise(total_disc = sum(tran_prod_offer_cts)) %>%
  arrange(desc(total_disc))
View(discount)
# Making a dataframe of all the variables we want to include in the store
# clustering.
# NOTE(review): data.frame() binds these frames by POSITION, but each was
# arrange()d by a different column, so rows may pair up different stores --
# verify, or join on store_id as done for the customer table.
knn_df_store <- data.frame(num_shopping, units, revenue, discount)
knn_df_store <- subset(knn_df_store, select = c("store_id", "n", "CT", "KG", "high_rev", "total_disc"))
View(knn_df_store)
# Scaling the features before clustering (k-means is scale-sensitive)
knn_df_scaled_store <- scale(knn_df_store[,2:6])
View(knn_df_scaled_store)
# Remove any 0/NA Values.
# NOTE(review): this hard-coded slice assumes the NA rows sit exactly at
# positions 420+ -- fragile; complete.cases() would be safer, but the
# [1:419,] slice at the cluster-mapping step below depends on this order.
knn_df_scaled_store <- knn_df_scaled_store[1:419,]
# First iteration of the clustering: exploratory k = 2 run.
k1_store <- kmeans(knn_df_scaled_store, centers = 2, nstart = 25)
str(k1_store)
fviz_cluster(k1_store, data= knn_df_scaled_store)
k1_store
# Finding the ideal number of clusters we should have
## Elbow Method: total within-cluster sum of squares for a given k.
## `data` defaults to the global knn_df_scaled_store so existing calls
## (map_dbl(k.values, wss)) keep working unchanged; the parameter removes
## the need to redefine this helper for every dataset.
wss <- function(k, data = knn_df_scaled_store) {
  kmeans(data, k, nstart = 10 )$tot.withinss
}
# Elbow curve for the store data, k = 1..15.
k.values <- 1:15
wss_values <- map_dbl(k.values, wss)
plot(k.values, wss_values,
       type="b", pch = 19, frame = FALSE,
       xlab="Number of clusters K",
       ylab="Total within-clusters sum of squares")
fviz_nbclust(knn_df_scaled_store, kmeans, method = "wss")
## Average Silhouette Method: mean silhouette width for a given k.
## `data` defaults to the global knn_df_scaled_store so existing calls keep
## working unchanged. Requires cluster::silhouette().
avg_sil <- function(k, data = knn_df_scaled_store) {
  km.res <- kmeans(data, centers = k, nstart = 25)
  ss <- silhouette(km.res$cluster, dist(data))
  mean(ss[, 3])
}
# Silhouette width is undefined for k = 1, so start at k = 2.
k.values <- 2:15
avg_sil_values <- map_dbl(k.values, avg_sil)
plot(k.values, avg_sil_values,
       type = "b", pch = 19, frame = FALSE,
       xlab = "Number of clusters K",
       ylab = "Average Silhouettes")
fviz_nbclust(knn_df_scaled_store, kmeans, method = "silhouette")
# Final KMeans Clustering: k = 3 chosen for the store segmentation
# (supported by the elbow/silhouette diagnostics above).
final_store <- kmeans(knn_df_scaled_store, centers = 3, nstart = 25)
str(final_store)
final_store
# Plotting the final clusters for store segments.
# FIX: title now matches centers = 3 (it previously claimed "k=5").
store_kmeans_plot <- fviz_cluster(final_store, geom="point", data=knn_df_scaled_store) + ggtitle("Store Clusters, k=3")
store_kmeans_plot
# Mapping clusters back to original data.
# NOTE(review): the [1:419,] slice must drop exactly the same rows removed
# before clustering (assumed to be rows 420+) -- confirm this alignment.
knn_df_store <- knn_df_store[1:419,]
knn_df_store$clustering <- final_store$cluster
View(knn_df_store)
# Creating individual datasets for each cluster to further understand their
# needs. With k = 3, store4 and store5 are necessarily empty; they are kept
# only for compatibility with earlier k = 5 runs of this script.
store1 <- subset(knn_df_store, clustering==1)
store2 <- subset(knn_df_store, clustering==2)
store3 <- subset(knn_df_store, clustering==3)
store4 <- subset(knn_df_store, clustering==4)
store5 <- subset(knn_df_store, clustering==5)
##Similar to above - did a sanity check to make sure every store was only segmented once
# Mapping back to transactions table (all raw transactions per segment)
s1 <- subset(transaction, store_id %in% store1$store_id)
s2 <- subset(transaction, store_id %in% store2$store_id)
s3 <- subset(transaction, store_id %in% store3$store_id)
s4 <- subset(transaction, store_id %in% store4$store_id)
s5 <- subset(transaction, store_id %in% store5$store_id)
View(s2)
# Interpreting clusters via a parallel-coordinates plot of the raw features
knn_df_store$clustering <- as.factor(knn_df_store$clustering)
(p <- ggparcoord(data=knn_df_store, columns=c(2:6), groupColumn="clustering"))
|
32569c1cdc2ee3e33bb7efb701c82d6703e33d9c
|
0a5dd2ef993f265ad5058d8f8e5b9522f1439e03
|
/man/createMetaData.Rd
|
236788821d4c14b636549cd31c1a8933ff7b9f6e
|
[] |
no_license
|
oncoclass/DoseR
|
30ed6cf59761a2d3f7f3e15f8d7df5e3e8b76562
|
8cc057fe12d4f38720258cd3966ec28f22848ca3
|
refs/heads/master
| 2020-12-24T00:46:21.189550
| 2017-11-16T13:07:21
| 2017-11-16T13:07:21
| 17,335,054
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,284
|
rd
|
createMetaData.Rd
|
\name{createMetaData}
\alias{createMetaData}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Create an A.data object for storing the absorbance data.
}
\description{
This function creates an A.data object. This object stores all data related to the dose response experiments, which is saved in an .RData file. This ensures that the analysis is only conducted on new data, including the time-consuming bootstrap analysis. The most convenient approach for using the function is to create a metadata object directly from the .dbf filenames. Other possibilities include: 1) having an Excel document with experiment information, 2) a data.frame with experiment information, and 3) a list with dbf filenames and protocol filenames.
}
\usage{
createMetaData(data = NULL, data.file = file.path(getwd(), "Absorbance"),
save = TRUE,
namevar = "Cellline", drugvar = "chemo", protocolvar = "R.protocol",
identifier = "identifier", timevar = "Hour", correctionname = "Control",
incubationvar = "incubation", doublingvar = NULL, format = "long",
dbf.path = getwd(), protocol.path = getwd(), dbf.files = NULL,
file.extension = ".dbf", protocol.files = NULL, are.paths.full = TRUE,
colnames = c("namevar", "drugvar", "protocolvar",
"identifier", "timevar"),
sep = c(";"), namesep = " ", identifiersep = "_",
date.format = NULL, unit = "ug/ml", additional.metadata = NULL,
show.warnings = TRUE, update = TRUE, idcomb = NULL, idvar = "sampleid",
namecols = NULL, dbf.file.var = "dbf.file.name", shiny.input = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{
This may be a data frame containing the metadata for the dose response experiments or the path to an Excel file containg the metadat. Not used when the metadata is inferred from the filename.
}
\item{data.file}{
Character giving the path to the Excel metadata sheet created for the project. This is only created when the metadata is inferred from the filename.
}
\item{save}{
Should the data be saved.
}
\item{namevar}{
The column name containg the name of the cell line. Defeaults to \code{Cellline}.
}
\item{drugvar}{
Character denoting the name of the column specifying the drug used in the experiment. Defeaults to \code{chemo}.
}
\item{protocolvar}{
Character denoting the name of the column specifying the protocol used in the experiment. Defeaults to \code{R.protocol}.
}
\item{identifier}{
Character denoting the name of the column specifying an id for the experiment. Defaults to \code{identifier}, and in combination with \code{namevar}, \code{drugvar}, and \code{timevar} it is expected to form a unique identifier. It is used to indicate which experiments are run together, so it should be identical for separate time points. The date on which the drug is added may be a good value to use.
}
\item{timevar}{
The column name specifying the number of hours the cell line was exposed to the drug. Defeaults to \code{Hour}.
}
\item{correctionname}{
The name used to indicate that this plate is to be used for correction of drugcolor. The name is to be included within the column \code{namevar} containg the names of the cell lines'. Defeaults to \code{Control}.
}
\item{incubationvar}{
The column name specifying the number of hours the cell line incubated with the MTS assay. Defeaults to \code{NULL} for which 2 hours is assummed for all experiments.
}
\item{doublingvar}{
Name of the column containg information of cell line doubling time when not treated with drug. This is only used if the experiment only contains 1 time point.
}
\item{format}{
Format of the metadata. Can be either long or wide format the created Excel sheet will also be in this format. Defaults to \code{long}.
}
\item{dbf.path}{
Path to the directory storing the dbf files.
}
\item{protocol.path}{
Path to the directory storing the protocol files.
}
\item{dbf.files}{
The path to each individual .dbf file.
}
\item{file.extension}{
The extension for the dBase files. defaults to \code{.dbf}
}
\item{protocol.files}{
The path to each individual protocol file.
}
\item{are.paths.full}{
Logical. if \code{TRUE} the paths are assummed to full.
}
\item{colnames}{
The order of the colnames specified by the filename for the .dbf file. Defeaults to \code{c("namevar", "drugvar", "protocolvar","identifier", "timevar")} which means that part 1) includes the name of the cell line, 2) specifies what drug was used in the experiment, 3) specifies the name of the protocol, 4) specify the unique identifier, and 5) the number of hours the cell line were exposed to a drug. Thus if the the cell line OCI-Ly7 was treated with Doxorubicin according to protocol DOX18 on the November 30th 2013, and exposed to the drug for 48 hours, the file name would be: OCI-Ly7;Doxorubicin;DOX18;20131130;48. The separator ";" is explained below.
}
\item{sep}{
The character used to specify column separation in the filename. Defealts to ";" rsulting in a filename as described above.
}
\item{namesep}{
Aditional separator for cell line name, defeaults to "_". The namecharater in the file name can further be used to split up experiments. E.g. the filenames
OCI-Ly7_cond1;Doxorubicin;DOX18;20131130;48 and OCI-Ly7_cond2;Doxorubicin;DOX18;20131130;48 says that cell line OCI-Ly7 is treated with Doxorubicin under two different conditions.
}
\item{identifiersep}{
Aditional separator for identifier, defeaults to "_". The indentifier can be the date used for the setup, however if the same cell line is treated with the same drug on the same date the filename will not be unique. Then we may use an additional seperator in the identifier name. E.g.\
OCI-Ly7;Doxorubicin;DOX18;20131130_1;48 and OCI-Ly7;Doxorubicin;DOX18;20131130_2;48 indicate that the two different experiments.
}
\item{date.format}{
If the identifier indicates the setup date for the experiment the format of the date is specified.
Defeaults to \code{NULL}.
}
\item{unit}{
The unit for the drug concentration. Defeaults to \code{ug/ml}. where u is used as substitute for \eqn{\mu}. If a column of the data supplied to argument \code{data} is named \code{unit} this column will be used as the unit.
}
\item{additional.metadata}{
An optinal additional metadata set containg information regarding the cell lines. The first column must specify the name of the cell line.
}
\item{show.warnings}{
Should warnings be displayed.
}
\item{update}{
Do you want to update an already made metasheet or create a new.
}
\item{idcomb}{
Combination of column names that identifies an experiment id (Optional)
}
\item{idvar}{
The variable name used for id (Optional)
}
\item{namecols}{
If the name part of the file name is separated using \code{namesep} the resulting additional column names are given here.
}
\item{dbf.file.var}{
Name of the column in the created metadata containg the filename (optional)
}
\item{shiny.input}{
Used for the shiny web application.
}
}
\details{
When conducting dose response experiments the amount of .dbf becomes large thus metadata is normally stored in Excel sheets. This function converts such data into an \code{A.data} object of class \code{createMetaData}. Since many mistakes occur when copy pasting filenames into Excel the function can also be used to create metadata from filenames provided the filenames are of a certain structure, such as: OCI-Ly7;Doxorubicin;DOX18;20131130;48.dbf.
}
\value{
The output of the function is an A.data object of class \code{createMetaData}. This is a list with the following components:
\item{meta.list}{This is a list of meta data objects.}
\item{call}{A list containing information regarding the call to the function.}
\item{auxiliary}{List of auxiliary data used by other functions.}
The meta.list output contains
\item{metadata.full}{The metadata for all dose response experiments, in either long or wide format.}
\item{metadata.correction}{Which is the metadata for dose response experiment used for correction of drug colour.}
\item{metadata.new}{Metadata for all experiments not used to correct for drug colour.}
\item{additional}{The metadata sheet supplied to \code{additional.metadata}.}
\item{metadata.long}{Metadata for all dose response experiments sorted in long format.}
}
\references{
Steffen Falgreen et al. Exposure time independent summary statistics for assessment of drug dependent cell line growth inhibition (2013)
}
\author{
The function was written at department of haematology, Aalborg University Hospital and maintained by Steffen Falgreen.
}
\note{
Following the creation of the A.data object the function \code{\link{readDBFData}} is used to read the dBase files into R.
}
\seealso{
\code{\link{readDBFData}}
}
\examples{
## Example of creating the metadata based on file names
## A.data <- createMetaData(
## dbf.path = data.path, # data.path is a predifined directory storing the dbf files
## protocol.path = protocol.path, # protocol.path is a predifined directory storing the Excel protocol files
## colnames = c("namevar", "drugvar", "protocolvar", ## The order of colnames specified by the file names:
## "identifier", "timevar"), ## OCI-Ly7;Doxorubicin;DOX18;20131130;48
## sep = ";", # The separator used in the filename as noted above
## namevar = "name",
## drugvar = "chemo",
## protocolvar = "R.protocol",
## identifier = "identifier",
## timevar = "Hour",
## namecols = "serum",
## date.format = "%d%m%y",
## correctionname = "Control", # The name specified in the column "namevar" for drug colour correction plates
## unit = "ug/ml",
## additional.metadata = file.path(BCell.ext.dir, "cell.line.metadata.xls"),
## show.warnings = TRUE,
## update = TRUE,
## format = "long",
## data.file = file.path(BCell.gen.dir, "Absorbance.test"))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
a234791401246df918e25ada736efd1c77fae612
|
2d3cb59bde33733306bd9007d4cc0d03d73f319a
|
/JHSV3TabOne.R
|
017fafe6f02d7104e02dcc57cec23092d88d2091
|
[] |
no_license
|
lizlitkowski/JHSGut
|
967eef726993553c560473c4d7d23f4bcb7f8321
|
a9f503a89fcaa1f08b12c6f1284fafef9dd684e8
|
refs/heads/master
| 2020-06-05T23:05:03.810082
| 2019-10-01T16:59:32
| 2019-10-01T16:59:32
| 192,569,681
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,680
|
r
|
JHSV3TabOne.R
|
# Create Table One for Jackson Heart Study grant.
# Reads per-visit analysis CSVs and treatment-resistant-hypertension SAS
# files, merges them by subject, and stacks the three visits on their
# shared columns.
# NOTE(review): rm(list=ls()) and absolute Windows paths make this script
# non-portable; fine for a one-off analysis, avoid in shared code.
rm(list=ls())
library(tableone)
library(sas7bdat)
dir_data = "C:/Users/litkowse/Desktop/Vanguard_2016/Vanguard_2016/data/AnalysisData/1-data/CSV/"
save1_dir = "C:/Users/litkowse/Desktop/Vanguard_2016/Vanguard_2016/data/Visit1/"
save2_dir = "C:/Users/litkowse/Desktop/Vanguard_2016/Vanguard_2016/data/Visit2/"
save3_dir = "C:/Users/litkowse/Desktop/Vanguard_2016/Vanguard_2016/data/Visit3/"
# Visit 1: analysis data joined to trhtn data on subject id.
visit1 <- read.csv(file = paste(dir_data, "analysis1.csv",sep = ""), header=T,sep=",")
v1trhtn = read.sas7bdat(paste(save1_dir, "trhtn_v1.sas7bdat", sep=""))
v1total = merge(visit1,v1trhtn, by = "subjid")
# Visit 2
visit2 <- read.csv(file = paste(dir_data, "analysis2.csv",sep = ""), header=T,sep=",")
v2trhtn = read.sas7bdat(paste(save2_dir, "trhtn_v2.sas7bdat", sep=""))
v2total = merge(visit2,v2trhtn, by = "subjid")
# Visit 3
visit3 <- read.csv(file = paste(dir_data, "analysis3.csv",sep = ""), header=T,sep=",")
v3trhtn = read.sas7bdat(paste(save3_dir, "trhtn_v3.sas7bdat", sep=""))
v3total = merge(visit3,v3trhtn, by = "subjid")
# Columns present in all three visits, then stack the visits row-wise.
common_cols <- intersect(colnames(v1total), colnames(v2total))
common_cols <- intersect(colnames(v3total), common_cols)
totrbind = rbind(v1total[,common_cols],v2total[,common_cols],v3total[,common_cols])
# Derive CKD stage categories from eGFR (CKD-EPI equation, per the comments
# at the end of the script). Rows with missing eGFR keep the "Missing" label.
# FIX: assign by column NAME rather than the hard-coded positional index 111,
# which silently targets the wrong column if the input gains or loses a
# column upstream (111 was presumably eGFRCat's position at the time).
totrbind$eGFRCat = "Missing"
totrbind$eGFRCat[which(totrbind$eGFRckdepi >= 90)] = "Stage 1:Normal"
totrbind$eGFRCat[which(totrbind$eGFRckdepi >= 60 & totrbind$eGFRckdepi < 90 )] = "Stage 2:Mild CKD"
totrbind$eGFRCat[which(totrbind$eGFRckdepi >= 30 & totrbind$eGFRckdepi < 60 )] = "Stage 3:Moderate CKD"
totrbind$eGFRCat[which(totrbind$eGFRckdepi >= 15 & totrbind$eGFRckdepi < 30 )] = "Stage 4:Severe CKD"
totrbind$eGFRCat[which(totrbind$eGFRckdepi < 15 )] = "Stage 5:End Stage CKD"
# Export the combined visit data with the derived stage.
write.csv(totrbind, file = "C:/Users/litkowse/Desktop/jhsegfr.csv")
#write.csv(totrbind,file = paste(dir_data,"grsStep1.csv"))
table(totrbind$eGFRCat)
# Build Table One stratified by visit: convert categorical variables to
# factors so CreateTableOne reports counts/percentages for them.
varsToFactor <- c("sex","BPjnc7","hdl3cat","ldl5cat", "CHDHx", "CVDHx", "MIHx","prevatrh","uncontrolledbp","Diabetes", "eGFRCat")
totrbind[varsToFactor] <- lapply(totrbind[varsToFactor], factor)
vars <- c("age","sex","sbp","dbp","BPjnc7","eGFRckdepi","eGFRCat","HSCRP", "hdl","hdl3cat","ldl","ldl5cat","trigs","CHDHx","CVDHx","MIHx","prevatrh","uncontrolledbp","Diabetes","BMI","waist")
dput(names(totrbind))
tableOne <- CreateTableOne(vars = vars, data = totrbind, strata = "visit")
file.out <- paste (dir_data,"jhstabone.csv",sep ="" )
write.csv(print(tableOne,noSpaces=T),file.out)
# Incident-event files: CHD, heart failure, and stroke, plus the cohort
# mortality/loss-to-follow-up file; merged into one incidence table.
event_dir = "C:/Users/litkowse/Desktop/Vanguard_2016/Vanguard_2016/data/Events/1-data/CSV/"
chdev = read.csv(file = paste(event_dir, "incevtchd.csv",sep = ""), header=T,sep=",")
table(chdev$CHD.Last.Contact.Type)
hfev = read.csv(file = paste(event_dir, "incevthfder.csv",sep = ""), header=T,sep=",")
table(hfev$Last.Contact.Type)
table(hfev$AFU.Combined.Last.Contact.Type)
strokev = read.csv(file = paste(event_dir, "incevtstroke.csv",sep = ""), header=T,sep=",")
table(strokev$Last.Contact.Type)
cohort_dir = "C:/Users/litkowse/Desktop/Vanguard_2016/Vanguard_2016/data/Cohort/1-data/CSV/"
mort = read.csv(file = paste(cohort_dir, "deathltfucohort.csv",sep = ""), header=T,sep=",")
table(mort$Death.Indicator)
# Successive merges on the participant id; note the id column name differs
# across files (Participant.ID / PARTICIPANT.ID / Cohort.ID).
step1 = merge(chdev,mort, by.x = "Participant.ID", by.y = "PARTICIPANT.ID")
step2 = merge(step1,strokev, by.x = "Participant.ID", by.y = "Participant.ID" )
inctab = merge(step2,hfev, by.x = "Participant.ID", by.y = "Cohort.ID" )
table(hfev$Last.Contact.Type)
table(hfev$AFU.Combined.Last.Contact.Type)
table(inctab$hard.CHD.Last.Contact.Type)
# Second Table One: contact-type and death-indicator distributions.
# After the merges, .x suffixes refer to the stroke file and .y to the HF file.
varsToFactor <- c("CHD.Last.Contact.Type","Last.Contact.Type.x","Last.Contact.Type.y","Death.Indicator")
inctab[varsToFactor] <- lapply(inctab[varsToFactor], factor)
vars <- c("CHD.Last.Contact.Type","Last.Contact.Type.x","Last.Contact.Type.y","Death.Indicator")
dput(names(inctab))
tableOne <- CreateTableOne(vars = vars, data = inctab)
file.out <- paste (dir_data,"inctab.csv",sep ="" )
write.csv(print(tableOne,noSpaces=T),file.out)
# To add the incidents after Visit 3: flag participants whose CHD event date
# falls after their Visit 3 date AND who are incident CHD cases.
visitevent = merge(chdev,visit3, by.x = "Participant.ID", by.y = "subjid")
visitevent$sinceV3 = as.Date(visitevent$CHD.Event.or.Censoring.Date,format='%m/%d/%Y') > as.Date(visitevent$VisitDate,format='%m/%d/%Y')
visitevent$incaftV3 = visitevent$sinceV3 & visitevent$Incidence.CHD == "Yes"
varsToFactor <- c("incaftV3")
visitevent[varsToFactor] <- lapply(visitevent[varsToFactor], factor)
vars <- c("incaftV3")
dput(names(visitevent))
# NOTE(review): vars and strata are both "incaftV3" -- tabulating a variable
# stratified by itself is almost certainly not intended; confirm whether
# strata should be dropped or set to another grouping variable.
tableOne <- CreateTableOne(vars = vars, data = visitevent, strata = "incaftV3")
# Used the eGFR ckdepi measure
# Add categories for eGFR
|
ecb79a3102613a328e81ff4648be1011f13c48ce
|
febaac19f141ff221137b2fc9ff830fa1673d4c7
|
/man/make_genepop.Rd
|
3d1b4edaa8ff1fc7eb98948ca1052f5732b09cee
|
[] |
no_license
|
mastoffel/sealABC
|
b686bf26afe4b5a57acd10bab2d22d9be5fc5e66
|
c1b2aa5b339fee3d92481b1ef0aa5604956596d9
|
refs/heads/master
| 2020-04-12T08:15:25.064329
| 2018-01-08T11:20:38
| 2018-01-08T11:20:38
| 62,786,078
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 686
|
rd
|
make_genepop.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_genepop.R
\name{make_genepop}
\alias{make_genepop}
\title{format allelic microsats to genepop}
\usage{
make_genepop(x)
}
\arguments{
\item{x}{genotypes}
}
\description{
outputs a data.frame with genepop format
}
\details{
so far, the input is the standard format
for the bottleneck analysis, i.e. first column "id",
second column "pop", third column "cluster", all
following columns are loci (two columns per locus).
"id" and "cluster" will be deleted and "pop" will
be kept for the formatting.
}
\author{
Emily Humble (emily.lhumble@gmail.com)
Martin Stoffel (martin.adam.stoffel@gmail.com)
}
|
4975b891e60515d5f910e4f19f3860ce04e9f7bd
|
d56904e67efc6cd5bcf7d1c7a37a3b083843d817
|
/run_analysis.R
|
63c918417111d624ade017d7b132cef6cb8729e1
|
[] |
no_license
|
henryleineweber/GettingAndCleaningDataFinal
|
5ccaadde4ad756c589cdcf001c279bc5f6eb7dca
|
4e85b57b621193edfcf8438632846b86be89547b
|
refs/heads/master
| 2021-01-11T03:09:39.629753
| 2016-10-16T23:52:17
| 2016-10-16T23:52:17
| 71,084,489
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,522
|
r
|
run_analysis.R
|
# Set working directory, download data, and extract files to directory.
# NOTE(review): setwd() makes the script depend on where it is launched from;
# it fails unless ./RunAnalysis exists under the current directory.
setwd("./RunAnalysis")
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", destfile = "getdata_dataset.zip")
unzip("getdata_dataset.zip")
# Following will merge all data files in the extracted folder into a single data set
## Read and assign data names: feature names, activity labels, and the
## subject/feature/activity files for the train and test partitions.
features <- read.table("./UCI HAR Dataset/features.txt", header = FALSE)
activity <- read.table("./UCI HAR Dataset/activity_labels.txt", header = FALSE)
s_train <- read.table("./UCI HAR Dataset/train/subject_train.txt", header = FALSE)
x_train <- read.table("./UCI HAR Dataset/train/X_train.txt", header = FALSE)
y_train <- read.table("./UCI HAR Dataset/train/Y_train.txt", header = FALSE)
s_test <- read.table("./UCI HAR Dataset/test/subject_test.txt", header = FALSE)
x_test <- read.table("./UCI HAR Dataset/test/X_test.txt", header = FALSE)
y_test <- read.table("./UCI HAR Dataset/test/Y_test.txt", header = FALSE)
## Set column names: measurement columns take the names from features.txt.
colnames(activity) <- c("activityID","activityType")
colnames(s_train) <- "subjectID"
colnames(x_train) <- features[,2]
colnames(y_train) <- "activityID"
colnames(s_test) <- "subjectID"
colnames(x_test) <- features[,2]
colnames(y_test) <- "activityID"
## Merge all data into a training dataset and a test dataset
trainingdata <- cbind(s_train, x_train, y_train)
testdata <- cbind(s_test, x_test, y_test)
## Merge training and test data and get vector of new column headers
combined_data <- rbind(trainingdata, testdata)
headers <- colnames(combined_data)
# Extracts only the measurements on the mean and standard deviation for each
# measurement.
# NOTE(review): the "." in patterns like "-mean.." is a regex wildcard (any
# character), presumably meant to match the literal "()" -- confirm. The
# "== TRUE" is redundant (extract is already logical) but harmless; the
# single-bracket logical index selects COLUMNS of the data frame here.
extract <- (grepl("activity..", headers) | grepl("subject..", headers) | grepl("-mean..", headers) & !grepl("-meanFreq..", headers) & !grepl("mean..-", headers) | grepl("-std..", headers) & !grepl("-std()..-", headers));
working_data <- combined_data[extract == TRUE]
## Add activity descriptions
working_data <- merge(working_data, activity, by="activityID", all.x = TRUE)
headers <- colnames(working_data)
for (i in 1:length(headers))
{
headers[i] = gsub("\\()","",headers[i])
headers[i] = gsub("-std$","StdDev",headers[i])
headers[i] = gsub("-mean","Mean",headers[i])
headers[i] = gsub("^(t)","time",headers[i])
headers[i] = gsub("^(f)","freq",headers[i])
headers[i] = gsub("([Gg]ravity)","Gravity",headers[i])
headers[i] = gsub("([Bb]ody[Bb]ody|[Bb]ody)","Body",headers[i])
headers[i] = gsub("[Gg]yro","Gyro",headers[i])
headers[i] = gsub("AccMag","AccMagnitude",headers[i])
headers[i] = gsub("([Bb]odyaccjerkmag)","BodyAccJerkMagnitude",headers[i])
headers[i] = gsub("JerkMag","JerkMagnitude",headers[i])
headers[i] = gsub("GyroMag","GyroMagnitude",headers[i])
}
colnames(working_data) = headers
# Create a second, independent tidy data set with the average of each variable for each activity and each subject
## Remove Activity Type column to avoid NA values when getting mean
working_data_noAT <- working_data[, names(working_data) != "activityType"]
## Create tidy data set: mean of every column per (activityID, subjectID)
tidy_data <- aggregate(working_data_noAT, by=list(working_data_noAT$activityID, working_data_noAT$subjectID), FUN=mean)
## Merge Activity Type column back into dataframe
tidy_data <- merge(tidy_data, activity, by="activityID", all.x = TRUE)
# Export tidy data to working directory
write.table(tidy_data, "tidy_data.txt", row.names = FALSE, quote = FALSE)
|
b6540b3c9282e83a94fef82e5a47e23bdf67f734
|
7ecdf3fe6b4ed685372e06f3b31e22c074801426
|
/readData.R
|
594cfa94c596d3c504742c12b6871580190f9520
|
[] |
no_license
|
gmdn/ExData_Plotting1
|
b5114ce4d4dbc4d319c032ce49e033428b36b5bb
|
c4fb518f72d3f15b8e2268d539ae802f6dfcb874
|
refs/heads/master
| 2020-12-26T04:55:13.765963
| 2014-09-07T22:39:58
| 2014-09-07T22:39:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 976
|
r
|
readData.R
|
## Read the household power consumption dataset (semicolon-separated).
dat <- read.delim("~/Downloads/household_power_consumption.txt",
                  sep = ";",
                  header = T,
                  stringsAsFactors = F)
# build date time strings from the separate Date and Time columns
datetime <- paste(dat[, 1], dat[, 2], sep = " ")
# convert to standard date-times
dtm <- as.POSIXlt(datetime, format = "%d/%m/%Y %H:%M:%S")
# subset indexes: keep only 2007-02-01 and 2007-02-02
idxs <- which(as.Date(dtm) >= "2007-02-01" & as.Date(dtm) <= "2007-02-02")
# subset
subdat <- dat[idxs, ]
# Convert measurement columns to numeric (they are read as character,
# presumably because the raw file uses a non-numeric missing-value marker
# -- TODO confirm; non-numeric entries become NA with a warning).
# FIX: the original converted Sub_metering_1 twice; once is enough.
subdat$Global_active_power <- as.numeric(subdat$Global_active_power)
subdat$Voltage <- as.numeric(subdat$Voltage)
subdat$Global_reactive_power <- as.numeric(subdat$Global_reactive_power)
subdat$Global_intensity <- as.numeric(subdat$Global_intensity)
subdat$Sub_metering_1 <- as.numeric(subdat$Sub_metering_1)
subdat$Sub_metering_2 <- as.numeric(subdat$Sub_metering_2)
subdat$Sub_metering_3 <- as.numeric(subdat$Sub_metering_3)
|
5071191f2ab5f96df5220e71da7bb77b2b5b8622
|
374de90d91a1d5ba11e98a4a9614d98e02de8663
|
/experiments/creature_production_2/results/analysis.R
|
eca88fba51c9059f160f71c5dc213e6b1f694c79
|
[
"MIT"
] |
permissive
|
thegricean/modals
|
d2223645529b59e173d18b0b248d6ea09c6d0429
|
9bb267a64542ee30e2770d79d9cd5d9cce890be8
|
refs/heads/master
| 2021-03-12T20:26:09.814798
| 2016-03-01T00:14:30
| 2016-03-01T00:14:30
| 26,193,528
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,506
|
r
|
analysis.R
|
library(ggplot2)
# NOTE(review): setwd() to a user-specific path makes the script non-portable.
setwd("~/Documents/git/cocolab/modals/experiments/creature_production_2/Submiterator-master")
d = read.table("creature-production-trials.tsv",sep="\t",header=T)
head(d)
# Score attention-check questions 1-4: a response is correct when
# 100*(response/8) equals the stored answer (responses are on a 0-8 scale,
# answers on a 0-100 scale).
# NOTE(review): this is an exact equality test on the result of floating-point
# division; it happens to be safe for multiples of 12.5 but is fragile in
# general -- consider a tolerance comparison.
d$q1_correct = NA
d[!is.na(d$question1_response),]$q1_correct = 0
d[!is.na(d$question1_response)&(100*(d$question1_response/8))==d$question1_answer,]$q1_correct = 1
d$q2_correct = NA
d[!is.na(d$question2_response),]$q2_correct = 0
d[!is.na(d$question2_response)&(100*(d$question2_response/8))==d$question2_answer,]$q2_correct = 1
d$q3_correct = NA
d[!is.na(d$question3_response),]$q3_correct = 0
d[!is.na(d$question3_response)&(100*(d$question3_response/8))==d$question3_answer,]$q3_correct = 1
d$q4_correct = NA
d[!is.na(d$question4_response),]$q4_correct = 0
# NOTE(review): the extra != "o" filter suggests question4_response sometimes
# contains the literal character "o" (a data-entry artifact?) -- confirm.
d[!is.na(d$question4_response)&d$question4_response != "o"&(100*(d$question4_response/8))==d$question4_answer,]$q4_correct = 1
# Per-row score (0-4) and per-worker total score across all trials.
d$score = NA
d$score = (d$q1_correct+d$q2_correct+d$q3_correct+d$q4_correct)
w = aggregate(score~workerid,data=d,sum)
d$worker_score = NA
d$worker_score = w$score[match(d$workerid,w$workerid)]
# Keep only experimental trials and attach the precomputed strength scores,
# matched on the (item, evidence_type, freq) triple.
d = d[!is.na(d$evidence_type),]
s = read.csv("../results/strength_scores.csv",header=T)
s$ID = paste(s$item,s$evidence_type,s$freq)
d$ID = paste(d$item,d$evidence_type,d$freq)
d$strength_score = s$response[match(d$ID,s$ID)]
d = subset(d, select = c(workerid,item,evidence_type,freq,response,worker_score,strength_score))
table(d$response,d$evidence_type)
# Exclude inattentive workers (total score <= 13 across all checks).
d_trim = d[d$worker_score > 13,]
table(d_trim$response,d_trim$evidence_type)
d_trim$item <- factor(d_trim$item)
d_trim$evidence_type <- factor(d_trim$evidence_type)
d_trim$freq <- factor(d_trim$freq)
d_trim = na.omit(d_trim)
head(d_trim)
aggregate(strength_score~response,d_trim,mean)
# modal choice by evidence directness and categorical evidence type:
# row-normalised proportions of each modal at each directness score.
t = as.data.frame(prop.table(table(d_trim$strength_score,d_trim$response),mar=1))
head(t)
colnames(t) = c("Directness","Modal","Proportion")
t$ModalChoice = factor(x=as.character(t$Modal),levels=c("bare","must","might"))
t$Directness = as.numeric(as.character(t$Directness))
ggplot(t, aes(x=Directness,y=Proportion,color=ModalChoice)) +
  geom_point() +
  #geom_line() +
  geom_smooth()
#ylim(0,1)
#+
#  facet_wrap(~EvidenceType,scales="free_y")
ggsave("../results/modal_choices_bydirectness.pdf",height=3)
# histogram of modal choice by item and evidence type (proportions within
# each evidence type)
t = as.data.frame(prop.table(table(d_trim$evidence_type,d_trim$response),mar=1))
head(t)
colnames(t) = c("EvidenceType","Modal","Proportion")
t$ModalChoice = factor(x=as.character(t$Modal),levels=c("bare","must","probably","might"))
ggplot(t, aes(x=EvidenceType,y=Proportion,fill=ModalChoice)) +
  geom_bar(stat="identity")
#ggsave("modal_dist.pdf")
ggplot(t, aes(x=EvidenceType, y=Proportion, color=ModalChoice, group=ModalChoice)) +
  geom_point() +
  geom_line()
#ggsave("modal_choices_points.pdf")
# histogram of modal choice by item and evidence strength, restricted to the
# indirect-evidence trials
i = d_trim[d_trim$evidence_type=="indirect",]
t = as.data.frame(prop.table(table(i$freq,i$response),mar=1))
head(t)
colnames(t) = c("EvidenceStrength","Modal","Proportion")
t$ModalChoice = factor(x=as.character(t$Modal),levels=c("bare","must","probably","might"))
ggplot(t, aes(x=EvidenceStrength,y=Proportion,fill=ModalChoice)) +
  geom_bar(stat="identity")
ggsave("../results/indirect_modal_dist.pdf")
ggplot(t, aes(x=EvidenceStrength, y=Proportion, color=ModalChoice, group=ModalChoice)) +
  geom_point() +
  geom_line()
ggsave("../results/indirect_modal_choices_points.pdf")
|
46909b3939ff661b2e1cdac392f3760d532e42e3
|
46a6ae709a45d23694a8233e9ff9318645c52cb6
|
/03 - UI Design/Tabs/app.R
|
4ac8ef23dea0d258cbd381d8582d9af83f606693
|
[] |
no_license
|
lecy/shiny-demo
|
031f2abeb74584a84e5c8ab86ff3b81016f05cf7
|
94fc8f71cdc654d232bcce0e971df6284307a4c4
|
refs/heads/master
| 2021-01-10T09:16:15.048932
| 2016-04-01T11:51:57
| 2016-04-01T11:51:57
| 55,196,646
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,460
|
r
|
app.R
|
# Single-file Shiny app: UI and server live together in one script, which makes
# the whole app easy to copy-paste into an R console and run immediately.

## USER INTERFACE -------------------------------------------------------------
app_ui <- fluidPage(
  # Application title
  titlePanel("Hello Shiny!"),
  sidebarLayout(
    # Slider controlling the number of histogram bins.
    sidebarPanel(
      sliderInput("bins",
                  "Number of bins:",
                  min = 1,
                  max = 50,
                  value = 30)
    ),
    # Rendered histogram output.
    mainPanel(
      plotOutput("distPlot")
    )
  )
)

## SERVER ---------------------------------------------------------------------
# renderPlot() makes the expression reactive: it re-runs whenever input$bins
# changes, and its return value is treated as a plot.
app_server <- function(input, output)
{
  output$distPlot <- renderPlot({
    eruption_waits <- faithful[, 2]  # Old Faithful Geyser data
    bin_breaks <- seq(min(eruption_waits), max(eruption_waits),
                      length.out = input$bins + 1)
    # Draw the histogram with the user-selected number of bins.
    hist(eruption_waits, breaks = bin_breaks, col = 'darkgray', border = 'white')
  })
}

# LAUNCH THE APP !
shinyApp( ui = app_ui, server = app_server )
|
abd9dc08380a0ca4ca213d8cb87d23b2230079c3
|
2eae755d5619934c814a2aec3e8ff01a69ee727f
|
/04/src/04_problem_04.R
|
fc1fd9b8cebffd1801805da6e4afc706bfaba801
|
[] |
no_license
|
tjwhalenUVA/664-Homework
|
8535877e0f2400ae3544888d52a5f052f2f8144d
|
2cb524132d0906d89a65aec3f5d5562d889d6e5c
|
refs/heads/master
| 2021-05-02T00:34:02.722399
| 2018-06-08T17:30:24
| 2018-06-08T17:30:24
| 120,946,506
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 648
|
r
|
04_problem_04.R
|
# Problem 4: triplot comparing the gamma prior, the gamma posterior, and the
# normalized likelihood for the car arrival-rate parameter. Relies on
# p2_alpha/p2_beta, p3_alpha_star/p3_beta_star, sum_xi and occurences being
# defined by the earlier problem scripts.

# Evaluation grid for the three densities.
x <- seq(0, 6, by = 0.001)
# Prior density.
p4_prior <- dgamma(x, shape = p2_alpha, scale = p2_beta)
# Posterior density.
p4_post <- dgamma(x, shape = p3_alpha_star, scale = p3_beta_star)
# Normalized likelihood, expressed as a gamma density.
p4_nl <- dgamma(x, shape = sum(sum_xi) + 1, scale = 1 / sum(occurences))

# Wide table of the three curves, reshaped to long form for one line per density.
p4_densities <- data.frame('X' = x,
                           'prior' = p4_prior,
                           'post' = p4_post,
                           'normLik' = p4_nl)
p4.plot <- p4_densities %>%
    gather(Probability, Density, -X) %>%
    ggplot() +
    geom_line(aes(x = X, y = Density, color = Probability),
              stat = 'identity',
              size = 1.5) +
    theme_dark() +
    labs(title = 'Triplot of Car Arrival Rates')
|
910879c4713c20b0574338c397360c581a43a846
|
bce2e59b13e142d0e32eb5829dbc2863f2ac2ceb
|
/Pushover/key-api.R
|
3ca91b3f9cf4c73af13ae469ad17a5a4aaac234f
|
[] |
no_license
|
suraggupta/r-helper
|
448096224a08e9002ece202ac2ca1ba889d4e5f7
|
8cb2c3197b93de7bb41b89357119725f9995c622
|
refs/heads/master
| 2021-06-13T09:28:24.203891
| 2017-04-07T04:21:43
| 2017-04-07T04:21:43
| 84,502,143
| 3
| 5
| null | 2017-04-07T16:49:56
| 2017-03-10T00:35:58
|
HTML
|
UTF-8
|
R
| false
| false
| 106
|
r
|
key-api.R
|
## Pushover credential template -- NOT runnable as-is.
## Replace each <...> placeholder with a quoted string, e.g.
##   user_key <- "abc123"
## before sourcing; the bare angle-bracket placeholders are not valid R syntax.
## NOTE(review): keep real keys out of version control (use environment
## variables or an untracked local file instead).
##
user_key <- <Your user key here>
user_api <- <Your API key here>
user_device <- <Your deivce name here>
|
4fa5721efb167fe930b7f92654b93bf819671461
|
c3d0a413118cc0aa48f5e1279b1e33a66c64b386
|
/Scripts/Econ580_code.R
|
27a3d0d16a2e2fbfd059c3b544ec81d1ba120f85
|
[] |
no_license
|
andykang8099/American-Interstate-Migration-Pattern-Analysis
|
3afeff4c57cce657796c03a63eef9c774b1f8707
|
fe661e6f4c6186acf0c8f1416842c2c052e1b9c0
|
refs/heads/main
| 2023-02-10T21:52:07.755816
| 2021-01-08T09:49:16
| 2021-01-08T09:49:16
| 319,604,454
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,222
|
r
|
Econ580_code.R
|
# American interstate migration analysis -- data prep.
# Loads the IPUMS extract usa_00005.csv and builds state-by-year income and
# unemployment summaries.
library(dplyr)
library("cdlTools")
library("naniar")
library(ggplot2)
library(grid)
library("fastDummies")
library(tibble)
library(interactions)
library(jtools)

# Plot colors used by the density plots below.
fill <- "#4271AE"
lines <- "#1F3552"

data1 <- read.csv("usa_00005.csv")
head(data1) # 34291600 obs
# Optional FIPS-code-to-state-name conversion (slow row loop, left disabled):
#for ( i in 1: nrow(data1)) {
# data1[i,2]=fips(data1[i,2],to='Name')
#}
#clean the data
# Fix: the original ran sum(is.na(result3)) here, before result3 exists, which
# errors when the script is sourced top-to-bottom; the check is disabled (it
# belongs after result3 is built below).
#sum(is.na(result3)) # no missing values
# IPUMS wage codes 999999/999998 are missing-value sentinels and 0 means no
# wage income; recode all three to NA.
data2 <- data1 %>% replace_with_na(replace = list(INCWAGE = c(999999,999998,0)))

# First, deal with the income: mean/median wage among the employed
# (EMPSTAT == 1) by state and year.
#data2 %>% group_by(YEAR, STATEFIP) %>% filter(STATEFIP==6) %>% summarize(max(INCWAGE)) # Show the max wage in CA in each year
result1 <- data2 %>%
  filter(EMPSTAT == 1) %>%
  select(YEAR, STATEFIP, INCWAGE) %>%
  group_by(YEAR, STATEFIP) %>%
  summarise(mean_income = mean(INCWAGE, na.rm = TRUE),
            median_income = median(INCWAGE, na.rm = TRUE))

# Then, deal with unemployment rate: unemployed (EMPSTAT == 2) as a share of
# the labor force (employed + unemployed) by state and year.
result2 <- data2 %>%
  filter(EMPSTAT == 1 | EMPSTAT == 2) %>%
  select(YEAR, STATEFIP, EMPSTAT) %>%
  group_by(YEAR, STATEFIP) %>%
  summarise(unemployment_rate = sum(EMPSTAT == 2) / (sum(EMPSTAT == 1) + sum(EMPSTAT == 2)))
# Next, read other relevant variables
# Then, deal with the mean house price.
# VALUEH codes 9999999/9999998 are missing-value sentinels and 0 is treated as
# not meaningful, so all three are recoded to NA before summarising.
data3 <- read.csv("usa_00006.csv")
# Fix: the original evaluated a bare `data3` BEFORE the read.csv above, which
# errors ("object 'data3' not found") when the script is sourced; the
# inspection now happens after loading.
data3
data4 <- data3 %>% replace_with_na(replace = list(VALUEH = c(9999999,9999998,0)))
# State-by-year mean and median house value.
result3= data4 %>% group_by(YEAR, STATEFIP) %>% summarise(mean_house_value=mean(VALUEH,na.rm=TRUE),median_house_value=median(VALUEH,na.rm=TRUE))
result3
# Merge income, unemployment and housing summaries into one state-by-year panel.
new1=left_join(result1,result2,by=c("YEAR","STATEFIP"))
new2=left_join(new1,result3,by=c("YEAR","STATEFIP"))
# Inspect the state/year with the lowest mean house value.
new2[new2$mean_house_value==min(new2$mean_house_value),]
# Right-skewed data (The result shows that why I use the mean instead of using average)
d1 = data2 %>% filter ( EMPSTAT == 1 , INCWAGE>0)
# Density of individual pre-tax wages (motivates the mean-vs-median choice).
p1 <- ggplot(d1, aes(x = INCWAGE)) +
geom_density(fill = fill, colour = lines,
alpha = 0.6)+
labs(x = "pre-tax wage", title = "The density plot of wage for individuals in survey")
p1
# Density of individual housing values.
p2 <- ggplot(data4, aes(x = VALUEH)) +
geom_density(fill = fill, colour = lines,
alpha = 0.6)+
labs(x = "housing price", title = "The density plot of housing price for individuals in survey")
p2
#knitr::kable(new1)
# Add income tax information
# income_tax_info.R is expected to define low_tax_rate_2008..2018 and
# high_tax_rate_2008..2018 (one entry per state per year), appended as columns.
source("income_tax_info.R")
low_tax=c(low_tax_rate_2008,low_tax_rate_2009,low_tax_rate_2010,low_tax_rate_2011,low_tax_rate_2012,low_tax_rate_2013,low_tax_rate_2014,low_tax_rate_2015,low_tax_rate_2016,low_tax_rate_2017,low_tax_rate_2018)
high_tax=c(high_tax_rate_2008,high_tax_rate_2009,high_tax_rate_2010,high_tax_rate_2011,high_tax_rate_2012,high_tax_rate_2013,high_tax_rate_2014,high_tax_rate_2015,high_tax_rate_2016,high_tax_rate_2017,high_tax_rate_2018)
new2[, "low_tax"]=low_tax
new2[, "high_tax"]=high_tax
new2
#Add age, ethnicity, education effect
data5=read.csv("usa_00007.csv")#min(data5$AGE)
#table(data5$STATE,data5$YEAR)
# Age-bracket shares by state/year.
# NOTE(review): age exactly 30 falls outside both the <30 and >=31 brackets --
# confirm the intended bracket edges.
result4=data5 %>% select(YEAR,STATEFIP,AGE) %>% group_by(YEAR,STATEFIP) %>% summarise(age16_30=sum(AGE<30 & AGE>=16)/sum(AGE>0),age31_60=sum(AGE<60 & AGE>=31)/sum(AGE>0),age60=sum(AGE>=60)/sum(AGE>0))
range(data5$EDUC)
# Share of respondents coded white (RACWHT == 2).
result5=data5 %>% select(YEAR,STATEFIP,RACBLK,RACWHT) %>% group_by(YEAR,STATEFIP) %>% summarise(white=sum(RACWHT==2)/sum(RACWHT>0))
# Share with EDUC >= 7 -- presumably a high-school-plus cutoff under the IPUMS
# coding; verify against the codebook.
result6=data5 %>% select(YEAR,STATEFIP,EDUC) %>% group_by(YEAR,STATEFIP) %>% summarise(educ12=sum(EDUC>=7)/sum(EDUC<20))
new3=left_join(result4,result5,by=c("YEAR","STATEFIP"))
new4=left_join(new3,result6,by=c("YEAR","STATEFIP"))
new5=left_join(new2,new4,by=c("YEAR","STATEFIP"))
colnames(new5)[2]="State"
# Add the fixed effects of states and year
new6= fastDummies::dummy_cols(new5,select_columns = c("YEAR","State"))
#write.csv(new6,'table1.csv')
# Add out-migration rate factor
# The sourced scripts define or2008..or2018 (per-state rates for each year).
source("Out_migration rate(or).R")
out_migration=c(or2008,or2009,or2010,or2011,or2012,or2013,or2014,or2015,or2016,or2017,or2018)
new7=add_column(new6,out_migration, .after="State")
# Crime rate data
# crime_rate.R is expected to define crime_rate and c15..c18.
source("crime_rate.R")
crime_rate=as.vector(crime_rate[,1])
crime_rate_all=c(crime_rate,c15,c16,c17,c18)
crime_rate_all=unlist(crime_rate_all)
crime_rate_all=as.numeric(crime_rate_all)
class(crime_rate_all)
new8=add_column(new7,crime_rate_all, .after="educ12")
quantile ( crime_rate_all )
# Add new migration rate (revised grouping of the out-migration measure).
source("out_migration_group_revised.R")
out_migration_new=c(or2008,or2009,or2010,or2011,or2012,or2013,or2014,or2015,or2016,or2017,or2018)
new9=add_column(new8,out_migration_new, .after="State")
# Do the regression analysis
# Add IV
# Instrument construction from the CPS extract: metro residence share, share
# who moved for housing (DIFFMOB == 2), and share usually working < 10 hours.
data6=read.csv("cps_00006.csv")
data7=data6 %>% replace_with_na(replace = list(METRO = c(0,9)))
data8=data7 %>% replace_with_na(replace = list(UHRSWORKLY = 999))
data9=data8 %>% replace_with_na(replace = list(DIFFMOB = 0))
new10= data9 %>% group_by(YEAR,STATEFIP) %>% summarise(metro=mean(METRO== 3,na.rm=TRUE), hp = mean(DIFFMOB == 2,na.rm=TRUE), work=mean(UHRSWORKLY< 10,na.rm=TRUE)) %>% select(YEAR,STATEFIP,hp,metro,work) %>% rename(State=STATEFIP)
# Backfill the first 51 rows of hp from rows 52:102.
# NOTE(review): this assumes rows arrive as 51-state blocks per year -- confirm.
new10[1:51,"hp"]=new10[52:102,"hp"]
new11=left_join(new9,new10)
sd(new11$crime_rate_all)
# With only main effects not fixed effects
# NOTE(review): models below pick regressors by column POSITION (c(3,5,7,...)),
# which silently breaks if the panel's column order ever changes.
colnames(new11)
model1=lm(data=new11[,c(3,5,7,8,10,11,80,81,82)],out_migration_new*100 ~ . )
summary(model1)
# After adding the fixed effects
model2=lm(data=new11[,c(3,5,7,8,10,11,18:82)],out_migration_new ~ . )
summary(model2)
# After adding all the factors
# Rows 9 and 60 are dropped as outliers (see the commented block below).
colnames(new11)
model3=lm(data=new11[c(-9,-60),c(3,5,7,8,10,11,12:82)],out_migration_new ~ crime_rate_all*mean_house_value+.)
summary(model3)
# Draw the interaction plots
# NOTE(review): pred is crime_rate_all yet x.label says "housing price", and
# the legend title says "crime rate" for the house-value moderator -- the two
# labels look swapped; confirm before publishing the figure.
interact_plot(model3, pred = crime_rate_all, modx = mean_house_value, x.label = "housing price",y.label = "out-migration rate", legend.main = "crime rate", colors=c("red","green"))
# Delete outlier points and see what will happen
#dl1=new9
#dl1=dl1[c(-2,-9,-60),]
#model_dl1=lm(data=dl1[,c(3,5,7,8,10:79)],dl1$out_migration_new ~ #dl1$crime_rate_all*dl1$mean_house_value + . )
#summary(model_dl1)
#anova(model2)
# Do the regression with median level instead
colnames(new11)
model4=lm(data=new11[c(-9,-60),c(3,6,7,9,10:82)],out_migration_new ~ crime_rate_all*median_house_value + . )
summary(model4)
#anova(model3,model4)
# Compare fitted densities from the mean-based and median-based models.
m1=predict(model3)
m2=predict(model4)
hist(new11[c(-9,-60),]$out_migration_new,probability = TRUE)
plot(density(new11[c(-9,-60),]$out_migration_new),main="Density plot of mean wage/housing price, median wage/housing price")
lines(density(m1),col="red",lwd = 1,lty = 2)
lines(density(m2),col="blue",lwd = 1,lty = 1)
legend("topright", legend = c("Median", "Mean"),
col = c("red", "blue"), lty = 2:1, cex = 0.5)
# Compare the relationship between income differentials and out-migration rate
io1= new8 %>% select (YEAR, out_migration, mean_income) %>% group_by (YEAR) %>% summarise (sum_migration = mean(out_migration),income_diff=max(mean_income)-min(mean_income)) %>% mutate(year=as.character(YEAR), sum_migration10=sum_migration*100)
plot(io1$year,io1$sum_migration10,ylim=c(1, 30))
# NOTE(review): x and y2 are never defined in this script, so this lines()
# call errors as written -- confirm which series it was meant to overlay.
lines(x, y2, pch = 18, col = "blue", type = "b", lty = 2)
# Add CPI deflators for income and the national unemployment rate (annual).
# NOTE(review): cpi has 10 entries while nur covers 11 years (2008-2018) --
# confirm which year the CPI series starts in before joining on year.
cpi <- c(1.1669, 1.171, 1.1521, 1.1169, 1.0942, 1.0784, 1.0612, 1.06, 1.0468, 1.0249)
# Fix: the original wrote "0,089", which split one value into two elements
# (0 and 89) and produced a 12-element vector; 2011's rate is 0.089.
nur <- c(0.058, 0.093, 0.096, 0.089, 0.081, 0.074, 0.062, 0.053, 0.049, 0.044, 0.039)
# sample statistics
mean(new11$work)
range(new11$work)
sd(new11$crime_rate_all)
# Prediction: AR(2) forecast of the average out-migration rate.
source("out_migration_group_revised.R")
out_migration_new=c(or2008,or2009,or2010,or2011,or2012,or2013,or2014,or2015,or2016,or2017,or2018)
migration=out_migration_new*100
migration
class(migration)
# One row per state per year; average across states to a yearly national rate.
# NOTE(review): data1 here overwrites the IPUMS extract loaded at the top of
# the script -- re-running earlier sections after this point needs a reload.
year=rep(2008:2018,each=51)
year=as.character(year)
data1=data.frame(year,migration)
mg=data1%>%group_by(year)%>%summarise(sgd=mean(migration))
mg=mg[,2]
mg=unlist(mg)
mgnew=mg[3:11]
library(lmtest)
# NOTE(review): GDPGRSub and N are used below before being defined (N is only
# set two lines later), and dynlm() is called without library(dynlm) -- this
# section only runs in an interactive session where those already exist.
GDPGR_level <- as.numeric(mgnew)
GDPGR_lags <- as.numeric(GDPGRSub[-N])
GDPGR_AR2 <- dynlm(ts(GDPGR_level) ~ L(ts(GDPGR_level)) + L(ts(GDPGR_level), 2))
coeftest(GDPGR_AR2)
N=length(mgnew)
# One-step-ahead forecast from the fitted AR(2) coefficients.
forecast <- c("2013:Q1" = coef(GDPGR_AR2) %*% c(1, GDPGR_level[N-1], GDPGR_level[N-2]))
# Hard-coded forecast path for 2019-2023.
all=c(2.918342,2.913235,2.911159,2.906159,2.903159)
year=c(2019:2023)
plot(year,all,ylim=c(1,5))
data22=data.frame(year,all)
data22 %>%
ggplot( aes(x=year, y=all)) +
geom_line() +
geom_point()+
labs(y="out-migration rate",x="Year")+
ylim(2.5,3)
# Actual vs predicted yearly migration rates.
d1=data.frame(new2$YEAR,out_migration_new)
d2=d1%>%group_by(new2.YEAR)%>%summarize(ous=mean(out_migration_new)) %>% mutate(new2.YEAR=as.factor(new2.YEAR))
# NOTE(review): ous only exists as a column of d2, so the bare `ous` below
# errors unless it was created interactively -- likely intended d2$ous.
ous
ous[1:3]=c(0.026969279,0.026468078,0.02866918)
Actual_Migration_rate=ous
Predicted_Migration_rate=c(2.61,2.59,2.89,2.97,2.95,3.12,3.02,3.04,2.98,2.96,2.9)
year=c(2008:2018)
year=as.factor(year)
data23=data.frame(year,Actual_Migration_rate,Predicted_Migration_rate)
colors <- c("Actual Migration Rate" = "black", "Predicted Migration Rate" = "red")
data23 %>%
ggplot( aes(x=year,group=1)) +
geom_line(aes(y=ous*100,color="Actual Migration Rate")) +
geom_point(aes(y=ous*100,color="Actual Migration Rate"))+
geom_line(aes(y=Predicted_Migration_rate,color="Predicted Migration Rate")) +
geom_point(aes(y=Predicted_Migration_rate,color="Predicted Migration Rate"))+
labs(y="out-migration rate",x="Year",color=" ")+ scale_color_manual(values = colors)
d3=new2%>%group_by(YEAR)%>%summarize(HP=mean(median_house_value),UN=mean(unemployment_rate), IC=mean(median_income))
|
260b420f79a7cc031499f3c4d793684f986f8158
|
f5d0634ca05df154301645e3f0c25b66776c6e22
|
/financialdata.R
|
408cd0dfd4f887371b05d7082d0d87a65e001b2c
|
[] |
no_license
|
YanjingWang/Finance-Project
|
d1b51e6854c863d364db1b05df91495b06bad1e3
|
e2b5e7fd04b4024b9916eff6aefd50a32585110a
|
refs/heads/main
| 2023-04-16T18:34:13.390842
| 2021-04-30T03:31:53
| 2021-04-30T03:31:53
| 363,013,337
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,370
|
r
|
financialdata.R
|
install.packages("quantmod")
install.packages("plyr")
install.packages("xts")
install.packages("zoo")
install.packages("TTR")
library(TTR)
library(zoo)
library(xts)
library(quantmod)
library(plyr)
data<-as.vector(read.csv("/Users/suyue/Desktop/fe800/data/financial/2-21-FinancialData.csv",sep = ","))
#==========================
data=data+1
log.data<-log(data[,2:ncol(data)],base=exp(1))
#==========================
table<-matrix(0,ncol = 7,nrow = ncol(log.data))
for(i in 1:ncol(log.data)){
table[i,]<-as.numeric(quantile(log.data[,i],
probs = c(0.05, 0.25,
0.45,0.5,0.55,
0.75, 0.95),na.rm=TRUE))
}
disp.col<-table[,7]-table[,1]
skew.col<-(table[,7]-table[,4])-(table[,4]-table[,1])
left.col<-table[,3]-table[,2]-(table[,2]-table[,1])
right.col<-table[,7]-table[,6]-(table[,6]-table[,5])
mean.col<-c()
data[is.na(data)]<-0
for(i in 1:ncol(data)){
mean.col[i]<-mean(data[,i])
}
outcome<-cbind(mean.col,disp.col,skew.col,left.col,right.col)
mov.ave<-SMA(skew.col)
#==============GDP============
# Quarterly US GDP from Quandl/FRED, converted to percent growth.
# SECURITY(review): the Quandl API key is hard-coded below; move it out of
# source (e.g. an environment variable) before sharing this file.
install.packages("Quandl")
library(Quandl)
Quandl.api_key("-gkmQ_iEFWGzMBHcbn99")
mydata = Quandl("FRED/GDP")#quartly GDP
gdp = as.vector(Quandl("FRED/GDP", type="raw"))
gdp$Date<-as.Date(gdp$Date)
gdp<-subset(gdp,Date>="1973-01-01",select = c(Date,Value))
# Reverse row order (presumably the feed arrives newest-first).
gdp<- gdp[seq(dim(gdp)[1],1),]
# Quarter-over-quarter percent growth; the first observation is set to 0.
GDPreturns = gdp[1:nrow(gdp),]
for (j in 2:nrow(gdp)) {
GDPreturns[j,2] = (as.numeric(gdp[j,2])-
as.numeric(gdp[j-1,2]))/as.numeric(gdp[j-1,2])*100
}
GDPreturns[1,2]=0
plot(GDPreturns,type="l",col="red",xlab="year",ylab="GDP Growth")
plot(skew.col,type="l",col="blue")
#============2.2=============
#moving average
# Correlate GDP growth with the smoothed skewness series; the first 9 entries
# are skipped (presumably the SMA burn-in NAs).
cor.table<-cbind(GDPreturns$Value[10:nrow(GDPreturns)],mov.ave[10:length(mov.ave)])
cor(cor.table, y = NULL, use = "everything",
method = c("pearson", "kendall", "spearman"))
# Pre-built correlation file: Date, GDP growth (Value), NBER indicator
# (USRECQP), and the moving-average skewness column mov.ave.
cor.file.75<-as.data.frame(read.csv("/Users/suyue/Desktop/fe800/data/correlation.csv"))
cor.file.75$Date<-as.Date(cor.file.75$Date,format='%m/%d/%Y')
cor.file.75$mov.ave<-as.numeric(cor.file.75$mov.ave)
# Sub-samples: post-1985, 1984-2008, and post-2007.
cor.file.86<-subset(cor.file.75,Date >"1985-10-01",select = c(Value,USRECQP,mov.ave))
cor.file.84.08<-subset(cor.file.75,Date>="1984-10-01"&Date <"2008-10-01",select = c(Value,USRECQP,mov.ave))
cor.file.08<-subset(cor.file.75,Date >"2007-10-01",select = c(Value,USRECQP,mov.ave))
# NOTE: naming this vector `list` masks base::list for non-call uses.
list<-c(round(cor(cor.file.75$Value,cor.file.75$mov.ave),2),
round(cor(cor.file.75$USRECQP,cor.file.75$mov.ave),2),
round(cor(cor.file.86$Value,cor.file.86$mov.ave),2),
round(cor(cor.file.86$USRECQP,cor.file.86$mov.ave),2),
round(cor(cor.file.08$Value,cor.file.08$mov.ave),2),
round(cor(cor.file.08$USRECQP,cor.file.08$mov.ave),2))
matrix(list,ncol = 3,nrow = 2,dimnames = list(c("GDP Growth","Expansion Indicator"),c("1973–2017","1986–2017","2008-2017")))
cor(GDPreturns$Value,skew.col)
#===========Moody's baa========
# Aaa/Baa spread inputs.
# NOTE(review): Aaa and Baa both re-read the same financial CSV -- confirm
# the intended bond-yield files.
spread_aaa <- Quandl("FRBP/SPR_BAA_AAA_MN")
install.packages("jrvFinance")
library(jrvFinance)
Aaa<-as.vector(read.csv("/Users/suyue/Desktop/fe800/data/financial/2-21-FinancialData.csv",sep = ","))
Baa<-as.vector(read.csv("/Users/suyue/Desktop/fe800/data/financial/2-21-FinancialData.csv",sep = ","))
#===============logit model============
#standardized regressors
install.packages("standardize")
library(standardize)
# Column-standardize the five distributional measures.
scaled.reg<-scale(outcome)
# NBER quarterly recession indicator since 1973.
nber<-as.vector(read.csv("/Users/suyue/Desktop/fe800/data/USRECQP.csv",sep = ","))
nber$DATE<-as.Date(nber$DATE)
nber<-subset(nber,DATE>="1973-01-01",select = c(DATE,USRECQP))
# Flip the NBER recession dummy into an EXPANSION indicator:
# 0 (expansion) becomes 1; anything else (recession, coded 1) becomes 0.
# Vectorized replacement for the original element-by-element for/if loop --
# identical semantics for the 0/1 values in USRECQP.
nber$USRECQP <- as.integer(nber$USRECQP == 0)
#correlation
#write csv
#a<-cbind(GDPreturns,nber,mov.ave)
#write.csv(a,file="correlation.csv")
#Pseudo.R2========
# Pseudo R-squared measures for a fitted binomial glm.
#
# Expands each (possibly weighted/grouped) observation into individual
# Bernoulli trials, computes the log-likelihood of the fitted model and of an
# intercept-only null refit on that expanded data, and returns McFadden,
# Cox & Snell (maximum-likelihood) and Nagelkerke (Cragg & Uhler) pseudo
# R-squared values together with the two log-likelihoods and the G2
# likelihood-ratio statistic.
#
# object: a glm fit with family = binomial().
# Returns a named numeric vector: llh, llhNull, G2, McFadden, r2ML, r2CU.
Pseudo.R2 <- function(object) {
  stopifnot(object$family$family == "binomial")
  # Intercept-only null model refit on the same data.
  null_fit <- update(object, ~ 1)
  weights_vec <- object$prior.weights
  prop_success <- object$y  # proportion of successes per (weighted) row
  success_n <- round(prop_success * weights_vec)
  failure_n <- weights_vec - success_n
  fitted_full <- object$fitted.values
  # Null-model fitted values; drop rows the full model removed for NAs so the
  # two fitted vectors line up.
  if (is.null(object$na.action)) {
    fitted_null <- null_fit$fitted.values
  } else {
    fitted_null <- null_fit$fitted.values[-object$na.action]
  }
  # Expand grouped counts into one 0/1 outcome per underlying trial.
  trial_counts <- cbind(success_n, failure_n)
  outcome <- apply(trial_counts, 1, function(x) {c(rep(1, x[1]), rep(0, x[2]))})
  outcome <- if (is.list(outcome)) unlist(outcome) else c(outcome)
  # Repeat each fitted probability once per trial it covers.
  expand_probs <- function(p) {
    rep_out <- apply(cbind(p, weights_vec), 1, function(x) rep(x[1], x[2]))
    if (is.list(rep_out)) unlist(rep_out) else c(rep_out)
  }
  prob_full <- expand_probs(fitted_full)
  prob_null <- expand_probs(fitted_null)
  ll_full <- sum(log(dbinom(x = outcome, size = 1, prob = prob_full)))
  ll_null <- sum(log(dbinom(x = outcome, size = 1, prob = prob_null)))
  n_trials <- length(outcome)
  lr_stat <- -2 * (ll_null - ll_full)  # G2 likelihood-ratio statistic
  mcfadden <- 1 - ll_full / ll_null
  # Cox & Snell / maximum-likelihood pseudo R-squared, and its upper bound.
  cox_snell <- 1 - exp((2 * (ll_null - ll_full)) / n_trials)
  cox_snell_max <- 1 - exp(ll_null * 2 / n_trials)
  # Nagelkerke / Cragg & Uhler's pseudo R-squared.
  nagelkerke <- cox_snell / cox_snell_max
  c(llh = ll_full, llhNull = ll_null, G2 = lr_stat, McFadden = mcfadden,
    r2ML = cox_snell, r2CU = nagelkerke)
}
#=====coefficient==========
# Univariate logistic regressions of the expansion indicator (column V6, from
# nber$USRECQP) on each standardized distributional measure, plus a joint fit.
coeff<-as.data.frame(cbind(outcome,nber$USRECQP))
coeff.mean<-glm(V6 ~ mean.col, data = coeff,family = binomial())
coeff.disp<-glm(V6 ~ disp.col, data = coeff,family = binomial())
coeff.skew<-glm(V6 ~ skew.col, data = coeff,family = binomial())
coeff.left<-glm(V6 ~ left.col, data = coeff,family = binomial())
coeff.right<-glm(V6 ~ right.col, data = coeff,family = binomial())
summary(coeff.mean)
summary(coeff.disp)
summary(coeff.skew)
coefficients(coeff.skew)
summary(coeff.left)
summary(coeff.right)
# McFadden pseudo R^2 (element 4 of Pseudo.R2's return vector) for each fit.
Pseudo.R2(coeff.mean)[4]
Pseudo.R2(coeff.disp)[4]
Pseudo.R2(coeff.skew)
Pseudo.R2(coeff.left)[4]
Pseudo.R2(coeff.right)[4]
#nber$USRECQP <- factor(nber$USRECQP)
coeff.table<-glm(V6 ~ mean.col+disp.col+skew.col+left.col+right.col, data = coeff,family = binomial())
summary(coeff.table)
coeff.table$family
Pseudo.R2(coeff.table)[4]
#==========================In-sample Predictive Regressions==================
#===========GDP growth indicator======
install.packages("tseries")
library(tseries)
# ADF stationarity check on the GDP growth series.
gdp$Date<-as.Date(gdp$Date)
GDPreturns.ts<-GDPreturns
GDPreturns.ts<-xts(GDPreturns.ts[,-1], order.by=as.Date(GDPreturns.ts[,1], "%m/%d/%Y"))
adf.test(GDPreturns.ts,alternative = "stationary")
nrow(GDPreturns.ts)
# Restrict to rows 52:180, then build slices by row offset: yth is the
# several-steps-ahead target, y1..y4 are earlier slices, and M*t / M*t_1 are
# contemporaneous and once-lagged standardized measures.
new.GDPreturns<-GDPreturns[52:180,]
yth=new.GDPreturns[9:nrow(new.GDPreturns),]
y4=new.GDPreturns[4:(nrow(new.GDPreturns)-5),]
y3=new.GDPreturns[3:(nrow(new.GDPreturns)-6),]
y2=new.GDPreturns[2:(nrow(new.GDPreturns)-7),]
y1=new.GDPreturns[1:(nrow(new.GDPreturns)-8),]
new.scaled.reg<-scaled.reg[52:180,]
M1t=new.scaled.reg[5:(nrow(new.GDPreturns)-4),1]
M2t=new.scaled.reg[5:(nrow(new.GDPreturns)-4),2]
M3t=new.scaled.reg[5:(nrow(new.GDPreturns)-4),3]
M4t=new.scaled.reg[5:(nrow(new.GDPreturns)-4),4]
M5t=new.scaled.reg[5:(nrow(new.GDPreturns)-4),5]
M1t_1=new.scaled.reg[4:(nrow(new.GDPreturns)-5),1]
M2t_1=new.scaled.reg[4:(nrow(new.GDPreturns)-5),2]
M3t_1=new.scaled.reg[4:(nrow(new.GDPreturns)-5),3]
M4t_1=new.scaled.reg[4:(nrow(new.GDPreturns)-5),4]
M5t_1=new.scaled.reg[4:(nrow(new.GDPreturns)-5),5]
# Fed funds predictor: contemporaneous (ztf) and one-period lag (ztf_1),
# both standardized.
fedf<-as.vector(read.csv('/Users/suyue/Desktop/fe800/data/economic_predictors/FedF.csv',sep=','))
ztf<-fedf$Value[5:(nrow(fedf)-4)]
ztf<-scale(ztf)
ztf_1<-fedf$Value[4:(nrow(fedf)-5)]
ztf_1<-scale(ztf_1)
# Term-spread predictor: contemporaneous (ztsp) and one-period lag (ztsp_1),
# both standardized.
term.sp<-as.vector(read.csv('/Users/suyue/Desktop/fe800/data/economic_predictors/termspread.csv',sep=','))
ztsp <- scale(term.sp$Value[5:(nrow(term.sp) - 4)])
# Fix: the original built the lagged term spread from fedf$Value (a copy-paste
# from the Fed-funds block above), so ztsp_1 silently duplicated ztf_1;
# it now lags the term-spread series itself.
ztsp_1 <- scale(term.sp$Value[4:(nrow(term.sp) - 5)])
# Assemble the regression frame; coeff.gdp additionally drops the first 9 rows.
temp<-cbind(new.GDPreturns$Value[5:(nrow(new.GDPreturns)-4)],yth$Value,y4$Value,y3$Value,y2$Value,y1$Value,M1t,M2t,M3t,M4t,M5t,M1t_1,M2t_1,M3t_1,M4t_1,M5t_1,ztf,ztf_1,ztsp,ztsp_1)
colnames(temp)<-c("yt","Yh","y4",'y3','y2','y1','M1t','M2t','M3t','M4t','M5t','M1t1','M2t1','M3t1','M4t1','M5t1','FedF','FedF_1',"TermSpread","TermSpread_1")
temp<-as.data.frame(temp)
coeff.gdp<-temp[10:nrow(temp),]
#====lm regression============
# Benchmark: target on its own four lagged slices; then augment with each
# measure in turn.
# NOTE(review): the formulas mix renamed columns (M1t1) with names like ztf /
# ztsp that resolve from the global environment rather than the supplied data
# frame -- confirm this mixing is intended.
fitbench<-lm(Yh ~ y4+y3+y2+y1, data=temp)
ab<-summary(fitbench)
ab$r.squared
fit.m1 <- lm(Yh ~ y4+y3+y2+y1+ M1t + M1t1, data=temp)
m1<-summary(fit.m1)
m1$r.squared
coefficients(fit.m1)
fit.m2<-lm(Yh ~ y4+y3+y2+y1+M2t + M2t1, data=coeff.gdp)
m2<-summary(fit.m2)
coefficients(fit.m2)
m2$r.squared
fit.m3<-lm(Yh ~ y4+y3+y2+y1+M3t+M3t1, data=coeff.gdp)
m3<-summary(fit.m3)
coefficients(fit.m3)
m3$r.squared
fit.m4<-lm(Yh ~ y4+y3+y2+y1+M4t + M4t1, data=coeff.gdp)
m4<-summary(fit.m4)
coefficients(fit.m4)
m4$r.squared
fit.m5<-lm(Yh ~ y4+y3+y2+y1+M5t + M5t1, data=coeff.gdp)
m5<-summary(fit.m5)
coefficients(fit.m5)
fit.mt<-lm(Yh ~ y4+y3+y2+y1+ M1t + M1t1+ M2t + M2t1+ M3t + M3t1+ M4t + M4t1+M5t + M5t1, data=coeff.gdp)
summary(fit.mt)
coefficients(fit.mt)
# Economic controls: Fed funds and term spread, separately then combined,
# then combined with the skewness measure M3.
fit.fed<-lm(Yh ~ y4+y3+y2+y1+ztf + ztf_1, data=temp)
mfed<-summary(fit.fed)
coefficients(fit.fed)
mfed$r.squared
fit.sp<-lm(Yh ~ y4+y3+y2+y1+ztsp + ztsp_1, data=temp)
msp<-summary(fit.sp)
coefficients(fit.sp)
msp$r.squared
fit.eco<-lm(Yh ~ y4+y3+y2+y1+ztsp + ztsp_1+ztf+ztf_1, data=temp)
meco<-summary(fit.eco)
meco$r.squared
coefficients(fit.eco)
fit.eco<-lm(Yh ~ y4+y3+y2+y1+M3t + M3t1+ztsp + ztsp_1+ztf+ztf_1, data=temp)
meco<-summary(fit.eco)
meco$r.squared
coefficients(fit.eco)
#new method:
# Autoregressive distributed-lag models (dLagM::ardlDlm) of yt on each
# predictor with lag orders p = 1, q = 4.
install.packages("dLagM")
library(dLagM)
model.ardl = ardlDlm(x = temp$M3t,
y = temp$yt, p = 1 , q = 4 , show.summary = TRUE)
MASE(model.ardl)
model.ardl = ardlDlm(x = temp$M1t,
y = temp$yt, p = 1 , q = 4 , show.summary = TRUE)
model.ardl = ardlDlm(x = temp$M2t,
y = temp$yt, p = 1 , q = 4 , show.summary = TRUE)
model.ardl = ardlDlm(x = temp$M4t,
y = temp$yt, p = 1 , q = 4 , show.summary = TRUE)
model.ardl = ardlDlm(x = temp$M5t,
y = temp$yt, p = 1 , q = 4 , show.summary = TRUE)
model.ardl = ardlDlm(x = temp$FedF,
y = temp$yt, p = 1 , q = 4 , show.summary = TRUE)
model.ardl = ardlDlm(x = temp$TermSpread,
y = temp$yt, p = 1 , q = 4 , show.summary = TRUE)
# Multi-predictor ARDL variants.
model.dlm = ardlDlm(formula = yt ~ M1t+M2t+M3t+M4t+M5t ,
data = data.frame(temp), p = 1 , q = 4, show.summary = TRUE)
model.dlm = ardlDlm(formula = yt ~ FedF+TermSpread ,
data = data.frame(temp), p = 1 , q = 4, show.summary = TRUE)
model.dlm = ardlDlm(formula = yt ~M3t+ FedF+TermSpread ,
data = data.frame(temp), p = 1 , q = 4, show.summary = TRUE)
#ardlDlmForecast(model = model.ardl , x = coeff.gdp$M3t,
# h = 4 , interval = FALSE)
coefficients(model.ardl)
#===========Macro Variables============
# Macro outcomes (consumption PCEC, fixed investment FPI) with the same lag
# design as the GDP regressions, restricted to rows 156:284 of the macro file.
macro<-as.vector(read.csv("/Users/suyue/Desktop/fe800/data/economic_predictors/Macro.csv"))
macro.v<-macro[156:284,]
adf.test(macro.v[,2])
adf.test(macro.v[,3])
#Consumption
coeff.mac<-cbind(macro.v$PCEC[9:nrow(macro.v)],macro.v$PCEC[5:(nrow(macro.v)-4)],macro.v$PCEC[4:(nrow(macro.v)-5)],macro.v$PCEC[3:(nrow(macro.v)-6)],macro.v$PCEC[2:(nrow(macro.v)-7)],macro.v$PCEC[1:(nrow(macro.v)-8)],M3t,M3t_1,ztf,ztf_1,ztsp,ztsp_1)
colnames(coeff.mac)<-c("PCEC","yt","y4","y3","y2","y1","M3t","M3t_1","ztf","ztf_1","ztsp","ztsp_1")
coeff.mac<-as.data.frame(coeff.mac)
# Benchmark lags only, then + skewness (M3), then + economic controls.
adc<-lm(PCEC~y4+y1+y2+y3,data=coeff.mac)
adc<-summary(adc)
coefficients(adc)
adc$r.squared
adc<-lm(PCEC~y4+y1+y2+y3+M3t+M3t_1,data=coeff.mac)
adc<-summary(adc)
coefficients(adc)
adc$r.squared
adc<-lm(PCEC~y4+y1+y2+y3+M3t+M3t_1+ztf+ztf_1+ztsp+ztsp_1,data=coeff.mac)
adc<-summary(adc)
adc$r.squared
model.dlm = ardlDlm(y = coeff.mac$yt , x=coeff.mac$M3t ,
data = data.frame(coeff.mac), p = 1 , q = 4, show.summary = TRUE)
model.dlm = ardlDlm(yt ~M3t + ztf+ztsp ,
data = data.frame(coeff.mac), p = 1 , q = 4, show.summary = TRUE)
coeff.mac$PCEC
#Fixed investment
# Same design with FPI as the outcome.
coeff.invest<-cbind(macro.v$FPI[9:nrow(macro.v)],macro.v$FPI[5:(nrow(macro.v)-4)],macro.v$FPI[4:(nrow(macro.v)-5)],macro.v$FPI[3:(nrow(macro.v)-6)],macro.v$FPI[2:(nrow(macro.v)-7)],macro.v$FPI[1:(nrow(macro.v)-8)],M3t,M3t_1,ztf,ztf_1,ztsp,ztsp_1)
colnames(coeff.invest)<-c("FPI","yt","y4","y3","y2","y1","M3t","M3t_1","ztf","ztf_1","ztsp","ztsp_1")
coeff.invest<-as.data.frame(coeff.invest)
adc<-lm(FPI~y4+y1+y2+y3,data=coeff.invest)
adc<-summary(adc)
coefficients(adc)
adc$r.squared
adc<-lm(FPI~y4+y1+y2+y3+M3t+M3t_1,data=coeff.invest)
adc<-summary(adc)
coefficients(adc)
adc$r.squared
adc<-lm(FPI~y4+y1+y2+y3+M3t+M3t_1+ztf+ztf_1+ztsp+ztsp_1,data=coeff.invest)
adc<-summary(adc)
adc$r.squared
# NOTE(review): x here is coeff.mac$M3t while y comes from coeff.invest --
# looks like a copy-paste from the consumption block; confirm.
model.dlm = ardlDlm(y = coeff.invest$yt , x=coeff.mac$M3t ,
data = data.frame(coeff.invest), p = 1 , q = 4, show.summary = TRUE)
model.dlm = ardlDlm(yt ~M3t + ztf+ztsp ,
data = data.frame(coeff.invest), p = 1 , q = 4, show.summary = TRUE)
#====gdp&variables=====
# Simple OLS of GDP growth on each standardized measure (rows 52:180).
a<-cbind((GDPreturns$Value)[52:180],scaled.reg[52:180,])
a<-as.data.frame(a)
fit1<-lm(V1~ mean.col,data=a)
fit2<-lm(V1~ disp.col,data=a)
fit3<-lm(V1~ skew.col,data=a)
fit4<-lm(V1~ left.col,data=a)
fit5<-lm(V1~ right.col,data=a)
g1<-summary(fit1)
g2=summary(fit2)
g3=summary(fit3)
g4=summary(fit4)
g5=summary(fit5)
g1$r.squared
g2$r.squared
g3$r.squared
g4$r.squared
g5$r.squared
g1
g2
g3
g4
g5
coefficients(fit1)
coefficients(fit2)
coefficients(fit3)
coefficients(fit4)
coefficients(fit5)
# Scatter of GDP growth against skewness with the OLS fit line.
plot(scaled.reg[1:180,3],(GDPreturns$Value),type = "p",xlim=c(-2,4),ylim=c(0,6),ylab = "GDPGrowth", xlab = "Financial Skewness")
abline(lm(GDPreturns$Value~scaled.reg[1:180,3]), col="red")
#==========moving average============
# Rescaled 4-period moving average of skewness overlaid on GDP growth.
# NOTE: the names `c` and `d` below mask base names for non-call uses.
ab<-rollmean(skew.col,4)
ab<-c(0,0,0,ab)
# Affine rescale (x6.5 + 1.5) -- presumably chosen for visual alignment with
# the GDP axis; confirm before reusing elsewhere.
c=ab*6.5+1.5
d=new.GDPreturns[,2]
date<-as.Date(new.GDPreturns$Date,format="%m/%d/%Y")
mov.plot<-as.data.frame(cbind(as.character(date),c[52:180]))
mov.plot$V1<-as.Date(mov.plot$V1)
mov.plot<-xts(mov.plot[,-1], order.by=as.Date(mov.plot[,1], "%m/%d/%Y"))
ab<-rollmean(skew.col,4)
gdpgrowth<-xts(new.GDPreturns[,-1], order.by=as.Date(new.GDPreturns[,1], "%m/%d/%Y"))
plot(gdpgrowth,type="l", col='blue',ylim=c(-2,3),main=' ')
lines(mov.plot,type="l",col="red",lwd=2)
legend("right", c("GDP growth","Financial skewness"), lty=c(1,1), lwd=c(2.5,2.5),col=c("blue","red"),pch = 1)
|
e1c4c905ca8ba644ae41bd622dbf7e669deb9422
|
b4ce46f1e6d1bafab63fb76f11686626057c5fc2
|
/fish-analysis.R
|
eaea3010fa8ac778b21f62596637188d9585a87f
|
[] |
no_license
|
course-fish274-2019/ClaudiaMateo
|
6d2d33b5fb779feb9c47b285b7b08e396e244e48
|
154b93446e7a1f753f922a95086d5268e53bda37
|
refs/heads/master
| 2020-09-06T02:53:10.844045
| 2019-11-12T18:06:04
| 2019-11-12T18:06:04
| 220,296,114
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 430
|
r
|
fish-analysis.R
|
fish_data <- read.csv("data/Gaeta_etal_CLC_data1.csv")
library(dplyr)
#Create categorical size column
fish_data_cat = fish_data %>%
mutate(length_cat = ifelse(length > 300, "big", "small"))
library(tidyverse)
ggplot(fish_data) +
geom_point(mapping = aes(x = length, y = scalelength, color = lakeid))
#try this
ggplot(fish_data_cat, aes(x = scalelength, fill = length_cat, binwidth = 80)) +
geom_histogram(bins = 80)
|
c69c7769dea120a9b19e297815a9a1014682907f
|
2d3a7a709e5b783e1f17cf329dd359008ae14ab6
|
/R/second_mark.R
|
ef11c16c95a4dc7ef76ee6c537bcf8b7eda25060
|
[] |
no_license
|
debruine/markr
|
125042a837d5a0b4c5846cff076005fc8129f5fe
|
b037d25414bf1beb4b3f5cf7bf1354a3adaefb98
|
refs/heads/master
| 2021-10-11T10:12:23.391018
| 2019-01-24T15:54:59
| 2019-01-24T15:54:59
| 111,586,219
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,323
|
r
|
second_mark.R
|
#' Generate Second Marking List
#'
#' \code{second_mark} determines what assessments need to be second marked
#' and bundles files into a folder with the second marking sheet
#'
#' @param marking list containing a dataframe (marks) and moodle participant directory (dir)
#' @param dir directory name to save second marking in
#' @param remove_file filename to remove from second marking directory
#' @param pass_min minimum passing mark; all lower marks are second marked (default 9)
#' @param show_first_mark include first mark in second marking files (default FALSE)
#'
#' @return none
#' @examples
#' second_mark(marking, "data-raw/example/submissions")
#' @export
second_mark <- function(marking,
                        dir = "second_marking",
                        remove_file = "feedback.pdf",
                        pass_min = 9, # minimum mark to pass (9 UG, 12 PG)
                        show_first_mark = FALSE
                        ) {
  # Sample n items from pool without base::sample's length-1 footgun:
  # sample(x, n) on a single numeric x samples from 1:x, not from x itself,
  # which would silently select wrong moodle_ids when a band has one member.
  sample_ids <- function(pool, n) pool[sample.int(length(pool), n)]

  assign_id <- marking$marks$assign_id[1]
  # get distinct questions; default to a single "Q1" when the marks table
  # has no question column
  if ("question" %in% names(marking$marks)) {
    questions <- unique(marking$marks$question) %>% sort()
  } else {
    marking$marks <- dplyr::mutate(marking$marks, question = "Q1")
    questions <- "Q1"
  }
  # select second marks for each question
  for (q in questions) {
    # seed derived from assignment + question so the selection is always
    # the same for a given assignment (reproducible second-marking sample)
    seed <- paste(assign_id, q) %>% utf8ToInt() %>% sum()
    set.seed(seed)
    fb <- dplyr::filter(marking$marks, question == q)
    # calculate how many to second mark
    # 10% of marks or 10, whichever is higher (or all if < 10)
    second_n <- max(10, ceiling(nrow(fb)/10))
    if (nrow(fb) <= second_n) {
      message(paste("Selecting all", nrow(fb), q, "to second mark"))
      to_second_mark <- dplyr::pull(fb, moodle_id)
    } else {
      # select all fails (every failing script is second marked)
      fails <- fb %>%
        dplyr::filter(mark < pass_min) %>%
        dplyr::pull(moodle_id)
      # split the remaining quota roughly into thirds: low / mid / high
      low_high_n <- max(0, floor((second_n - length(fails))/3))
      mid_n <- max(0, second_n - length(fails) - 2*low_high_n)
      f <- fb %>%
        dplyr::filter(mark >= pass_min) %>%
        dplyr::arrange(mark, moodle_id)
      if (nrow(f) > 0 && low_high_n > 0 && mid_n > 0) {
        # band the passes into low/med/high thirds by rank, not quantile:
        # quantiles don't work well if there is a very uneven distribution
        # of marks
        f <- f %>%
          dplyr::mutate(
            n = seq_len(nrow(.)),
            band = ifelse(n <= nrow(.)/3, "low", "med"),
            band = ifelse(n > 2*nrow(.)/3, "high", band)
          )
        lows <- f %>%
          dplyr::filter(band == "low") %>%
          dplyr::pull(moodle_id) %>%
          sample_ids(low_high_n)
        mids <- f %>%
          dplyr::filter(band == "med") %>%
          dplyr::pull(moodle_id) %>%
          sample_ids(mid_n)
        highs <- f %>%
          dplyr::filter(band == "high") %>%
          dplyr::pull(moodle_id) %>%
          sample_ids(low_high_n)
      } else {
        message("No one passed")
        lows <- c()
        mids <- c()
        highs <- c()
      }
      to_second_mark <- c(fails, lows, mids, highs)
    }
    # per-question directory holding the copied submissions
    qdir <- paste0(dir, "/", q)
    dir.create(qdir, showWarnings = FALSE, recursive = TRUE)
    # build the second-marking sheet: blank grade2 column for the second
    # marker, joined to the first marks for optional display
    second_marks <- tibble::tibble(
      moodle_id = to_second_mark,
      question = q,
      grade2 = ""
    ) %>%
      dplyr::arrange(moodle_id) %>%
      dplyr::left_join(fb, by = c("moodle_id", "question")) %>%
      dplyr::select(ID, moodle_id, question, mark1 = mark, grade1 = Grade, grade2)
    message(paste(q, "marks = (", toString(sort(second_marks$mark1)), ")"))
    if (!show_first_mark) {
      # blind second marking: strip the first marker's mark and grade
      second_marks <- dplyr::select(second_marks, -mark1, -grade1)
    }
    # create marking file
    readr::write_csv(
      second_marks,
      paste0(dir, "/", assign_id, "_", q, "_second_marking.csv")
    )
    # copy folders to 2nd marking folder
    if (is.na(marking$dir)) {
      message("No files were copied into the second marking folder")
    } else if (!dir.exists(marking$dir)) {
      message(paste("The feedback directory", marking$dir,
                    "doesn't exist, so no files were copied into the second marking folder"))
    } else {
      all_dirs <- list.dirs(marking$dir)
      for (moodle_id in to_second_mark) {
        # NOTE(review): moodle_id is used as a regex pattern here, so a
        # short numeric id could match as a substring of a longer one —
        # confirm ids appear with delimiters in Moodle directory names
        pdir_n <- grep(moodle_id, all_dirs)
        if (length(pdir_n) == 1) {
          # get the dir that contains the id
          pdir <- all_dirs[pdir_n]
        } else if (length(pdir_n) == 0) {
          message(paste("No directory found for", moodle_id))
          pdir <- c()
        } else if (length(pdir_n) > 1) {
          message(paste(length(pdir_n), "directories copied for", moodle_id))
          pdir <- all_dirs[pdir_n]
        }
        file.copy(from = pdir, to = qdir,
                  overwrite = TRUE,
                  recursive = TRUE,
                  copy.mode = TRUE)
        # remove feedback files so the second marker cannot see them
        # NOTE(review): this path assumes remove_file sits directly under
        # qdir, but the recursive copy above lands submissions in
        # qdir/<participant_dir>/ — verify feedback files are actually
        # removed from the copied subfolders
        fb_file <- paste0(qdir, "/", remove_file)
        if (remove_file != "" && file.exists(fb_file)) {
          file.remove(fb_file)
        }
      }
    }
  }
}
# (non-R residue from web extraction, commented out so the file parses)
# |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.