blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
37d38106417c3ee6f3e591f941835d32d887150c | a80f634ea89c9b9dfb720a0b36ea5ad58e357921 | /SINKING FUND SCHEDULE.R | aa84f1f541912e7b5ad7a8d1f685a4f451d0b94d | [] | no_license | SKaizenRGB/TOI- | a4b2210a764f48f5e6b8ffa8eb08b8b0ce28f637 | 6744962752f0b7c260a1b73f96a168eb92921075 | refs/heads/main | 2023-03-22T14:35:50.683550 | 2021-03-12T06:43:32 | 2021-03-12T06:43:32 | 346,965,934 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,279 | r | SINKING FUND SCHEDULE.R |
# SINKING FUND SCHEDULE
# Consider a loan for Php 300,000.00 to be paid in 5 annual payments at
# 6% rate per period. The lender charges an interest at 8% rate per period.
# Generate and complete a Sinking Fund Schedule.
#Given:
L <- 300000 #loan
j <- 0.06 #sinking fund interest rate
i <- 0.08 #interest rate
n <- 5
#Formulas:
# Periodic interest payment on the ORIGINAL principal; in a sinking fund the
# loan principal itself is not amortized, so this stays constant each period.
# NOTE(review): `I` shadows base R's identity function I(); harmless here but
# a different name (e.g. int_pay) would be safer.
I <- L*i
D <- L/(((((1+j)^n) - 1))/j) #Sinking Fund Deposit Formula
options(scipen=999) #to disable scientific notation
#Sinking_Fund_Schedule:
# Row 0 of the schedule: no deposits/interest yet, full loan outstanding.
r_1 <- c(0,0,0,0,L) #row one
Sinking_Fund_Schedule <- matrix(ncol=5, nrow=1+n)
Sinking_Fund_Schedule[1,] <- r_1
A <- 0
# Build one schedule row per period. The bare `D` and `I` lines below are
# no-op expressions (their values are discarded); they serve only as labels.
for(period in 1: n)
{ D #Sinking Fund Deposit
I #Interest Payment
IE <- A*j #Interest on the Sinking Fund
A <- A + D + IE #Amount in the Sinking Fund
L <- L - D - IE #Net Loan Amount
Sinking_Fund_Schedule[period+1,] <- c(D,I,IE,A,L)}
#Sinking_Fund_Schedule legends:
colnames (Sinking_Fund_Schedule) =
c("SF Deposit","Interest Payment","IESF","Amount in SF","Net Loan Amount(Php)")
rownames (Sinking_Fund_Schedule) = c(0,1:n)
library(pander)
pandoc.table(Sinking_Fund_Schedule, style = "grid", split.tables = Inf)
|
3697246c2fbbae192d30c40a830bc87837c7215f | d746fef241f9a0e06ae48cc3b1fe72693c43d808 | /ark_87287/d7cg6m/d7cg6m-001/rotated.r | b0a557755ccae1ceae08baa0e5b6acfbd29f364e | [
"MIT"
] | permissive | ucd-library/wine-price-extraction | 5abed5054a6e7704dcb401d728c1be2f53e05d78 | c346e48b5cda8377335b66e4a1f57c013aa06f1f | refs/heads/master | 2021-07-06T18:24:48.311848 | 2020-10-07T01:58:32 | 2020-10-07T01:58:32 | 144,317,559 | 5 | 0 | null | 2019-10-11T18:34:32 | 2018-08-10T18:00:02 | JavaScript | UTF-8 | R | false | false | 195 | r | rotated.r | r=0.36
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7cg6m/media/images/d7cg6m-001/svc:tesseract/full/full/0.36/default.jpg Accept:application/hocr+xml
|
3fcf476f8601e85607e57d28d429f50560155e1c | 7690401ce83371a7bbdcb637f9bb027389e00d85 | /ADOE-Project.R | 0253cdad314f703c6086181da5fed9aa5efde8c9 | [] | no_license | cshartrand/GT-Project-Advanced-Design-Of-Experiments | 3be7c791ded3fd6f51f6aa4705e2fcbb2ec5abad | d335631bdd0a177e40fb3fda83e9e5058160999b | refs/heads/master | 2021-06-11T04:58:53.293847 | 2017-04-20T23:38:58 | 2017-04-20T23:38:58 | 75,781,433 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,847 | r | ADOE-Project.R | ##ISYE 7400 Project Code
##ISYE 7400 Project Code
# NOTE(review): hard-coded absolute path; this will fail on any other
# machine. Prefer a project-relative working directory.
setwd("/Users/dshartra/Documents/GT-Fall-2016/ADOE")
## Load in data
# Space-separated file; assumed columns are race distance and time.
# NOTE(review): column semantics inferred from later usage -- confirm
# against runprojdata.txt.
data = read.csv("runprojdata.txt",header=T,sep=" ")
head(data)
# Estimate a runner's VDOT (Daniels' approximation of VO2max) from a race.
#
# x: numeric vector of length 2 -- x[1] = race distance (meters),
#    x[2] = race time (minutes).
# Returns the VDOT score rounded to the nearest integer.
vdotf <- function(x) {
  race_time <- x[2]
  velocity <- x[1] / x[2]  # meters per minute
  # Fraction of VO2max a runner can sustain for a race of this duration.
  frac_max <- 0.8 +
    0.1894393 * exp(-0.012778 * race_time) +
    0.2989558 * exp(-0.1932605 * race_time)
  # Oxygen demand (VO2) implied by the running velocity.
  vo2_demand <- -4.6 + 0.182258 * velocity + 0.000104 * velocity^2
  round(vo2_demand / frac_max)
}
##Analysis of Observed base data
x = as.matrix(data)
##Edit data to prevent ties in time (scaled based on tenths and hundreths of seconds)
# NOTE(review): sorting the distance and time columns INDEPENDENTLY breaks
# the row-wise (distance, time) pairing of the original observations --
# verify this is intentional before trusting downstream fits.
p1 = sort(x[,1])
p2 = sort(x[,2])
# Tiny jitter (~1 second) to break exact duplicate times.
p2[10] = p2[10] +.017
p2[17] = p2[17] +.017
X = cbind(p1,p2)
X = X[1:18,] #remove cross country times (prediction is much better, makes sense as hard to compare XC to track)
# Response: VDOT score for each (distance, time) row.
f12 = apply(X,1,vdotf)
##Collect Test Dataset (obtained as logitudinal study, same athletes, same coach, same distances, year following training set)
test = read.csv('runprojtest.txt',sep="")
# Same independent-sorting caveat applies to the test set.
p3 = sort(test$Distance)
p4 = sort(test$Time)
test = cbind(p3,p4)
test = test[1:25,] #remove cross country times (again for better prediction)
##MARS (regression is easier to predict new time from distance)
library(mda)
a=mars(X,f12, degree=2)
# Fit without intercept on the MARS basis to inspect coefficients.
summary(lm(f12~a$x-1))
library(lhs)
y.test=apply(test,1,vdotf)
y.pred=predict(a,test)
# Test-set RMSE for MARS.
sqrt(mean((y.pred-y.test)^2)) ##approximately 0.8777997
plot(y.pred,y.test)
abline(0,1)
##Kernel Regression
# Nadaraya-Watson estimator with a Gaussian kernel; bandwidth (theta) chosen
# by leave-one-out cross-validation.
y = apply(X,1,vdotf)
# Squared pairwise distances between training rows.
E=(as.matrix(dist(X, diag=T, upper=T)))^2
n=18
one=rep(1,n)
I =diag(n)
# Leave-one-out CV criterion for the kernel bandwidth theta.
# Uses the shortcut cv_i = e_i / (1 - S_ii) where S is the smoother matrix.
mscv=function(theta)
{
R=exp(-theta*E)+10^(-6)*I
den=c(R%*%one)
e=y-diag(1/den)%*%R%*%y
cv=e/(1-1/den)
return(mean(cv^2))
}
theta=optimize(mscv,c(.1,100000))$min
theta #35,906.05
# NOTE(review): in fhat/r below, x is a length-2 test row but X is an 18x2
# matrix, so (x - X) recycles x DOWN THE COLUMNS, alternating distance and
# time against each column -- likely not the intended per-row difference
# (compare the t(t(X) - x) idiom used in later sections). Verify.
r=function(x)
exp(-theta*(x-X)^2)
fhat=function(x)
sum(r(x)*y)/sum(r(x))
y.kr = apply(test,1,fhat)
y.test = apply(test,1,vdotf)
sqrt(mean((y.kr-y.test)^2)) ## approximatey 4.734648
plot(y.kr,y.test)
abline(0,1)
#Inverse Distance Weigthing
# Shepard's method: prediction is a weighted mean of training responses with
# weights 1/d^2; the 10^-6 offset avoids division by zero at training points.
y = apply(X,1,vdotf)
# NOTE(review): abs(x - X) has the same column-recycling hazard flagged for
# the kernel-regression section -- x (length 2) recycles down the columns of
# the 18x2 matrix X. Also, the RMSE comment below is identical to the kernel
# regression one (4.734648); possible copy-paste -- re-run to confirm.
fhat=function(x)
{
d=abs(x-X)
val=sum(y/(d+10^(-6))^2)/sum(1/(d+10^(-6))^2)
return(val)
}
y.idw = apply(test,1,fhat)
y.test = apply(test,1,vdotf)
sqrt(mean((y.idw-y.test)^2)) ## approximatey 4.734648
plot(y.idw,y.test)
abline(0,1)
##LOESS
## Has issues with the singularties
# Local quadratic regression on the two predictors; span = 0.5 means each
# local fit uses half the training points. With only 18 points a quadratic
# local fit is near-singular, hence the warnings noted below.
y = apply(X,1,vdotf)
X1 = as.data.frame(X)
b=loess(y~p1+p2,data=X1,degree=2,span=.5)
# predict() matches test columns by name; NAs appear where the fit failed.
y.loess=predict(b,test) ##singularity issues.
sqrt(mean((y.loess-y.test)^2))
plot(y.loess,y.test)
abline(0,1)
##Radial Basis
# Gaussian RBF interpolator; the scale parameter is chosen by a leave-one-out
# CV criterion computed from the inverse correlation matrix.
y=apply(X,1,vdotf)
E=as.matrix(dist(X, diag=T, upper=T))
n=18
I=diag(n)
# LOO-CV shortcut: cv_i = (Rinv y)_i / Rinv_ii.
MSCV=function(para)
{
R=exp(-para*E^2)
Rinv=solve(R+10^(-6)*I)
Rinvd=diag(Rinv)
cv=diag(1/Rinvd)%*%Rinv%*%y
val=mean(cv^2)
return(val)
}
a.opt=optimize(MSCV,c(1,10000))
theta=a.opt$min
theta #58.24415
R=exp(-theta*E^2)
basis=function(h)
exp(-theta*sum(h^2))
# t(t(X) - x) subtracts the test point x from every ROW of X correctly
# (contrast the recycling issue in the kernel-regression section).
r=function(x)
{
A=t(t(X)-x)
vec=apply(A,1,basis)
return(vec)
}
coef=solve(R,y)
fhat=function(x)
t(r(x))%*%coef
y.test=apply(test,1,vdotf)
y.rbf=apply(test,1,fhat)
# Large test RMSE: plain RBF extrapolates poorly without a mean/trend term.
sqrt(mean((y.rbf-y.test)^2)) #36.41985
plot(y.rbf,y.test)
abline(0,1)
##Radial Basis Function with polynomial precision
# RBF with a constant (mean) term mu estimated by generalized least squares;
# equivalent to ordinary kriging (confirmed by the next section).
y=apply(X,1,vdotf)
E=as.matrix(dist(X, diag=T, upper=T))
n=18
I=diag(n)
one=rep(1,n)
# LOO-CV on the de-meaned response; log() stabilizes the optimization.
MSCV=function(para)
{
R=exp(-para*E^2)
Rinv=solve(R+10^(-10)*I)
Rinvd=diag(Rinv)
mu=drop(t(one)%*%Rinv%*%y/(t(one)%*%Rinv%*%one))
cv=diag(1/Rinvd)%*%Rinv%*%(y-mu)
val=log(mean(cv^2))
return(val)
}
a.opt=optimize(MSCV,c(1,10000))
theta=a.opt$min
theta #203.7373
R=exp(-theta*E^2)
Rinv=solve(R+10^(-10)*I)
# GLS estimate of the constant mean.
mu=drop(t(one)%*%Rinv%*%y/(t(one)%*%Rinv%*%one))
coef=Rinv%*%(y-mu)
basis=function(h)
exp(-theta*sum(h^2))
r=function(x)
{
A=t(t(X)-x)
vec=apply(A,1,basis)
return(vec)
}
# Predictor: mean plus weighted basis corrections.
fhat=function(x)
mu+t(r(x))%*%coef
y.test=apply(test,1,vdotf)
y.rbfpp=apply(test,1,fhat)
sqrt(mean((y.rbfpp-y.test)^2)) #3.492863
plot(y.rbfpp,y.test)
abline(0,1)
## Some Kriging Stuff Now
## Ordinary Kriging(should be the same as RBF)
# Reuses theta = 203.7373 from the RBF-with-polynomial-precision CV above
# rather than re-optimizing; by construction this reproduces that predictor.
y=apply(X,1,vdotf)
n=18
E=as.matrix(dist(X, diag=T, upper=T))
theta=203.7373
R=exp(-theta*E^2)
one=rep(1,n)
I=diag(n)
Rinv=solve(R+10^(-10)*I)
# GLS mean and kriging weights.
mu=drop(t(one)%*%Rinv%*%y/(t(one)%*%Rinv%*%one))
coef=Rinv%*%(y-mu)
basis=function(h)
exp(-theta*sum(h^2))
r=function(x)
{
A = t(t(X)-x)
vec=apply(A,1,basis)
return(vec)
}
fhat=function(x)
mu+t(r(x))%*%coef
y.ok=apply(test,1,fhat)
y.test=apply(test,1,vdotf)
sqrt(mean((y.ok-y.test)^2)) #3.492863, confirm same as RBF so thats good to see
plot(y.ok,y.test)
abline(0,1)
#Limit Kriging
# Joseph's limit kriging: ratio form r'Rinv y / r'Rinv 1, which removes the
# pull toward the global mean far from the data.
theta = 203.7373
n=18
E=as.matrix(dist(X, diag=T, upper=T))
R=exp(-theta*E^2)
one=rep(1,n)
I=diag(n)
Rinv=solve(R+10^(-10)*I)
# mu is computed but unused by the limit-kriging predictor below.
mu=drop(t(one)%*%Rinv%*%y/(t(one)%*%Rinv%*%one))
coef=Rinv%*%y
basis=function(h)
exp(-theta*sum(h^2))
r=function(x)
{
A = t(t(X)-x)
vec=apply(A,1,basis)
return(vec)
}
den=Rinv%*%one
fhat=function(x){
t(r(x))%*%coef/(t(r(x))%*%den)
}
y.lk = apply(test,1,fhat)
y.test = apply(test,1,vdotf)
sqrt(mean((y.lk-y.test)^2)) #2.148368
plot(y.lk,y.test)
abline(0,1)
## Universal Kriging
# Kriging with a linear trend: regression basis P = [1, distance, time],
# trend coefficients beta estimated by GLS, residuals kriged.
y=apply(X,1,vdotf)
n=18
E=as.matrix(dist(X, diag=T, upper=T))
theta=203.7373
R=exp(-theta*E^2)
one=rep(1,n)
I=diag(n)
Rinv=solve(R+10^(-10)*I)
P=as.matrix(cbind(one,X))
Sinv=solve(t(P)%*%Rinv%*%P)
beta=drop(Sinv%*%t(P)%*%Rinv%*%y)
coef=Rinv%*%(y-P%*%beta)
basis=function(h)
exp(-theta*sum(h^2))
r=function(x)
{
A = t(t(X)-x)
vec=apply(A,1,basis)
return(vec)
}
# Prediction = linear trend at x + kriged residual correction.
fhat=function(x){
t(c(1,x))%*%beta+t(r(x))%*%coef
}
y.uk = apply(test,1,fhat)
y.test = apply(test,1,vdotf)
# Best test RMSE among the surrogates compared here.
sqrt(mean((y.uk-y.test)^2)) #1.857107
plot(y.uk,y.test)
abline(0,1)
# 3x3 panel of predicted-vs-observed VDOT for all nine surrogate models.
# Each panel plots test-set predictions against the true values with a
# 45-degree reference line; points on the line indicate perfect prediction.
par(mfrow=c(3,3))
panel_preds <- list(
  "MARS" = y.pred,
  "Kernel Regression" = y.kr,
  "Inverse Distance Weighting" = y.idw,
  "Loess" = y.loess,
  "Radial Basis" = y.rbf,
  "RBF Polynomial Precision" = y.rbfpp,
  "Ordinary Kriging" = y.ok,
  "Limit Kriging" = y.lk,
  "Universial Kriging" = y.uk  # title spelling preserved from original
)
for (panel_title in names(panel_preds)) {
  plot(panel_preds[[panel_title]], y.test, main = panel_title)
  abline(0, 1)
}
par(mfrow=c(1,1))
## Sensitivity Analysis
# Main-effect (functional ANOVA style) sensitivity of the MARS model `a`
# to each input, computed on [0,1]-scaled inputs mapped back to raw units.
min1 = min(test[,1])
max1 = max(test[,1])
min2 = min(test[,2])
max2 = max(test[,2])
# Min-max scale each test column to [0, 1].
z1 = (test[,1]-min1)/(max1-min1)
z2 = (test[,2]-min2)/(max2-min2)
z = cbind(z1,z2)
low = c(1500,3.983)
up = c(5000,17.7)
#Function to scale to [0,1] to do sensitivity analysis (did not do in regular analysis because of various
# difficulties to be discussed later)
# NOTE(review): despite the comment above, scale01 maps a [0,1] point BACK
# to raw units (low + diag(up-low) %*% x); the name/comment are misleading.
scale01 = function(x){
x=low+diag(up-low)%*%x
}
# apply() returns a 2 x n matrix here, so dat is rebuilt column-wise.
dat = apply(z,1,scale01)
datnew = cbind(dat[1,],dat[2,])
f.eval=predict(a,datnew)
# Overall mean prediction (the "grand mean" the effects are centered on).
f0=mean(f.eval)
x.ev=seq(0,1,length=20)
val=x.ev
n=25
# Main effect of input `ind`: fix that input on a 20-point grid, average the
# model over the empirical distribution of the other input, subtract f0.
main.effect=function(ind)
{
D0=z
for(i in 1:20)
{
D0[,ind]=rep(x.ev[i],n)
D0temp =apply(D0,1,scale01)
D0new = cbind(D0temp[1,],D0temp[2,])
val[i]=mean(predict(a,D0new))
}
return(val-f0)
}
d=2
M=matrix(0,nrow=20,ncol=d)
for(j in 1:d)
M[,j]=main.effect(j)
matplot(x.ev,M,type="l",lty=1:d,col=1:d,lwd=2,main="Sensitivity of Variables Distance and Time")
legend(.6,0,c("Distance","Time"),bty="n",lty=1:d,col=1:d,lwd=2)
## Prediction based on "Case Studies"
library(chemCal)
## EXAMPLE: Runner A runs this time in 5k and gets a vdot value from
## one of the prediction models. Run an inverse prediction model
## for all 3 distances (1500,3k,5k) and use the fitted vdot
## to predict the time run for other distances.
# Calibration: regress MARS-predicted VDOT on time separately per distance
# (test rows 1-9 = 1500m, 10-18 = 3k, 19-25 = 5k), then invert the fit with
# chemCal::inverse.predict to recover a time from a known VDOT.
w15 =test[1:9,2]
w3 = test[10:18,2]
w5 = test[19:25,2]
pred15 = y.pred[1:9]
pred3 = y.pred[10:18]
pred5 = y.pred[19:25]
inv.pred15 <- lm(pred15 ~ w15)
inv.pred3 <- lm(pred3 ~ w3)
inv.pred5 <- lm(pred5 ~ w5)
##Case studies of team members
##predict kyles 1500/5k time from his 3k vdot from 2015
kyle = cbind(3000,8.483)
kyinv = apply(kyle,1,vdotf)
ky15 = inverse.predict(inv.pred15,kyinv)
ky5 = inverse.predict(inv.pred5,kyinv)
##predict casey 1500/5k time from his 3k vdot from 2015
casey = cbind(3000,10.217)
cainv = apply(casey,1,vdotf)
ca15 = inverse.predict(inv.pred15,cainv)
ca5 = inverse.predict(inv.pred5,cainv)
##predict merlin from 2015 by 3k
merlin = cbind(3000,9.317)
merinv = apply(merlin,1,vdotf)
mer15 = inverse.predict(inv.pred15,merinv)
mer5 = inverse.predict(inv.pred5,merinv)
##predict collin from 2015 by 3k
# NOTE(review): `col` shadows the common graphics argument name; a more
# distinctive variable name would avoid confusion.
col = cbind(3000,8.933)
colinv = apply(col,1,vdotf)
col15 = inverse.predict(inv.pred15,colinv)
col5 = inverse.predict(inv.pred5,colinv)
##predict steve from 2014 by 1500
st = cbind(1500,4.083)
stinv = apply(st,1,vdotf)
st3 = inverse.predict(inv.pred3,stinv)
st5 = inverse.predict(inv.pred5,stinv)
##predict chris from 2012 by 5k
ch = cbind(5000,15.883)
chinv = apply(ch,1,vdotf)
ch15 = inverse.predict(inv.pred15,chinv)
ch3 = inverse.predict(inv.pred3,chinv)
##jack bennett from 2012 by 5k
ja = cbind(5000,15.45)
jainv = apply(ja,1,vdotf)
ja15 = inverse.predict(inv.pred15,jainv)
ja3 = inverse.predict(inv.pred3,jainv)
|
c06787ae92d313f76522cebdb4f81cded99dcd61 | 23c5b3db820ebe3ebb66a489b88219689404907e | /plot3.R | d8917f55e1aa426125f7aca23e339f985f512be6 | [] | no_license | paulourbano/ExData_Plotting1 | 865017d7fa12347d9fbe28ef1aa2c0718058dc74 | a51ca93d3c00bc82858fde2940393801ec99e501 | refs/heads/master | 2021-01-18T07:32:26.951765 | 2014-07-13T18:18:46 | 2014-07-13T18:18:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,993 | r | plot3.R | ## Exploratory Data Analysis, Coursera
# Project 1, July 2014
# Read data from file
# RAM usage
# > object.size(data)
# 91853280 bytes
data = read.csv("household_power_consumption.txt", sep=";")
# Converting the Time to "Date" class, allowing plot active
# power over the moments of measument
data$Time = strptime(paste(as.character(data$Date), as.character(data$Time)), "%d/%m/%Y %H:%M:%S")
# Converting Date to "Date" class, allowing comparisons
data$Date = as.Date(data$Date, "%d/%m/%Y")
# Coverting Global Active Power to numeric
data$Global_active_power = as.numeric(data$Global_active_power)
# Converting submetering to numeric
data$Sub_metering_1 = as.numeric(data$Sub_metering_1)
data$Sub_metering_2 = as.numeric(data$Sub_metering_2)
data$Sub_metering_3 = as.numeric(data$Sub_metering_3)
# Converting voltage to numeric
data$Voltage = as.numeric(data$Voltage)
# Converting global reactive power to numeric
data$Global_reactive_power = as.numeric(data$Global_reactive_power)
# Subset data; consider only two day period (1st-2nd) in February 2007
subsetData = subset(data, Date >= as.Date("01/02/2007", "%d/%m/%Y") & Date <= as.Date("02/02/2007", "%d/%m/%Y"))
plot(subsetData$Time, subsetData$Sub_metering_1, type="o", pch=".", ylab = "Energy submetering", xlab = "", cex.lab = 0.7, cex.axis = 0.7, yaxp=c(0, 30, 3))
points(subsetData$Time, subsetData$Sub_metering_2, type="o", pch=".", col="red")
points(subsetData$Time, subsetData$Sub_metering_3, type="o", pch=".", col="blue")
legend("topright", col=c("black", "red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2, cex=0.7)
# As shown below, the subset of data on the two day period of 1st-2nd
# February 2007 for the submetering 2 has a range of [1, 13]. In this way,
# the graph in the assigment description is out of shows a incorrect range.
# > summary(subsetData$Sub_metering_2)
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 1.000 1.000 1.000 1.838 1.000 13.000 |
d03a4f3108b67fb7d7d527f02ec6adabc6e89551 | 7917fc0a7108a994bf39359385fb5728d189c182 | /cran/paws.analytics/man/glue_delete_column_statistics_for_partition.Rd | 2ecb5b809fdb643b590b20fc57c3190f276a7f7d | [
"Apache-2.0"
] | permissive | TWarczak/paws | b59300a5c41e374542a80aba223f84e1e2538bec | e70532e3e245286452e97e3286b5decce5c4eb90 | refs/heads/main | 2023-07-06T21:51:31.572720 | 2021-08-06T02:08:53 | 2021-08-06T02:08:53 | 396,131,582 | 1 | 0 | NOASSERTION | 2021-08-14T21:11:04 | 2021-08-14T21:11:04 | null | UTF-8 | R | false | true | 1,326 | rd | glue_delete_column_statistics_for_partition.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glue_operations.R
\name{glue_delete_column_statistics_for_partition}
\alias{glue_delete_column_statistics_for_partition}
\title{Delete the partition column statistics of a column}
\usage{
glue_delete_column_statistics_for_partition(CatalogId, DatabaseName,
TableName, PartitionValues, ColumnName)
}
\arguments{
\item{CatalogId}{The ID of the Data Catalog where the partitions in question reside. If
none is supplied, the AWS account ID is used by default.}
\item{DatabaseName}{[required] The name of the catalog database where the partitions reside.}
\item{TableName}{[required] The name of the partitions' table.}
\item{PartitionValues}{[required] A list of partition values identifying the partition.}
\item{ColumnName}{[required] Name of the column.}
}
\value{
An empty list.
}
\description{
Delete the partition column statistics of a column.
The Identity and Access Management (IAM) permission required for this
operation is \code{\link[=glue_delete_partition]{delete_partition}}.
}
\section{Request syntax}{
\preformatted{svc$delete_column_statistics_for_partition(
CatalogId = "string",
DatabaseName = "string",
TableName = "string",
PartitionValues = list(
"string"
),
ColumnName = "string"
)
}
}
\keyword{internal}
|
159f29ce0878a51d5b9f36861ca975726f3ed821 | 3b690f89f11745253b68c84d91b53102d6d39eee | /HMMfigures.R | 63e2f72bec79bfa06829a4ad0f3f1f2738d2a933 | [] | no_license | elifdogandar/HiddenMarkovModel-R | 9d0371cdd8cbe75a1e20effa97f73209014fdffa | f790207aba9ea9bebc96b6d424fae46f187ae4f1 | refs/heads/master | 2020-05-15T13:17:51.114495 | 2019-04-19T16:36:33 | 2019-04-19T16:36:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,336 | r | HMMfigures.R | install.packages("ggplot2")
install.packages("gridExtra")
library(ggplot2)
require(gridExtra)
library(tidyr)
####### effect of number of states
test_data <-
data.frame(
fkm = c(0.8121997,0.809192,0.8106626,0.8106626,0.8121332,0.8048029),
fkmed = c(0.85575,0.8734848,0.8840477,0.8689414,0.9168449,0.8746089),
GKfkm= c(0.8181938,0.8136257,0.8326549,0.8418569,0.8626886,0.8731809) ,
number_of_states = c(5,6,7,8,9,10)
)
p1<- test_data %>%
gather(state_selection_method,accuracy, fkm, fkmed,GKfkm) %>%
ggplot(aes(x=number_of_states, y=accuracy, linetype=state_selection_method)) +
theme(legend.position = c(0.2, 0.85),legend.key.size = unit(3, "mm"))+
geom_line()+ggtitle("(a) 60 features chosen with hierarchical method")+
guides(linetype=guide_legend(NULL),shape = guide_legend(override.aes = list(size = 1.5)))+
ylim(0.79, 0.92)+
xlab("Number of states") +
ylab("Accuracy")
test_data2 <-
data.frame(
fkm = c(0.8049372,0.814964,0.841968,0.8152307,0.8671888,0.8567404 ),
fkmed = c(0.8433481,0.8537101,0.8687485,0.8879992,0.8641611,0.8776186),
GKfkm= c(0.8181938,0.8136257,0.8326549,0.8418569,0.8626886,0.8731809) ,
number_of_states = c(5,6,7,8,9,10)
)
p2<- test_data2 %>%
gather(state_selection_method,accuracy, fkm, fkmed,GKfkm) %>%
ggplot(aes(x=number_of_states, y=accuracy, linetype=state_selection_method)) +
geom_line()+ggtitle("(b) 60 features chosen with k-means method")+
theme(legend.position = c(0.2, 0.85),legend.key.size = unit(3, "mm"))+
guides(linetype=guide_legend(NULL),shape = guide_legend(override.aes = list(size = 1.5)))+
ylim(0.79, 0.92)+
xlab("Number of states") +
ylab("Accuracy")
test_data3 <-
data.frame(
fkm = c( 0.7973615, 0.8122675,0.8417884,0.8237442, 0.8402539, 0.8760123),
fkmed = c( 0.8496082,0.8790193,0.8655406,0.8865286,0.894127,0.8790213),
GKfkm= c(0.8181938,0.8136257,0.8326549,0.8418569,0.8626886,0.8731809) ,
number_of_states = c(5,6,7,8,9,10)
)
p3<- test_data3 %>%
gather(state_selection_method,accuracy, fkm, fkmed,GKfkm) %>%
ggplot(aes(x=number_of_states, y=accuracy, linetype=state_selection_method)) +
guides(linetype=guide_legend(NULL),shape = guide_legend(override.aes = list(size = 1.5)))+
geom_line( )+ggtitle("(c) 60 features chosen with k-medoids method")+
theme(legend.position = c(0.2, 0.85),legend.key.size = unit(3, "mm"))+
ylim(0.79, 0.92)+
xlab("Number of states") +
ylab("Accuracy")
test_data4 <-
data.frame(
fkm = c(0.8121332,0.8181718,0.818261,0.8152307,0.8508354,0.8361528),
fkmed = c(0.8807566,0.8566506,0.8718028, 0.9031487,0.8834996,0.8897145),
GKfkm= c(0.8626886,0.8136257,0.8326549,0.8418569,0.8626886,0.8731809) ,
number_of_states = c(5,6,7,8,9,10)
)
p4<- test_data4 %>%
gather(state_selection_method,accuracy, fkm, fkmed,GKfkm) %>%
ggplot(aes(x=number_of_states, y=accuracy, linetype=state_selection_method)) +
guides(linetype=guide_legend(NULL),shape = guide_legend(override.aes = list(size = 1.5)))+
geom_line()+ggtitle("(d) Without any feature selection")+
theme(legend.position = c(0.2, 0.85),legend.key.size = unit(3, "mm"))+
ylim(0.79, 0.92)+
xlab("Number of states") +
ylab("Accuracy")
grid.arrange(p1,p2,p3,p4,ncol=2)
|
d945fc709fd638a82999df38acfc73aeb240cfa3 | f17b6a5fb7d07be8d4de82c4411616e2811d7e72 | /R/summary_data.R | 1a73742d4a44ddc9a29358ea084a4cbcd86cc04e | [] | no_license | rodrigoalmeida94/DragonMasterProject | 7c8f225e16c3439c6d6262533e1dfc260f86e846 | 6f0cfa7acecaaada4902709cfe08cc8bc9251f8b | refs/heads/master | 2021-01-11T17:36:01.225035 | 2017-02-02T16:03:42 | 2017-02-02T16:03:42 | 79,797,172 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 598 | r | summary_data.R | # ---- summary_data ----
# Summary of data of a raster layer
summary_data <- function(raster_list){
out <- c()
for(elem in raster_list){
info <- c(elem, elem@data@names, elem@class[1], elem@extent@xmin,elem@extent@xmax, elem@extent@ymin, elem@extent@ymax, elem@crs@projargs, res(elem),elem@data@min,elem@data@max )
out <- rbind(out,info)
}
out.names <- c('raster', 'name', 'class', 'xmin', 'xmax', 'ymin', 'ymax', 'projargs', 'resx', 'resy', 'data_min', 'data_max')
colnames(out) <- out.names
rownames(out) <- out[,'name']
out <- subset(out, select = -name)
return(out)
} |
6c263c2a5cbc15b4c766bf7c1aaae6e837bc967e | 25524de30d715f1464789860405e6d98d4e71159 | /Lab4-3.R | 6124b5a5b1a07fd3f4000a4a94ba12f4efdd2cae | [] | no_license | sunnyhyo/Multivariate-Statistical-Analysis | 1d3e5f46418de4dcadd2240f383d35aa51fc8648 | 4ebfd44670ab8b1f30a2f238603b1515b0877e33 | refs/heads/master | 2021-04-26T23:02:13.698323 | 2018-05-28T01:45:19 | 2018-05-28T01:45:19 | 123,919,384 | 0 | 1 | null | null | null | null | WINDOWS-1252 | R | false | false | 1,327 | r | Lab4-3.R | #Lab4-3
getwd()
library(ggplot2)
n<-100
p<-5
ex1<-data.frame(matrix(rnorm(n*p),ncol=p))
# use R i386 version ÀÛµ¿ ¾ÈµÊ
#library(rggobi)
#ggobi(ex1)
Sigma1<-matrix(c(1, 0.6,0.5,-0.2, -0.3,
0.6 , 1, 0.2, -0.4,-0.2,
0.5 , 0.2 , 1 , 0.5, 0.1,
-0.2,-0.4 ,0.5, 1, 0.2,
-0.3,-0.2 ,0.1, 0.2, 1),ncol=5)
e.sigma1<-eigen(Sigma1)
Sigma1.5<-e.sigma1$vectors %*%
diag(sqrt(e.sigma1$values))%*%
t(e.sigma1$vectors)
ex2<-matrix(rnorm(n*p),ncol=p)%*%Sigma1.5
test<-read.csv("./Lab4-data/test.csv")
test
test.manova<-manova(cbind(X1,X2)~trt,data=test)
summary(test.manova,test=c("Wilks"))
plastic<-read.csv("./data/plastic.csv")
summary(plastic)
plastic.manova<-manova(cbind(X1,X2,X3)~
fac1+fac2+fac1*fac2,data=plastic)
summary(plastic.manova,test=c("Wilks"))
dog<-read.csv("./data/sleeping-dog.csv")
C1<-dog$TRT3+dog$TRT4-dog$TRT1-dog$TRT2
C2<-dog$TRT1+dog$TRT3-dog$TRT2-dog$TRT4
C3<-dog$TRT1+dog$TRT4-dog$TRT2-dog$TRT3
C.tot<-data.frame(C1=C1,C2=C2,C3=C3)
library(ICSNP)
HotellingsT2(C.tot)
x.mean<-colMeans(C.tot)
S<-var(C.tot)
n<-nrow(C.tot)
p<-ncol(C.tot)
T2<-n*t(x.mean)%*%solve(S)%*%x.mean
T2
T2*(n-p)/(p*(n-1))
test.manova= manova(cbind(X1,X2)~trt, data=test)
summary(test.manova)
summary(test.manova, test=c("Wilks"))
|
08f07c134750e4464270842ee4cf3132880aadd6 | 40c65fff3847662ce46d2afd73acf8b68b785107 | /R/model-module-helpers.R | 75c16433d5653c0a28342c26ae2feb6bf5898644 | [
"MIT"
] | permissive | epinowcast/epinowcast | b4d4562603938e9a184d3450d9387f92908cd6bc | 98ec6dbe3c84ecbe3d55ce988e30f8e7cc6b776d | refs/heads/main | 2023-09-05T18:19:10.985900 | 2023-09-05T12:13:49 | 2023-09-05T12:13:49 | 422,611,952 | 23 | 5 | NOASSERTION | 2023-09-14T09:57:09 | 2021-10-29T14:47:06 | R | UTF-8 | R | false | false | 7,889 | r | model-module-helpers.R | #' Identify report dates with complete (i.e up to the maximum delay) reference
#' dates
#'
#' @param new_confirm `new_confirm` `data.frame` output from
#' [enw_preprocess_data()].
#'
#' @return A `data.frame` containing a `report_date` variable, and grouping
#' variables specified for report dates that have complete reporting.
#' @inheritParams enw_preprocess_data
#' @family modelmodulehelpers
enw_reps_with_complete_refs <- function(
new_confirm, max_delay, by = NULL, copy = TRUE
) {
rep_with_complete_ref <- coerce_dt(
new_confirm, select = c(by, "report_date"), copy = copy
)
rep_with_complete_ref <- rep_with_complete_ref[,
.(n = .N),
by = c(by, "report_date")
][n >= max_delay]
rep_with_complete_ref[, n := NULL]
return(rep_with_complete_ref[])
}
#' Construct a lookup of references dates by report
#'
#' @param missing_reference `missing_reference` `data.frame` output from
#' [enw_preprocess_data()].
#'
#' @param reps_with_complete_refs A `data.frame` of report dates with complete
#' (i.e fully reported) reference dates as produced using
#' [enw_reps_with_complete_refs()].
#'
#' @param metareference `metareference` `data.frame` output from
#' [enw_preprocess_data()].
#'
#' @return A wide `data.frame` with each row being a complete report date and
#' the columns being the observation index for each reporting delay
#' @inheritParams enw_preprocess_data
#' @family modelmodulehelpers
enw_reference_by_report <- function(missing_reference, reps_with_complete_refs,
metareference, max_delay) {
# NOTE(review): the `missing_reference` argument is never used in this
# function body -- confirm whether it is vestigial or an omission.
# Make a complete data.table of all possible reference and report dates
miss_lk <- coerce_dt(
metareference, select = "date", group = TRUE
)
data.table::setnames(miss_lk, "date", "reference_date")
# Expand each reference date to all delays 0..(max_delay - 1).
miss_lk <- miss_lk[,
.(delay = 0:(max_delay - 1)),
by = c("reference_date", ".group")
]
miss_lk[, report_date := reference_date + delay]
data.table::setkeyv(miss_lk, c(".group", "reference_date", "report_date"))
# Assign an index (this should link with the in model index)
miss_lk[, .id := seq_len(.N)]
# Link with reports with complete reference dates (join keeps only report
# dates that are fully observed).
complete_miss_lk <- miss_lk[
reps_with_complete_refs,
on = c("report_date", ".group")
]
data.table::setkeyv(
complete_miss_lk, c(".group", "report_date", "reference_date")
)
# Make wide format: one row per report date, one column per delay, cell
# values are the observation indices assigned above.
refs_by_report <- data.table::dcast(
complete_miss_lk[, .(report_date, .id, delay)], report_date ~ delay,
value.var = ".id"
)
return(refs_by_report[])
}
#' Convert latest observed data to a matrix
#'
#' @param latest `latest` `data.frame` output from [enw_preprocess_data()].
#'
#' @return A matrix with each column being a group and each row a reference
#' date, with cell values being the confirmed counts.
#' @family modelmodulehelpers
latest_obs_as_matrix <- function(latest) {
  # Spread groups into columns: one row per reference date, one column per
  # .group, cells are the confirmed counts.
  latest_matrix <- data.table::dcast(
    latest, reference_date ~ .group,
    value.var = "confirm"
  )
  # Drop the reference_date column and return the counts as a plain matrix.
  # Returning the expression directly (rather than ending on an assignment,
  # as before) makes the result visible instead of invisible.
  as.matrix(latest_matrix[, -1])
}
#' Construct a convolution matrix
#'
#' This function allows the construction of convolution matrices which can
#' be combined with a vector of primary events to produce a vector of
#' secondary events, for example in the form of a renewal equation or to
#' simulate reporting delays. Time-varying delays are supported as well as
#' distribution padding (to allow for use in renewal equation like
#' approaches).
#'
#' @param dist A vector or list of vectors describing the distribution to be
#' convolved as a probability mass function.
#'
#' @param t Integer value indicating the number of time steps to convolve over.
#'
#' @param include_partial Logical, defaults to FALSE. If TRUE, the convolution
#' includes partially complete secondary events.
#'
#' @return A matrix with each column indicating a primary event and each row
#' indicating a secondary event.
#' @export
#' @family modelmodulehelpers
#' @importFrom purrr map_dbl
#' @importFrom utils head
#' @examples
#' # Simple convolution matrix with a static distribution
#' convolution_matrix(c(1, 2, 3), 10)
#' # Include partially reported convolutions
#' convolution_matrix(c(1, 2, 3), 10, include_partial = TRUE)
#' # Use a list of distributions
#' convolution_matrix(rep(list(c(1, 2, 3)), 10), 10)
#' # Use a time-varying list of distributions
#' convolution_matrix(c(rep(list(c(1, 2, 3)), 10), list(c(4, 5, 6))), 11)
convolution_matrix <- function(dist, t, include_partial = FALSE) {
  if (is.list(dist)) {
    # Time-varying case: one PMF per time step, all of equal length.
    if (length(dist) != t) {
      stop("dist must equal t or be the same for all t (i.e. length 1)")
    }
    pmf_len <- lengths(dist)
    if (!all(pmf_len == pmf_len[1])) {
      stop("dist must be the same length for all t")
    }
  } else {
    # Static case: recycle the single PMF across all t time steps.
    pmf_len <- rep(length(dist), t)
    dist <- rep(list(dist), t)
  }
  out <- matrix(0, nrow = t, ncol = t)
  for (origin in seq_len(t)) {
    # Number of PMF entries that still fit before the end of the window.
    span <- min(t - origin + 1, pmf_len[origin])
    out[origin:(origin + span - 1), origin] <- dist[[origin]][seq_len(span)]
  }
  if (!include_partial && pmf_len[1] > 1) {
    # Zero rows whose convolution would require primary events from before
    # the start of the window (i.e. incompletely convolved secondaries).
    out[seq_len(pmf_len[1] - 1), ] <- 0
  }
  out
}
#' Add probability mass functions
#'
#' This function allows the addition of probability mass functions (PMFs) to
#' produce a new PMF. This is useful for example in the context of reporting
#' delays where the PMF of the sum of two Poisson distributions is the
#' convolution of the PMFs.
#'
#' @param pmfs A list of vectors describing the probability mass functions
#' to be summed (convolved).
#'
#' @return A vector describing the probability mass function of the sum of
#' the supplied distributions. Its length is the sum of the input lengths,
#' so it may carry trailing zero padding.
#'
#' @export
#' @importFrom stats ecdf
#' @importFrom purrr map_dbl
#' @family modelmodulehelpers
#' @examples
#' # Sample and analytical PMFs for two Poisson distributions
#' x <- rpois(10000, 5)
#' xpmf <- dpois(0:20, 5)
#' y <- rpois(10000, 7)
#' ypmf <- dpois(0:20, 7)
#' # Add sampled Poisson distributions up to get combined distribution
#' z <- x + y
#' # Analytical convolution of PMFs
#' conv_pmf <- add_pmfs(list(xpmf, ypmf))
#' conv_cdf <- cumsum(conv_pmf)
#' # Empirical convolution of PMFs
#' cdf <- ecdf(z)(0:42)
#' # Compare sampled and analytical CDFs
#' plot(conv_cdf)
#' lines(cdf, col = "black")
add_pmfs <- function(pmfs) {
  d <- length(pmfs)
  if (d == 1) {
    return(pmfs[[1]])
  }
  if (!is.list(pmfs)) {
    return(pmfs)
  }
  # Sequentially convolve each PMF into the running result:
  # P(Z = z) = sum_over_x(P(X = x) * P(Y = z - x)) # nolint
  combined <- NULL
  for (pmf in pmfs) {
    if (is.null(combined)) {
      combined <- pmf
      next
    }
    offsets <- seq_along(combined) - 1
    acc <- numeric(length(combined) + length(pmf))
    for (j in seq_along(pmf)) {
      acc[j + offsets] <- acc[j + offsets] + pmf[j] * combined
    }
    combined <- acc
  }
  combined
}
#' Extract sparse matrix elements
#'
#' This helper function allows the extraction of a sparse matrix from a matrix
#' using `rstan::extract_sparse_parts()` and returns these elements in a named
#' list for use in stan.
#'
#' @param mat A matrix to extract the sparse matrix from.
#' @param prefix A character string to prefix the names of the returned list.
#'
#' @return Return a list that describes the sparse matrix this includes:
#' - `nw` the number of non-zero elements in the matrix.
#' - `w` the non-zero elements of the matrix.
#' - `nv` the number of non-zero row identifiers in the matrix.
#' - `v` the non-zero row identifiers of the matrix.
#' - `nu` the number of non-zero column identifiers in the matrix.
#' - `u` the non-zero column identifiers of the matrix.
#' @export
#' @family modelmodulehelpers
#' @importFrom rstan extract_sparse_parts
#' @examples
#' mat <- matrix(1:9, nrow = 3)
#' extract_sparse_matrix(mat)
extract_sparse_matrix <- function(mat, prefix = "") {
  # Decompose into compressed-sparse-row parts (values w, column ids v,
  # row start positions u) as expected by Stan's CSR helpers.
  parts <- rstan::extract_sparse_parts(mat)
  out <- list(
    nw = length(parts$w),
    w = parts$w,
    nv = length(parts$v),
    v = parts$v,
    nu = length(parts$u),
    u = parts$u
  )
  # Optionally namespace the element names, e.g. "obs_nw", "obs_w", ...
  if (prefix != "") {
    names(out) <- paste(prefix, names(out), sep = "_")
  }
  out
}
b88e1ac9c78d26acd898ce5839d4927e87a2c5bf | 9149c718da538584fc774b0d240f410ec1cb45e5 | /man/plastome_alignment.Rd | 9d52a2dc72d59754b5f9d06fc4d4960bba7030c3 | [
"MIT",
"CC0-1.0"
] | permissive | fernphy/ftolr | 4d6d9b39b72cff683a339f9dadefd3e1e14867ed | 2d5cec57e2f74ad9457819463e1c97ecb4aa96d1 | refs/heads/main | 2023-09-01T13:54:02.346424 | 2023-08-16T22:54:55 | 2023-08-16T22:54:55 | 475,787,005 | 0 | 0 | NOASSERTION | 2022-09-08T01:06:21 | 2022-03-30T08:26:18 | R | UTF-8 | R | false | true | 758 | rd | plastome_alignment.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{plastome_alignment}
\alias{plastome_alignment}
\title{Aligned plastome DNA sequences used to build Fern Tree of Life (FTOL)}
\format{
Matrix of class "DNAbin"
}
\usage{
plastome_alignment
}
\description{
Concatenation of 79 loci from whole
fern plastomes, including maximal sampling of available species on GenBank.
}
\details{
Includes 504
species and
75,413
bases.
For details on methods used to assemble alignment, see Nitta et al. 2022.
}
\references{
Nitta JH, Schuettpelz E, Ramírez-Barahona S, Iwasaki W. (2022) An
open and continuously updated fern tree of life.
https://doi.org/10.3389/fpls.2022.909768
}
\keyword{alignments}
\keyword{data}
|
653c2c3c292e8abf291181342efb5e62c36de829 | b6774fa2437f36c4b2709b9823b766df8e9dda2b | /R scripts/Exp 2. Eye-tracking/clause analysis/clause analysis total.R | b469a1fbd6a0223f19f115f43b690dd8722280d1 | [] | no_license | kihyo-park/Processing-ambiguous-comparatives-in-Kor | c3b840df52d423e1f1aefe88d3975159633523bb | 93e3ed9f24f5aaa1e3934435d2a7d87ab259ccdc | refs/heads/master | 2023-05-09T18:49:47.743561 | 2021-06-02T10:38:07 | 2021-06-02T10:38:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,089 | r | clause analysis total.R | ######################################
##### Kihyo's MA thesis analysis #####
######################################
## REMOVE ALL ##
rm(list=ls())  # NOTE(review): wipes the whole workspace; fine for a standalone script, dangerous if sourced
setwd("C:/R/comparatives/clause")  # NOTE(review): machine-specific working directory
options(scipen=999)  # avoid scientific notation in printed estimates
library(plyr)
library(dplyr)
library(ggplot2)
library(gridExtra)
library(lme4)
library(lmerTest)
library(arm)
#####################################
##### Total reading times ###########
#####################################
# read .csv files (eye-tracking total reading times per trial)
df.total = read.csv("total.time.csv", header=T)
str(df.total)
#############
#### EDA ####
#############
# change Participants and Stimulus identifiers to integer codes
df.total$item <- as.integer(df.total$items)
df.total$subject <- as.integer(df.total$subjects)
str(df.total)
# Summaries of the data: mean, SD, n, and standard error of RT per condition
total.mean.SD <- ddply(df.total,c("condition"),
                       summarise,mean = mean(RT),
                       sd = sd(RT),
                       n = length(RT),
                       se = sd/sqrt(n))
total.mean.SD
# Recorded output from the original run:
#   condition     mean       sd   n       se
# 1     ma-ma 1550.985 712.4706 362 37.44662
# 2    ma-nma 1280.588 663.3623 412 32.68151
# 3    nma-ma 1527.999 679.8476 384 34.69333
# 4   nma-nma 1350.991 678.9794 403 33.82237
# Check the normality of the DVs (histogram and normal Q-Q plot of RT)
ggplot(df.total, aes(x=RT)) + geom_histogram(col="pink",labels=TRUE)
qqnorm(df.total$RT, ylab="Total Reading times");qqline(df.total$RT, col=2)
############################
#### Model Construction ####
############################
# Rescale IVs with arm::rescale before fitting (see ?arm::rescale)
df.total <- transform(df.total, parallel.std=arm::rescale(parallel))
str(df.total)
df.total <- transform(df.total, semantic.std=arm::rescale(semantic))
str(df.total)
# Maximal random-effects structure: by-item and by-subject random slopes
# for the full parallel x semantic interaction.
df.total.max <- lmer(RT ~ parallel.std*semantic.std +
                       (1+(parallel.std*semantic.std)|item)+(1+(parallel.std*semantic.std)|subject), data = df.total)
summary(df.total.max) # Failed to converge
# Simplification ladder: drop random-slope terms step by step until the
# model converges, keeping the fixed-effects structure identical.
df.total.1 <- lmer(RT ~ parallel.std*semantic.std +
                     (1+(parallel.std*semantic.std)|item)+(1+(parallel.std)+(semantic.std)|subject), data = df.total)
summary(df.total.1) # fixed: was summary(df.total.1.) -- an undefined object
df.total.2 <- lmer(RT ~ parallel.std*semantic.std +
                     (1+(parallel.std*semantic.std)|item)+(1+(semantic.std)|subject), data = df.total)
summary(df.total.2)
df.total.3 <- lmer(RT ~ parallel.std*semantic.std +
                     (1+(parallel.std*semantic.std)|item)+(1|subject), data = df.total)
summary(df.total.3)
df.total.4 <- lmer(RT ~ parallel.std*semantic.std +
                     (1+(semantic.std)|item)+(1|subject), data = df.total)
summary(df.total.4)
# Final (intercepts-only) model; its recorded output appears in the
# comments below this block.
df.total.5 <- lmer(RT ~ parallel.std*semantic.std +
                     (1|item)+(1|subject), data = df.total)
summary(df.total.5)
# Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
# Formula: RT ~ parallel.std * semantic.std + (1 | item) + (1 | subject)
# Data: df.total
#
# REML criterion at convergence: 24175.9
#
# Scaled residuals:
# Min 1Q Median 3Q Max
# -2.9283 -0.6592 -0.1196 0.5368 3.4464
#
# Random effects:
# Groups Name Variance Std.Dev.
# item (Intercept) 22936 151.4
# subject (Intercept) 177175 420.9
# Residual 285437 534.3
# Number of obs: 1561, groups: item, 48; subject, 37
#
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 1446.96 73.87 42.35 19.588 < 0.0000000000000002 ***
# parallel.std 45.13 51.58 42.31 0.875 0.387
# semantic.std -230.19 51.57 42.41 -4.463 0.0000586 ***
# parallel.std:semantic.std 26.26 103.11 42.39 0.255 0.800
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#
# Correlation of Fixed Effects:
# (Intr) prlll. smntc.
# paralll.std -0.004
# semantc.std 0.010 -0.005
# prlll.std:. -0.002 0.029 -0.013
|
303bcc9bd9d4e9dc9adb1be6db0612e89c7566a2 | 830f467753e1a4ae9b6306cd10259624c3b40281 | /newPMdataCheck.R | a47d7041835099229739a83e9f04e7d11e150925 | [] | no_license | kobajuluwa-eq/AirQualityScripts | d6c13b56704c7c5558fd5542a77ac4631b690324 | d1b58bd1cf906eab0700037611282c227d61fea4 | refs/heads/main | 2023-03-18T22:40:19.624188 | 2021-03-13T06:36:54 | 2021-03-13T06:36:54 | 347,293,685 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 739 | r | newPMdataCheck.R |
# PM data check: load the latest Zephyr CSV export and summarise each
# cartridge (monitoring site) separately.
# Fix: read_csv and filter were used without attaching their packages,
# so read_csv was undefined and filter resolved to stats::filter.
library(readr)  # read_csv
library(dplyr)  # filter
impdir <- "C:/Users/Obajuluwa/Downloads/Web_Zephyr_PM_data/Web_Zephyr_PM_data/wetransfer-7c533e"
setwd(impdir)
pmfile <- list.files(path = impdir, pattern = "*.csv")
# Only the first CSV in the folder is inspected.
pmdata <- read_csv(pmfile[1],na = "NA")
summary(pmdata)
# Split the data by cartridge ID; trailing comments name the site.
bic1415 <- filter(pmdata, cartridge == "BIC1415") #NCF
bic1483 <- filter(pmdata, cartridge == "BIC1483") #Unilag
bic1409 <- filter(pmdata, cartridge == "BIC1409") #Abesan
bic1378 <- filter(pmdata, cartridge == "BIC1378") #Ikorodu
bic1393 <- filter(pmdata, cartridge == "BIC1393") #LASEPA x JANKARA
bic1374 <- filter(pmdata, cartridge == "BIC1374") #JANKARA x LASEPA
# Per-site summaries for a quick sanity check of the measured ranges.
summary(bic1415)
summary(bic1483)
summary(bic1409)
summary(bic1378)
summary(bic1393)
summary(bic1374)
|
e24778b02f9f6f131d6214c3e4f64d156cf00a51 | d542d9873025deac2ed6073e0f57f429a8759802 | /corr.R | ffd373f667714c8dfebb024b371953de61285bb3 | [] | no_license | madantecno/Prog_asgmt_2 | e534ef058592006a11efc5f4d1e49d0c5beed385 | c510d9efcb618dd13dd19e49cf595e04cb206ac1 | refs/heads/master | 2020-03-30T02:51:22.295635 | 2015-04-02T06:14:49 | 2015-04-02T06:14:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 952 | r | corr.R | corr<- function(directory,threshold=0){
path=paste("Practice Code/Week 2 asginment R Prog/",directory,sep="")
data = multmerge(path)
required_data=na.omit(data)
result<-as.data.frame(table(required_data$ID))
colnames(result)<-c("ID","nobs")
id_above_threshold<-result[result$nobs>threshold,]
i<-1
corval<-data.frame()
while(i <= 332){
if(i %in% id_above_threshold$ID){
data_to_be_cor<-required_data[required_data$ID==i,]
corval<-rbind(corval,cor(data_to_be_cor$sulfate,data_to_be_cor$nitrate))
}
i<-i+1
}
return(corval)
}
multmerge = function(mypath) {
  # Read every file in `mypath` as a headered CSV and stack the resulting
  # data frames (matching columns assumed) into one combined data frame.
  csv_paths <- list.files(path = mypath, full.names = TRUE)
  frames <- lapply(csv_paths, read.csv, header = TRUE)
  do.call(rbind, frames)
}
|
5d01bf55a2c9dab8a86032f7045f159134f93e85 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/fractal/examples/FDWhittle.Rd.R | a3be9795780aea3b7a9c7840cc002265b1c4660e | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 347 | r | FDWhittle.Rd.R | library(fractal)
### Name: FDWhittle
### Title: Estimate the Hurst coefficient by Whittle's method
### Aliases: FDWhittle
### Keywords: univar models nonlinear
### ** Examples
set.seed(100)  # reproducible example
walk <- cumsum(rnorm(1024))  # Gaussian random walk (cumulative sum of white noise)
FDWhittle(walk, method="discrete", sdf.method="multitaper")
FDWhittle(walk, method="continuous", sdf.method="multitaper")
|
9a8d513d5cbd7837b0f82bba179d4e418f24f9a6 | 90d748c0b36a970827199188b764487e514d3c8c | /man/simsham.Rd | 49d40de1ce8547401ba473e31345efd278609ceb | [] | no_license | bips-hb/simsham | 1c2bca8a8fa86a4074301bfa763debceeaec231f | 74c48e5d53c08d2fe1421d7db553d02fe1ec1299 | refs/heads/main | 2023-06-16T07:22:19.090863 | 2021-07-16T16:05:05 | 2021-07-16T16:05:05 | 386,691,549 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 806 | rd | simsham.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulate.R
\name{simsham}
\alias{simsham}
\title{Simulate normally distributed dataset}
\usage{
simsham(
n = 10,
p = 20,
s = 5,
corrmat = corrmat_identity(p),
beta = beta_first_s_covariates(p, s),
snr = 1
)
}
\arguments{
\item{n}{number of observations}
\item{p}{number of parameters}
\item{s}{number of non-zero coefficients}
\item{snr}{signal-to-noise ratio}
\item{corrmat}{function that returns
the correlation matrix of the predictors}
\item{beta}{function that returns the
beta coefficient vector}
}
\value{
A list with X, y
}
\description{
Returns a normally distributed dataset with a
correlation matrix that is given by the function
corr_matrix and the non-zero coefficients as given by
the function beta_type
}
|
1a70d35959d1366723a8ef20f22d6d90d21c0729 | b10d113a245b7de1f7423e277d2957f9baab7c74 | /plot2.R | 7e2e743112058a8cdbc6c54c6676ee79110afdf4 | [] | no_license | Orizel/Project-1 | 511f9fe2040bd2eed2cf71abae682559fa245bb9 | 7605bee95e3569292c01ae3dbbf5ad35d9d5d48c | refs/heads/master | 2022-10-27T21:35:57.896836 | 2020-05-28T19:12:16 | 2020-05-28T19:12:16 | 267,674,820 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 991 | r | plot2.R |
###Loading the dataset into epc (semicolon-separated; skip=1 drops the header row)
epc<-read.table("household_power_consumption.txt", skip=1,sep=";")
#Names the 9 variables in the dataset epc (header was skipped above)
names(epc)<-c("Date", "Time","Global_active_power","Global_reactive_power", "Voltage", "Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3" )
##Select the dates 2007-02-01 and 2007-02-02 (dates are stored d/m/Y)
epc<-subset(epc,epc$Date=="1/2/2007"|epc$Date=="2/2/2007")
epc$Date<-as.Date(epc$Date,format="%d/%m/%Y")
##Convert the Time column to Hour:Minute:Second
epc$Time<-strptime(epc$Time,format="%H:%M:%S")
## Stamp each minute with its calendar date: rows 1-1440 are Feb 1 and rows
## 1441-2880 are Feb 2 -- assumes exactly 1440 one-minute rows per day; TODO confirm
epc[1:1440,"Time"]<-format(epc[1:1440,"Time"],"2007-02-01 %H:%M:%S")
epc[1441:2880,"Time"]<-format(epc[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
##Convert Global_active_power to numeric (non-numeric entries become NA)
epc$Global_active_power<-as.numeric(as.character(epc$Global_active_power))
##Make and save plot2.png: power over the two-day window as a line plot
png("plot2.png")
plot(epc$Time,epc$Global_active_power,type="l", xlab="", ylab="Global Active Power(kilowatts)")
dev.off() |
0abc5231a7bd42e84e6308dd56756e97721a28b1 | 2e75693a34b01351d3e244734b1a52d411994392 | /DataSciencePrep/C1/Entropyexample.R | 5055f7f1f2258b8769ccb00be7a91b3737cbd55f | [] | no_license | pradeep-pasupuleti/DataAnalytics | 8ef6e940436a511f38532b92bb20899f703f8b04 | ecfee2e94fe4ec5736684503d2c000d9e22f0724 | refs/heads/master | 2021-01-25T05:34:29.715430 | 2015-06-30T11:47:43 | 2015-06-30T11:47:43 | 35,626,431 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 690 | r | Entropyexample.R | library(FSelector)
setwd("C:/Pradeep/Working Set/Consulting/Kaggle/DataSciencePrep/InsofeChap1/Data")  # NOTE(review): machine-specific path
entdata <- read.csv("entropy.csv")
# Quick structural checks on the data set
dim(entdata)
summary(entdata)
names(entdata)
str(entdata)
dim(entdata)  # NOTE(review): duplicate of the dim() call above
# Rank features for predicting buys_computer by information gain,
# then build a formula from the top-k features.
weights <- information.gain(buys_computer~., entdata)
print(weights)
subset <- cutoff.k(weights, 4)
f <- as.simple.formula(subset, "buys_computer")
print(f)
# Same exercise with the gain.ratio criterion, keeping the top 2.
weights <- gain.ratio(buys_computer~., entdata)
print(weights)
subset <- cutoff.k(weights, 2)
f <- as.simple.formula(subset, "buys_computer")
print(f)
# And with symmetrical uncertainty; cutoff.biggest.diff presumably keeps
# features up to the largest gap in scores -- see ?cutoff.biggest.diff.
weights <- symmetrical.uncertainty(buys_computer~., entdata)
print(weights)
subset <- cutoff.biggest.diff(weights)
f <- as.simple.formula(subset, "buys_computer")
print(f) |
4b7404da3d64ad5e075562a464f0dc1d9c0df77f | 907aaa2ef40dd8beeb9d533fa519fac0afaf8e37 | /man/flowerplot.Rd | c768d31261170a71f9494da6622cd93f3702ea6b | [] | no_license | AndreasFischer1985/qqBaseX | eaee341155d66d4ff92ca00d6b4d419c3bf1f28a | 98bec0ce041666d09d2c89a4ddc6b84a2349fa53 | refs/heads/master | 2022-09-14T18:58:05.493380 | 2022-08-26T11:52:38 | 2022-08-26T11:52:38 | 189,703,556 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,282 | rd | flowerplot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/flowerplot.r
\name{flowerplot}
\alias{flowerplot}
\title{Function flowerplot}
\usage{
flowerplot(x = NULL, maximum = NULL, rownames = NULL,
colnames = NULL, main = NULL, color = NULL, color2 = "lightgrey",
add.numbers = F, ncex = 0.8, ndigits = 1, ncol = "black",
circle = 0.5, reverseLegend = F, cex = 0.8, cex.legend = 0.8,
xlim = NULL, ylim = NULL, dist = 4, legend = NULL, xshift = 0)
}
\arguments{
\item{x}{Numeric data.frame or matrix containing the values to be displayed.}
\item{maximum}{Numeric value representing the maximum value. Only needs to be specified when the data does not contain the theoretically possible maximum.}
\item{rownames}{Character vector of the same length as x, containing the row names to be displayed. If NULL (default) rownames of x are applied.}
\item{colnames}{Character vector of the same length as x, containing the column names to be displayed. If NULL (default) colnames of x are applied.}
\item{main}{Character value, containing the title to be displayed. Defaults to NULL.}
\item{color}{Character vector, containing the colors of petals. If NULL (default) the rainbow palette is applied.}
\item{color2}{Character value, containing the color of the background petals. If NULL, no background petals are plotted. Defaults to "lightgrey".}
\item{add.numbers}{Logical value specifying whether to draw numbers next to each petal. Defaults to F.}
\item{ncex}{Size of fonts for numbers. Defaults to 0.8; if NA, it is set to cex.}
\item{ndigits}{Numeric value specifying the number of digits to be plotted (if add.numbers==T). Defaults to 1.}
\item{ncol}{Vector containing the color of bars. Defaults to "black".}
\item{circle}{Numeric value specifying the size of the black circle at the center of each flower. Defaults to 0.5.}
\item{reverseLegend}{Logical value specifying whether to reverse the order of legend entries. Defaults to F.}
\item{xshift}{Numeric value specifying how much to shift flowers to the right. Defaults to 0.}
}
\description{
Plots numeric matrix as a field of flowers.
}
\details{
Plots data.frame as a field of flowers. Each column is represented as a separate flower, each row as a flower's petal.
}
\examples{
flowerplot()
}
\keyword{plotting}
|
b2127b3600cdcf27f30238edc93d4fedc551a906 | e0f25961c87e91234f0e53548b43be59bb2fc8a8 | /elo/linefa.R | db352f09aafb9fb746096260e1af67cc49c45f90 | [] | no_license | lauromoura/playground | 47301daffefd246b1d488328581dfd392ba609a2 | e4e1b64b262505ef2f91a4ae2c9fe4d8ddf3755b | refs/heads/master | 2020-09-21T15:10:32.010547 | 2015-11-12T04:44:30 | 2015-11-12T04:44:30 | 4,940,956 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,067 | r | linefa.R | source("elo.R")
teams <- read.table("teamsPE2015.txt", header=TRUE, stringsAsFactors=FALSE)
loadSeason <- function(filename, teams) {
    # Replay one season of matches from `filename`, updating each team's Elo
    # rating after every match via playMatch() (sourced from elo.R).
    # Bug fix: the original version ended on the for-loop, so it returned
    # NULL and `teams <- loadSeason(...)` wiped the ratings table; it now
    # returns the updated `teams` data frame.
    print(filename)
    print(teams)
    matches <- read.table(filename, header=TRUE, stringsAsFactors=FALSE)
    # seq_len() instead of seq(nrow(...)) so a zero-row file iterates zero
    # times rather than over c(1, 0).
    for (i in seq_len(nrow(matches))) {
        homeElo <- teams[teams$Name == matches[i,"HomeTeam"] ,"Rating"]
        awayElo <- teams[teams$Name == matches[i,"AwayTeam"] ,"Rating"]
        homeScore <- matches[i, "HomeScore"]
        awayScore <- matches[i, "AwayScore"]
        # Missing IgnoreHome flags mean "count home advantage".
        shouldIgnoreHome <- matches[i, "IgnoreHome"]
        if (is.na(shouldIgnoreHome)){
            shouldIgnoreHome <- FALSE
        }
        print(matches[i,])
        # playMatch returns the pair of updated ratings (home, away),
        # as used by the two assignments below.
        newElo <- playMatch(homeElo, awayElo, homeScore, awayScore, ignoreHome=shouldIgnoreHome, debug=TRUE)
        teams[teams$Name == matches[i,"HomeTeam"] ,"Rating"] <- newElo[1]
        teams[teams$Name == matches[i,"AwayTeam"] ,"Rating"] <- newElo[2]
        # Print the running league table, best rating first.
        print(teams[sort(teams$Rating, decreasing=TRUE, index.return=TRUE)$ix,])
    }
    teams
}
teams <- loadSeason("PE2015.txt", teams)
# teams <- loadSeason("linefa2012.txt", teams)
# teams <- loadSeason("linefa2014.txt", teams) |
a1ee05a6c7520c10c68544a5fa30d47269c715f4 | a04e329d8a7d46fcdda3458c2e4411ed1f2596ac | /Session 1/Histogram Example/app.R | 43cdc8a86fdf7935ceffb98bd8fdbf867ee3ffc0 | [] | no_license | sshawna/Shiny_workshop | 240af635194994aea6d26f6e7375aad46e19f38f | 16f26d14acf7c6b857dc7f4d7c2b6ba57be0df76 | refs/heads/master | 2020-04-15T05:17:06.182811 | 2019-01-24T09:53:06 | 2019-01-24T09:53:06 | 164,415,625 | 5 | 1 | null | null | null | null | UTF-8 | R | false | false | 257 | r | app.R | library(shiny)
# User interface: a numeric input for the sample size and a plot slot.
ui <- fluidPage(
  numericInput(inputId = 'n', label = 'Sample Size', value = 25),
  plotOutput(outputId = 'histogram')
)
# Server: redraw the histogram of n standard-normal draws whenever n changes.
server <- function(input, output) {
  output$histogram <- renderPlot({
    hist(rnorm(input$n))
  })
}
shinyApp(ui = ui, server = server)
|
c72ba1c1ab07b2f2fed6a6a846ae845060d75f39 | 6c781a8d1c6dba9d7bfacba81e72a15a79aa523a | /macros/tryTry.R | a38fae034bf2d3fc056ee3c54a409aab54030515 | [] | no_license | LukeBatten/antarcticR | 2de359e4c54c3f04e30a965df7dde5c525431faf | 46eecd33a5793274e4c79c2ee19ea590e50ded39 | refs/heads/master | 2021-07-13T16:51:31.435042 | 2021-06-07T22:19:37 | 2021-06-07T22:19:37 | 89,716,057 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,535 | r | tryTry.R | require(antarcticR)
require(dbscan)
require(colorRamps)
require(ggplot2)
require(raster)
# Load event locations (lat/long) from the area/sigma list.
dataFrame <- csvToDF("../../areaSigmaList.csv")
## generate a Haversine Matrix from the lat-long dataFrame
havMat <- as.matrix(genHaversineMat(dataFrame))
# Blank out self-distances so they cannot be picked as the row minimum.
# NOTE(review): the 99999 sentinel assumes every real pairwise distance is
# smaller than 99999 in these units -- TODO confirm (later labels say metres).
diag(havMat) <- 99999
# Nearest-neighbour distance for each event (row-wise minimum).
havMat <- apply(havMat,1,min)
dataFrame$minHav <- as.vector(havMat)
# BEDMAP basemap, downsampled by a factor of 10 for speed.
bedMap <- drawBedmap(reduceResolutionBy=10)
finalFrame <- longLatToSimpleBEDMAP(dataFrame)
finalFrame
# Overlay events on the basemap, sized and coloured by nearest-neighbour distance.
G7 <- bedMap +
    geom_point(data = finalFrame, aes(x = easting, y = northing, size = minHav, colour = minHav)) +
    scale_fill_gradient(low="lightgrey",high="black") + ## This recolours the BEDMAP
    scale_colour_gradientn(colours=matlab.like2(100)) + ## This recolours the points themselves
    labs(size="Haversine distance", colour="Haversine distance")
G7
# Collect the nearest-neighbour distances for the histogram below.
finalList <- data.frame(dataFrame$minHav)
max(finalList)
# Histogram of nearest-neighbour distances in 1000 m bins.
# NOTE(review): if(1) is a manual on/off toggle for this plotting section.
if(1)
{
    ggplot(data=finalList, aes(finalList)) +
        geom_histogram(breaks=seq(min(finalList),max(finalList+1), by=1000), col="blue") +
        ##scale_y_log10(breaks=c(10,100)) +
        labs(title="Haversine distance for each event") +
        labs(x="Haversine distance (m)", y="Counts") +
        theme(axis.title.y = element_text(size=25, family="Trebuchet MS")) +
        theme(plot.title = element_text(size=30, family="Trebuchet MS", face="bold", hjust=0)) +
        theme(axis.text = element_text(size=35, family="Trebuchet MS"),axis.title=element_text(size=25)) #+
        ###xlim(c( min(finalList) , max(finalList) ))
}
|
095a4b230ba0a051d060f7a612714d508b1b5915 | 6fc77d31ad1688033d6dd9830d3c531760a6aabf | /tests/testthat/test-nonstationary.R | ecabe297a91a894a3a2bda45ec74c5299e10718e | [] | no_license | pbs-assess/sdmTMB | ba24efb807680f28fdfa9a27a2a775b1817b49c8 | 6aa4e8a7847318f81e91a0bfb6c85001db07d0da | refs/heads/main | 2023-09-03T17:06:22.517565 | 2023-08-18T20:54:48 | 2023-08-18T20:54:48 | 149,399,567 | 133 | 12 | null | 2023-05-11T18:43:58 | 2018-09-19T05:59:53 | R | UTF-8 | R | false | false | 11,631 | r | test-nonstationary.R | # basic model fitting and prediction tests
#
# test_that("Test that non-stationary model works with random effects in epsilon works", {
# local_edition(2)
# skip_on_cran()
# skip_on_ci()
# skip_if_not_installed("INLA")
#
# mesh <- make_mesh(predictor_dat, xy_cols = c("x", "y"), cutoff = 0.1)
# epsilons <- exp(rnorm(time_steps))
# s <- sdmTMB_simulate(
# formula = ~ 1,
# mesh = mesh, data = predictor_dat,
# B = c(0.2), rho = 0.5,
# phi = 0.2, range = 0.8, sigma_O = 0, sigma_E = epsilons[1],
# seed = 123, family = gaussian()
# )
# s$time <- predictor_dat$year
# s$year_centered <- s$time - mean(s$time)
#
# # fit non-stationary model - iid
# m <- sdmTMB(
# data = s, formula = observed ~ 1,
# time = "time", mesh = mesh,
# spatiotemporal = "IID", spatial = "off",
# experimental = list(
# epsilon_predictor = "year_centered",
# epsilon_model = "re"
# ),
# control = sdmTMBcontrol(
# lower = list(b_epsilon = -1, ln_epsilon_re_sigma = -3),
# upper = list(b_epsilon = 1, ln_epsilon_re_sigma = 1)
# )
# )
# idx <- grep("ln_epsilon_re_sigma", names(m$sd_report$value))
#
# expect_equal(as.numeric(m$sd_report$value[idx]), -1.054972, tolerance = 0.002)
#
# m <- sdmTMB(
# data = s, formula = observed ~ 1,
# time = "time", mesh = mesh,
# spatiotemporal = "AR1", spatial = "off",
# experimental = list(
# epsilon_predictor = "year_centered",
# epsilon_model = "re"
# ),
# control = sdmTMBcontrol(
# lower = list(b_epsilon = -1, ln_epsilon_re_sigma = -3),
# upper = list(b_epsilon = 1, ln_epsilon_re_sigma = 1)
# )
# )
# idx <- grep("ln_epsilon_re_sigma", names(m$sd_report$value))
#
# expect_equal(as.numeric(m$sd_report$value[idx]), -2.130359, tolerance = 0.002)
# })
## # test_that("Test that non-stationary model works with random effects in epsilon with trend works", {
## # local_edition(2)
## # skip_on_cran()
## # skip_on_ci()
## # skip_if_not_installed("INLA")
# ## #
# set.seed(42)
# mesh <- make_mesh(loc, xy_cols = c("x", "y"), cutoff = 0.1)
# epsilons <- exp(rnorm(time_steps))
# s <- sdmTMB_sim(
# x = x, y = y, mesh = mesh, X = X,
# betas = c(0.2), time_steps = time_steps, rho = 0.5,
# phi = 0.2, range = 0.8, sigma_O = 0, sigma_E = epsilons,
# seed = 123, family = gaussian()
# )
# s$year_centered <- s$time - mean(s$time)
# mesh <- make_mesh(s, xy_cols = c("x", "y"), cutoff = 0.1)
## # # fit non-stationary model - iid
## # m <- sdmTMB(
## # data = s, formula = observed ~ 1,
## # time = "time", mesh = mesh,
## # spatiotemporal = "IID", spatial = "off",
## # experimental = list(
## # epsilon_predictor = "year_centered",
## # epsilon_model = "trend-re"
## # ),
## # control = sdmTMBcontrol(
## # lower = list(b_epsilon = -1, ln_epsilon_re_sigma = -3),
## # upper = list(b_epsilon = 1, ln_epsilon_re_sigma = 1)
## # )
## # )
## # idx <- grep("ln_epsilon_re_sigma", names(m$sd_report$value))
## #
## # expect_equal(as.numeric(m$sd_report$value[idx]), -2.537232, tolerance = 0.002)
## # expect_equal(as.numeric(m$sd_report$value[idx]), -2.537232, tolerance = 0.002)
## #
## # m <- sdmTMB(
## # data = s, formula = observed ~ 1,
## # time = "time", mesh = mesh,
## # spatiotemporal = "AR1", spatial = "off",
## # experimental = list(
## # epsilon_predictor = "year_centered",
## # epsilon_model = "trend-re"
## # ),
## # control = sdmTMBcontrol(
## # lower = list(b_epsilon = -1, ln_epsilon_re_sigma = -3),
## # upper = list(b_epsilon = 1, ln_epsilon_re_sigma = 1)
## # )
## # )
## # idx <- grep("ln_epsilon_re_sigma", names(m$sd_report$value))
## #
## # expect_equal(as.numeric(m$sd_report$value[idx]), -2.303735, tolerance = 0.002)
## # })
## #
## #
test_that("Test that non-stationary model works without spatial field and epsilon trend works", {
  # Regression test: epsilon SD follows a linear trend in (scaled) year with
  # the spatial field off; checks AR1 and IID spatiotemporal structures
  # against previously recorded estimates.
  local_edition(2)
  skip_on_cran()
  skip_on_ci()
  skip_if_not_installed("INLA")
  mesh <- make_mesh(pcod, c("X", "Y"), cutoff = 20)
  pcod$fyear <- as.factor(pcod$year)
  # NOTE(review): the next assignment is immediately overwritten below.
  pcod$time <- pcod$year - min(pcod$year) + 1
  pcod$time = scale(pcod$year)
  fit <- sdmTMB(
    density ~ s(depth),
    data = pcod, mesh = mesh,
    spatial="off",
    time = "year",
    spatiotemporal = "ar1",
    family = tweedie(link = "log"),
    experimental = list(epsilon_model = "trend", epsilon_predictor = "time"),
    control = sdmTMBcontrol(lower = list(b_epsilon = -1),
                            upper = list(b_epsilon = 1))
  )
  # Trend slope b_epsilon and the per-year log_sigma_E values.
  par <- fit$sd_report$value[which(names(fit$sd_report$value)=="b_epsilon")]
  expect_equal(as.numeric(par), -0.05852822, tolerance = 0.002)
  par <- fit$sd_report$value[which(names(fit$sd_report$value)=="log_sigma_E")]
  expect_equal(as.numeric(par), c(1.0534572, 1.0409799, 1.0285026, 1.0035480, 0.9785934, 0.9536388, 0.9286842, 0.9037296, 0.8787750), tolerance = 0.002)
  # fit non-stationary model - iid
  fit <- sdmTMB(
    density ~ s(depth),
    data = pcod, mesh = mesh,
    spatial="off",
    time = "year",
    spatiotemporal = "iid",
    family = tweedie(link = "log"),
    experimental = list(epsilon_model = "trend", epsilon_predictor = "time"),
    control = sdmTMBcontrol(lower = list(b_epsilon = -1),
                            upper = list(b_epsilon = 1))
  )
  par <- fit$sd_report$value[which(names(fit$sd_report$value)=="b_epsilon")]
  expect_equal(as.numeric(par), -0.04915406, tolerance = 0.002)
  par <- fit$sd_report$value[which(names(fit$sd_report$value)=="log_sigma_E")]
  expect_equal(as.numeric(par), c(1.1262184, 1.1157395, 1.1052607, 1.0843029, 1.0633452, 1.0423874, 1.0214297, 1.0004719, 0.9795142), tolerance = 0.002)
})
test_that("Test that non-stationary model works with spatial field and epsilon trend works", {
  # Same trend-in-epsilon regression test as above but with the spatial
  # field switched on; AR1 and IID variants checked against recorded values.
  local_edition(2)
  skip_on_cran()
  skip_on_ci()
  skip_if_not_installed("INLA")
  mesh <- make_mesh(pcod, c("X", "Y"), cutoff = 20)
  pcod$fyear <- as.factor(pcod$year)
  # NOTE(review): the next assignment is immediately overwritten below.
  pcod$time <- pcod$year - min(pcod$year) + 1
  pcod$time = scale(pcod$year)
  fit <- sdmTMB(
    density ~ s(depth),
    data = pcod, mesh = mesh,
    spatial="on",
    time = "year",
    spatiotemporal = "ar1",
    family = tweedie(link = "log"),
    experimental = list(epsilon_model = "trend", epsilon_predictor = "time"),
    control = sdmTMBcontrol(lower = list(b_epsilon = -1),
                            upper = list(b_epsilon = 1))
  )
  par <- fit$sd_report$value[which(names(fit$sd_report$value)=="b_epsilon")]
  expect_equal(as.numeric(par), -0.04435818, tolerance = 0.002)
  par <- fit$sd_report$value[which(names(fit$sd_report$value)=="log_sigma_E")]
  expect_equal(as.numeric(par), c(0.8882275, 0.8787710, 0.8693146, 0.8504016, 0.8314887, 0.8125758, 0.7936628, 0.7747499, 0.7558370), tolerance = 0.002)
  # fit non-stationary model - iid
  fit <- sdmTMB(
    density ~ s(depth),
    data = pcod, mesh = mesh,
    spatial="on",
    time = "year",
    spatiotemporal = "iid",
    family = tweedie(link = "log"),
    experimental = list(epsilon_model = "trend", epsilon_predictor = "time"),
    control = sdmTMBcontrol(lower = list(b_epsilon = -1),
                            upper = list(b_epsilon = 1))
  )
  par <- fit$sd_report$value[which(names(fit$sd_report$value)=="b_epsilon")]
  expect_equal(as.numeric(par), -0.0457674, tolerance = 0.002)
  par <- fit$sd_report$value[which(names(fit$sd_report$value)=="log_sigma_E")]
  expect_equal(as.numeric(par), c(0.8916701, 0.8819133, 0.8721564, 0.8526426, 0.8331288, 0.8136150, 0.7941013, 0.7745875, 0.7550737), tolerance = 0.002)
})
test_that("Test that non-stationary model works with epsilon trend and delta model", {
  # Regression test: the epsilon trend also works under a delta (hurdle)
  # family, which yields one b_epsilon per model component (hence the
  # length-2 expectation below).
  local_edition(2)
  skip_on_cran()
  skip_on_ci()
  skip_if_not_installed("INLA")
  mesh <- make_mesh(pcod, c("X", "Y"), cutoff = 20)
  pcod$fyear <- as.factor(pcod$year)
  # NOTE(review): the next assignment is immediately overwritten below.
  pcod$time <- pcod$year - min(pcod$year) + 1
  pcod$time = scale(pcod$year)
  fit <- sdmTMB(
    density ~ s(depth),
    data = pcod, mesh = mesh,
    spatial="off",
    time = "year",
    spatiotemporal = "ar1",
    family = delta_gamma(),
    experimental = list(epsilon_model = "trend", epsilon_predictor = "time"),
    control = sdmTMBcontrol(lower = list(b_epsilon = -1),
                            upper = list(b_epsilon = 1))
  )
  par <- fit$sd_report$value[which(names(fit$sd_report$value)=="b_epsilon")]
  expect_equal(as.numeric(par), c(-0.07908264, -0.09297464), tolerance = 0.002)
})
test_that("Test that non-stationary model works without spatial field and random effects in epsilon", {
  # Simulation-based test: data are generated with near-constant epsilon
  # SDs, then refit with epsilon modelled as a random effect ("re");
  # the RE SD estimate and the fixed effects are checked against
  # recorded values.
  local_edition(2)
  skip_on_cran()
  skip_on_ci()
  skip_if_not_installed("INLA")
  set.seed(42)
  time_steps <- 20
  epsilons <- exp(rnorm(time_steps, mean = 0, sd = exp(-3)))
  # make fake predictor(s) (a1) and sampling locations:
  predictor_dat <- data.frame(
    X = runif(length(epsilons)*50), Y = runif(length(epsilons)*50),
    a1 = rnorm(length(epsilons)*50), year = rep(1:length(epsilons), each = 50)
  )
  mesh <- make_mesh(predictor_dat, xy_cols = c("X", "Y"), cutoff = 0.1)
  sim_dat <- sdmTMB_simulate(
    formula = ~ 1 + a1,
    data = predictor_dat,
    time = "year",
    mesh = mesh,
    family = gaussian(),
    range = 0.5,
    sigma_E = epsilons,
    phi = 0.01,
    sigma_O = 0,
    seed = 42,
    B = c(0.2, -0.4) # B0 = intercept, B1 = a1 slope
  )
  sim_dat$time <- sim_dat$year
  sim_dat$year_centered <- sim_dat$time - mean(sim_dat$time)
  fit <- sdmTMB(
    observed ~ a1,
    data = sim_dat, mesh = mesh,
    spatial="off",
    time = "year",
    spatiotemporal = "iid",
    experimental = list(epsilon_model = "re"),
    control = sdmTMBcontrol(lower = list(ln_epsilon_re_sigma = -20),
                            upper = list(ln_epsilon_re_sigma = -1))
  )
  # The RE SD should collapse towards the lower bound (simulated epsilons
  # were nearly constant) and the fixed effects should recover B.
  par <- fit$sd_report$value[which(names(fit$sd_report$value)=="ln_epsilon_re_sigma")]
  expect_equal(as.numeric(par), -14.0, tolerance = 0.002)
  par <- fit$sd_report$par.fixed[1:2]
  expect_equal(as.numeric(par), c(0.2579745,-0.40099), tolerance = 0.002)
})
test_that("Test that non-stationary model works without spatial field and trend and random effects in epsilon", {
  # Simulation-based test of the combined "trend-re" epsilon model (linear
  # trend plus random effect in log epsilon SD); the estimated trend slope
  # is checked against a previously recorded value.
  local_edition(2)
  skip_on_cran()
  skip_on_ci()
  skip_if_not_installed("INLA")
  set.seed(42)
  time_steps <- 20
  epsilons <- exp(rnorm(time_steps, mean = 0, sd = exp(-3)))
  # make fake predictor(s) (a1) and sampling locations:
  predictor_dat <- data.frame(
    X = runif(length(epsilons)*50), Y = runif(length(epsilons)*50),
    a1 = rnorm(length(epsilons)*50), year = rep(1:length(epsilons), each = 50)
  )
  mesh <- make_mesh(predictor_dat, xy_cols = c("X", "Y"), cutoff = 0.1)
  sim_dat <- sdmTMB_simulate(
    formula = ~ 1 + a1,
    data = predictor_dat,
    time = "year",
    mesh = mesh,
    family = gaussian(),
    range = 0.8,
    rho = 0.5,
    sigma_E = epsilons,
    phi = 0.2,
    sigma_O = 0,
    seed = 42,
    B = c(0.2, -0.4) # B0 = intercept, B1 = a1 slope
  )
  sim_dat$time <- sim_dat$year
  # NOTE(review): centered on the minimum here (vs the mean in the
  # previous test) -- confirm this is intentional.
  sim_dat$year_centered <- sim_dat$time - min(sim_dat$time)
  fit <- sdmTMB(
    observed ~ a1,
    data = sim_dat, mesh = mesh,
    spatial="off",
    time = "year",
    spatiotemporal = "iid",
    experimental = list(epsilon_model = "trend-re",
                        epsilon_predictor = "year_centered"),
    control = sdmTMBcontrol(lower = list(ln_epsilon_re_sigma = -15, b_epsilon=-1),
                            upper = list(ln_epsilon_re_sigma = -1, b_epsilon=1))
  )
  par <- fit$sd_report$value[which(names(fit$sd_report$value)=="b_epsilon")]
  expect_equal(as.numeric(par), 0.01257052, tolerance = 0.002)
})
|
5dcf28c881ad1e1872b2aa5e4ec4d4afe206f95a | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Biere/tipfixpoint/eijk.S382.S-f4/eijk.S382.S-f4.R | 3267f7f1ab56a9a731f47288de4d2ac440db0809 | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 65 | r | eijk.S382.S-f4.R | 9b4cc96d1857a6c3c551436fbe5e7454 eijk.S382.S-f4.qdimacs 2573 7324 |
tnorm <- function(n, lo, hi, mu, sig) {
  # Generate n truncated-normal variates on [lo, hi] via the inverse-CDF
  # method: draw uniforms on [pnorm(lo), pnorm(hi)] and map back with qnorm.
  #
  # n   - number of draws
  # lo  - lower truncation bound (scalar or vector)
  # hi  - upper truncation bound (scalar or vector)
  # mu  - mean(s) of the underlying (untruncated) normal
  # sig - standard deviation(s)
  # Returns a numeric vector of length n.
  # Recycle scalar bounds to match a vector-valued mean.
  # (Fix: scalar `if` conditions now use `&&` instead of elementwise `&`.)
  if (length(lo) == 1 && length(mu) > 1) lo <- rep(lo, length(mu))
  if (length(hi) == 1 && length(mu) > 1) hi <- rep(hi, length(mu))
  q1 <- pnorm(lo, mu, sig) # CDF at the lower bound
  q2 <- pnorm(hi, mu, sig) # CDF at the upper bound
  z <- runif(n, q1, q2)
  z <- qnorm(z, mu, sig)
  # qnorm returns +/-Inf when a bound lies so far in a tail that its CDF
  # underflows to 0 or 1; clamp those draws to the finite truncation bounds.
  z[z == -Inf] <- lo[z == -Inf]
  z[z == Inf] <- hi[z == Inf]
  z
}
#----------------------------------------------------------------------------------------------
acceptMH <- function(p0, p1, x0, x1, BLOCK){
  # Metropolis(-Hastings) acceptance step.
  # p0/p1 are log-densities of the current state x0 and proposal x1.
  # BLOCK = TRUE accepts or rejects the whole vector with one uniform draw;
  # otherwise each element is accepted independently.
  # Returns list(x = updated state, accept = number of acceptances).
  n_draws <- if (BLOCK) 1 else length(x0)
  accept_pr <- exp(p1 - p0)           # acceptance probability (density ratio)
  u <- runif(n_draws, 0, 1)
  accepted <- which(u < accept_pr)
  n_acc <- length(accepted)
  if (BLOCK) {
    if (n_acc > 0) x0 <- x1
  } else {
    x0[accepted] <- x1[accepted]
  }
  list(x = x0, accept = n_acc)
}
#-------------------------------
rmvnorm1 = function (n, sigma, pre0.9_9994 = FALSE)
{
  # Draw n rows from a multivariate normal(0, sigma) using the SVD square
  # root of sigma (more tolerant of near-singular covariance matrices than
  # a Cholesky factorization). `pre0.9_9994` reproduces the legacy mvtnorm
  # fill order (by column) when TRUE.
  # Returns an n x ncol(sigma) matrix of draws.
  decomp <- svd(sigma)
  spectrum_ok <- decomp$d >= -sqrt(.Machine$double.eps) * abs(decomp$d[1])
  if (!all(spectrum_ok)) {
    warning("sigma is numerically not positive definite")
  }
  # Matrix square root t(V diag(sqrt(d)) t(U)), built without forming diag().
  sqrt_sigma <- t(decomp$v %*% (t(decomp$u) * sqrt(decomp$d)))
  draws <- matrix(rnorm(n * ncol(sigma)), nrow = n, byrow = !pre0.9_9994)
  draws %*% sqrt_sigma
}
#----------------------------
create.dmat = function(dat){
  # Build an n x nloc sparse indicator (design) matrix mapping each row of
  # `dat` to its location: entry [i, j] is 1 exactly when dat$loc.id[i] == j.
  # Assumes loc.id takes the integer values 1..nloc (same assumption as the
  # original column-by-column construction).
  nloc <- length(unique(dat$loc.id))
  indicator <- outer(dat$loc.id, seq_len(nloc), `==`) * 1
  Matrix(indicator)
}
#--------------------------
create.K = function(theta, covmat, nclust, nparam){
  # Gaussian-process style kernel: K = exp(-sum_i theta[i] * covmat[[i]]),
  # where covmat is a list of nparam (nclust x nclust) distance matrices.
  weighted_sum <- Reduce(
    function(acc, i) acc + theta[i] * covmat[[i]],
    seq_len(nparam),
    matrix(0, nclust, nclust)
  )
  exp(-weighted_sum)
}
#--------------------------
# Gibbs draw for the location random effects alpha.
# The full conditional is multivariate normal with precision D'D + invK/sig2
# and mean var1 %*% t(D) %*% (z - X beta).
# NOTE(review): relies on rmvnorm() from an attached package (presumably
# mvtnorm) and on `param` being a list with elements z, betas, sig2, invK
# -- confirm against the calling script.
update.alpha=function(param,dtd,xmat,dmat){
  prec=dtd+(1/param$sig2)*param$invK  # posterior precision matrix
  var1=as.matrix(solve(prec))         # posterior covariance
  err=param$z-xmat%*%param$betas      # residuals after fixed effects
  pmedia=t(dmat)%*%err
  # Symmetrize before sampling: solve() can leave tiny numeric asymmetries
  if (!isSymmetric(var1)) var1=(var1+t(var1))/2
  t(rmvnorm(1,var1%*%pmedia,var1))
}
#--------------------------
# Gibbs draw for the regression coefficients beta.
# Full conditional is multivariate normal with precision X'X + I (i.e. a
# standard-normal prior on each coefficient) and mean var1 %*% t(X) %*%
# (z - alpha[loc]).
# NOTE(review): relies on rmvnorm() from an attached package (presumably
# mvtnorm) -- confirm.
update.betas=function(param,xtx,xmat,dat){
  prec=xtx+diag(1,ncol(xmat))         # posterior precision (N(0,1) prior)
  var1=solve(prec)
  err=param$z-param$alpha[dat$loc.id] # residuals after random effects
  pmedia=t(xmat)%*%err
  t(rmvnorm(1,var1%*%pmedia,var1))
}
#--------------------------
update.sig2 = function(param, nclust, a.sig2, b.sig2){
  # Gibbs draw for the random-effect variance sig2 from its inverse-gamma
  # full conditional, given an IG(a.sig2, b.sig2) prior.
  shape <- (nclust + 2 * a.sig2) / 2
  quad_form <- t(param$alpha) %*% param$invK %*% param$alpha
  rate <- b.sig2 + quad_form / 2
  1 / rgamma(1, shape, rate)
}
#--------------------------
# Data augmentation for the probit model: draw the latent variable z given
# the binary outcome microsc1.
#   microsc1 == 1 -> truncated normal on (0, Inf)
#   microsc1 == 0 -> truncated normal on (-Inf, 0)
#   microsc1 NA   -> unconstrained normal (outcome unobserved)
# All draws are centered on the linear predictor alpha[loc] + X beta with
# unit variance (standard probit augmentation).
# NOTE: depends on the sibling tnorm() defined earlier in this file.
update.z=function(param,dat,xmat){
  media=param$alpha[dat$loc.id]+xmat%*%param$betas  # linear predictor
  z=rep(NA,nrow(dat))
  cond=!is.na(dat$microsc1) & dat$microsc1==1
  z[cond]=tnorm(sum(cond),lo=0,hi=Inf ,mu=media[cond],sig=1)
  cond=!is.na(dat$microsc1) & dat$microsc1==0
  z[cond]=tnorm(sum(cond),lo=-Inf,hi=0,mu=media[cond],sig=1)
  cond= is.na(dat$microsc1)
  z[cond]=rnorm(sum(cond),mean=media[cond],sd=1)
  z
}
#--------------------------
get.inverse = function(D, sig2, K, dtd, nclust){
  # Efficient (Woodbury-style) inverse of (I + sig2 * D K D'):
  # I - sig2 * D (I + sig2 * K D'D)^(-1) K D', where the solve is done in
  # the smaller nclust-dimensional space instead of the n x n data space.
  core <- solve(diag(1, nclust) + sig2 * K %*% dtd)
  out <- -sig2 * D %*% core %*% K %*% t(D)
  diag(out) <- diag(out) + 1
  out
}
#--------------------------
# Metropolis update for the kernel range parameters theta.
# A proposal is built by moving each theta[i] up to jump[i] grid positions
# within the discrete support theta.vals (with reflection at the ends), the
# implied kernel K and its inverse are rebuilt, and the joint proposal is
# accepted or rejected as a block via acceptMH().
# Depends on siblings create.K(), get.inverse(), acceptMH() in this file,
# and on `param` holding theta, z, betas, sig2, K, invK.
# Returns list(theta, K, invK, accept) where accept is replicated nparam
# times (block acceptance: all-or-nothing).
update.theta=function(param,jump,xmat,nparam,ntheta.vals,nclust,dmat,dtd,theta.vals,covmat){
  theta.orig=theta.new=theta.old=param$theta
  err=param$z-xmat%*%param$betas  # residuals after fixed effects
  # Propose a new grid position for each theta[i]
  for (i in 1:nparam){
    ind=which(theta.old[i]==theta.vals)
    seq1=(-jump[i]):jump[i]
    ind1=ind+sample(seq1,size=1)
    #make reflection
    if (ind1>ntheta.vals) {e=ind1-ntheta.vals; ind1=ntheta.vals-e}
    if (ind1<1) {e=1-ind1; ind1=1+e}
    theta.new[i]=theta.vals[ind1]
  }
  #calculate stuff
  K.old=param$K
  invK.old=param$invK
  K.new=create.K(theta.new,covmat,nclust,nparam)
  invK.new=solve(K.new)
  #---------------------
  # Log-determinant terms of the marginal covariance, old vs proposed
  tmp=(1/param$sig2)*invK.old+dtd
  p1.old=determinant(tmp,logarithm = T)$modulus[[1]]+
    determinant(param$sig2*K.old,logarithm = T)$modulus[[1]]
  inv.old=get.inverse(dmat,param$sig2,K.old,dtd,nclust)
  tmp=(1/param$sig2)*invK.new+dtd
  p1.new=determinant(tmp,logarithm = T)$modulus[[1]]+
    determinant(param$sig2*K.new,logarithm = T)$modulus[[1]]
  inv.new=get.inverse(dmat,param$sig2,K.new,dtd,nclust)
  # zzz=Matrix(diag(1,nobs))+param$sig2*dmat%*%K.new%*%t(dmat)
  # zzz1=solve(zzz)
  # hist(data.matrix(inv.new)-data.matrix(zzz1))
  # determinant(zzz,logarithm = T)$modulus[[1]]
  # Quadratic-form terms of the marginal log-likelihood
  p2.old=t(err)%*%inv.old%*%err
  p2.new=t(err)%*%inv.new%*%err
  #---------------------
  pold=-(1/2)*(p1.old+p2.old)
  pnew=-(1/2)*(p1.new+p2.new)
  k=acceptMH(pold,pnew,theta.old,theta.new,T)  # block accept/reject
  theta.old=k$x
  # Keep the kernel matrices consistent with the accepted theta
  if (k$accept==0) {K=K.old; invK=invK.old}
  if (k$accept==1) {K=K.new; invK=invK.new}
  list(theta=theta.old,K=K,invK=invK,accept=rep(k$accept,nparam))
}
|
127c9c4e060bed4ec25de851ea44471c128cea16 | ccdd4b3b8565f9bb47c71d22816dc9cedff2d4c3 | /census_workflow_adjust_basepop.R | 80966ed2e6ab46fec75f0f40f9b61caea2ce7d97 | [] | no_license | Shelmith-Kariuki/ddharmony | fea918acb462cc16abb1cb3753d222226134314f | 6cea3db54df692c420c05e3141a00852a2d882dd | refs/heads/main | 2023-07-14T16:47:00.370682 | 2021-08-04T08:21:08 | 2021-08-04T08:21:08 | 338,287,122 | 0 | 0 | null | 2021-02-12T10:30:13 | 2021-02-12T10:30:13 | null | UTF-8 | R | false | false | 8,348 | r | census_workflow_adjust_basepop.R | # This function carries out the basepop adjustment for children missing from census counts
# See census workflow chart XX for a description of the logic/steps
# Adjust census counts for the under-enumeration of young children using
# DemoTools::basepop_five(), then splice/smooth the adjusted child ages back
# onto the input series.  Produces four labelled series:
#   BP1 - raw basepop_five() output graduated to single ages
#   BP2 - BP1 for young ages spliced with the unsmoothed input to age 15
#   BP3 - BP2 re-smoothed with the same method chosen earlier in the workflow
#   BP4 - BP1 for young ages spliced with BP3
# Returns a long data.frame with SexID (1 = male, 2 = female), AgeStart,
# BPLabel and DataValue.
# NOTE(review): depends on the DemoTools package and on a sibling helper
# mavPop1() defined elsewhere in the project.
census_workflow_adjust_basepop <- function(popM1, # male pop counts by single year of age
                                           popF1, # female pop counts by single year of age
                                           popM_unsmoothed, # male pop counts from before smoothing step of workflow (can be single, abridged or five-year)
                                           popF_unsmoothed, # female pop counts from before smoothing step of workflow
                                           Age_unsmoothed, # starting age of age groups for unsmoothed series
                                           smooth_method = NA, # smoothing method used
                                           LocID,
                                           census_reference_date, # decimal year
                                           nLxMatFemale = NULL, # matrix of nLx life table values for females. If NULL then values from DemoToolsData will be used.
                                           nLxMatMale = NULL, # matrix of nLx life table values for males
                                           nLxMatDatesIn = NULL, # dates associated with nLx matrices
                                           AsfrMat = NULL, # matrix of age-specific fertility rates. If NULL then DemoToolsData values are used.
                                           AsfrDatesIn = NULL, # dates associated with ASFR matrix
                                           SRB = NULL, # vector of sex ratio at birth
                                           SRBDatesIn = NULL, # dates associated with SRB vector
                                           radix = NULL) { # radix associated with nLx values
  Age1 <- 1:length(popM1)-1  # single ages 0:(n-1) (`:` binds before `-`)
  # group to abridged age groups
  popM_abr <- DemoTools::single2abridged(popM1)
  popF_abr <- DemoTools::single2abridged(popF1)
  Age_abr <- as.numeric(row.names(popM_abr))
  # run basepop_five()
  BP1 <- DemoTools::basepop_five(location = LocID,
                                 refDate = census_reference_date,
                                 Age = Age_abr,
                                 Females_five = popF_abr,
                                 Males_five = popM_abr,
                                 nLxFemale = nLxMatFemale,
                                 nLxMale = nLxMatMale,
                                 nLxDatesIn = nLxMatDatesIn,
                                 AsfrMat = AsfrMat,
                                 AsfrDatesIn = AsfrDatesIn,
                                 SRB = SRB,
                                 SRBDatesIn = SRBDatesIn,
                                 radix = radix,
                                 verbose = FALSE)
  # graduate result to single year of age
  popM_BP1 <- DemoTools::graduate_mono(Value = BP1[[2]], Age = Age_abr, AgeInt = DemoTools::age2int(Age_abr), OAG = TRUE)
  popF_BP1 <- DemoTools::graduate_mono(Value = BP1[[1]], Age = Age_abr, AgeInt = DemoTools::age2int(Age_abr), OAG = TRUE)
  # what is the minimum age at which BP1 is not higher than input population for both males and females
  BP1_higher <- popM_BP1 > popM1 & popF_BP1 > popF1
  minLastBPage1 <- min(Age1[!BP1_higher],10) - 1  # capped at age 9
  if (minLastBPage1 >= 0) {
    # graduate the unsmoothed series to single age if necessary
    AgeInt_unsmoothed <- DemoTools::age2int(Age_unsmoothed)
    if (!(max(AgeInt_unsmoothed, na.rm=TRUE)==1)) {
      popM_unsmoothed <- DemoTools::graduate_mono(Value = popM_unsmoothed,
                                                  Age = Age_unsmoothed,
                                                  AgeInt = AgeInt_unsmoothed,
                                                  OAG = TRUE)
      popF_unsmoothed <- DemoTools::graduate_mono(Value = popF_unsmoothed,
                                                  Age = Age_unsmoothed,
                                                  AgeInt = AgeInt_unsmoothed,
                                                  OAG = TRUE)
    }
    Age1_unsmoothed <- 1:length(popM_unsmoothed) - 1
    # splice the BP1 series for ages at or below minLastBPage1 with unsmoothed single age series to age 15 and smoothed series thereafter
    popM_BP2 <- c(popM_BP1[Age1 <= minLastBPage1], popM_unsmoothed[Age1_unsmoothed > minLastBPage1 & Age1_unsmoothed < 15], popM1[Age1 >= 15])
    popF_BP2 <- c(popF_BP1[Age1 <= minLastBPage1], popF_unsmoothed[Age1_unsmoothed > minLastBPage1 & Age1_unsmoothed < 15], popF1[Age1 >= 15])
    # if we are smoothing, then smooth BP2 using the best method from child smoothing before
    if (!is.na(smooth_method)) {
      if (substr(smooth_method, 1, 8) == "bestMavN") { # if the best smoothing was mav on one year data
        # parse the moving-average window size from the method label
        mavN <- as.numeric(substr(smooth_method, nchar(smooth_method)-1, nchar(smooth_method)))
        popM_BP3_mav <- mavPop1(popM_BP2, Age1)
        popM_BP3 <- unlist(select(popM_BP3_mav$MavPopDF, !!paste0("Pop", mavN)))
        popF_BP3_mav <- mavPop1(popF_BP2, Age1)
        popF_BP3 <- unlist(select(popF_BP3_mav$MavPopDF, !!paste0("Pop", mavN)))
      } else { # if the best smoothing was on five-year data
        popM5_BP2 <- DemoTools::groupAges(popM_BP2, N=5)
        popF5_BP2 <- DemoTools::groupAges(popF_BP2, N=5)
        Age5 <- seq(0,max(Age_abr),5)
        # last character of the label encodes the five-year graduation choice
        bestGrad5 <- as.numeric(substr(smooth_method, nchar(smooth_method), nchar(smooth_method)))
        if (bestGrad5 == 1) {
          popM_BP3 <- DemoTools::graduate_mono(popM5_BP2, AgeInt = DemoTools::age2int(Age5), Age = Age5, OAG = TRUE)
          popF_BP3 <- DemoTools::graduate_mono(popF5_BP2, AgeInt = DemoTools::age2int(Age5), Age = Age5, OAG = TRUE)
        }
        if (bestGrad5 == 2) {
          popM5_BP2_mav2 <- DemoTools::smooth_age_5(popM5_BP2, Age5, method = "MAV", n = 2)
          popF5_BP2_mav2 <- DemoTools::smooth_age_5(popF5_BP2, Age5, method = "MAV", n = 2)
          # splice infants from BP1 to 1-4 year olds from smoothed BP2 and remaining smoothed BP2 thereafter
          popMabr_BP2_mav2 <- c(popM_BP1[1], popM5_BP2_mav2[1]-popM_BP1[1], popM5_BP2_mav2[2:length(popM5_BP2_mav2)])
          popFabr_BP2_mav2 <- c(popF_BP1[1], popF5_BP2_mav2[1]-popF_BP1[1], popF5_BP2_mav2[2:length(popF5_BP2_mav2)])
          popM_BP3 <- DemoTools::graduate_mono(popMabr_BP2_mav2, AgeInt = DemoTools::age2int(Age_abr), Age = Age_abr, OAG = TRUE)
          popF_BP3 <- DemoTools::graduate_mono(popFabr_BP2_mav2, AgeInt = DemoTools::age2int(Age_abr), Age = Age_abr, OAG = TRUE)
        }
      }
      # keep input counts untouched from age 15 upward
      popM_BP3 <- c(popM_BP3[Age1 < 15], popM1[Age1 >=15])
      popF_BP3 <- c(popF_BP3[Age1 < 15], popF1[Age1 >=15])
    } else { # if no smoothing then BP3 = BP2
      popM_BP3 <- popM_BP2
      popF_BP3 <- popF_BP2
    }
    # what is the minimum age at which BP1 is higher than BP3 for both males and females
    BP1_higher <- popM_BP1 >= popM_BP3 & popF_BP1 >= popF_BP3
    minLastBPage3 <- min(Age1[!BP1_higher],10) - 1
    # splice the BP1 up to age minLastBPage3 with the BP3
    popM_BP4 <- c(popM_BP1[Age1 <= minLastBPage3], popM_BP3[Age1 > minLastBPage3 & Age1 < 15], popM1[Age1 >= 15])
    popF_BP4 <- c(popF_BP1[Age1 <= minLastBPage3], popF_BP3[Age1 > minLastBPage3 & Age1 < 15], popF1[Age1 >= 15])
  } else { # IF BP1 CAME IN LOWER THAN ORIGINAL AT ALL CHILD AGES, THEN WE SKIP THE STEPS AND JUST RETURN ORIGINAL
    popM_BP2 <- popM1
    popM_BP3 <- popM1
    popM_BP4 <- popM1
    popF_BP2 <- popF1
    popF_BP3 <- popF1
    popF_BP4 <- popF1
  }
  # Assemble all four series into one long data frame
  nAge <- length(Age1)
  pop_basepop <- data.frame(SexID = rep(c(rep(1,nAge),rep(2,nAge)),4),
                            AgeStart = rep(Age1,8),
                            BPLabel = c(rep("BP1",nAge*2),
                                        rep("BP2",nAge*2),
                                        rep("BP3",nAge*2),
                                        rep("BP4",nAge*2)),
                            DataValue = c(popM_BP1, popF_BP1,
                                          popM_BP2, popF_BP2,
                                          popM_BP3, popF_BP3,
                                          popM_BP4, popF_BP4))
  return(pop_basepop)
}
|
26355ffefa69034544becc0edfb443a050c6fc5e | 7610db967cf364d632ed688930d3db13a1e779bd | /Test 1/Bigmart 1.R | 1f90f38de01a80d2fa88afdacbf48547f733a74c | [] | no_license | Prbn/Big-mart-Sales-Practice-Problem | ab79f6edb05625ff5469716f013bb756c9c7a084 | 8851f5c9c3cb6620224bccc6b151d289f053b699 | refs/heads/master | 2021-05-14T16:18:32.728712 | 2018-01-02T13:20:02 | 2018-01-02T13:20:02 | 116,017,313 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 22,281 | r | Bigmart 1.R | # A 1
# Importing Data
#---------------
# Set the working directory to the folder holding the raw competition CSVs.
# NOTE(review): machine-specific absolute path; only works on the author's
# machine.
setwd("D:\\Work\\R\\R Projects\\Big mart Sales Practice Problem\\DATA")
# Read the raw test and training sets; keep strings as characters and
# treat empty strings as missing (NA).
test.d <- read.csv("Test_u94Q5KV.csv", stringsAsFactors = F, na.strings = c('') )
train.d <- read.csv("Train_UWu5bXk.csv", stringsAsFactors = F, na.strings = c(''))
# --------------------------
# Loading packages
# -----------------------
library('ggplot2') # visualization
library('ggthemes') # visualization
library('scales') # visualization
library('dplyr') # data manipulation
library('mice') # imputation
library('randomForest') # classification algorithm
library('lubridate') # For date and time
totalsummary <- function(data.df){
  # Return a quick overview of a data frame: its structure, first rows,
  # and per-column summaries.
  #
  # data.df - the data.frame to summarise
  # Returns list(Structure = character vector of str() output,
  #              Head = first 6 rows, Summary = summary() table).
  #
  # Bug fix: str() prints to the console and returns NULL invisibly, so the
  # original `st <- str(data.df)` always stored NULL in Structure; capture
  # its printed output instead.
  st <- capture.output(str(data.df))
  hd <- head(data.df, n = 6)
  sm <- summary(data.df)
  list(Structure = st, Head = hd, Summary = sm)
}
# Data Preparation #
# ================ #
# bind training & test data so that cleaning/feature steps are applied once
# (test rows are identifiable later by their missing Item_Outlet_Sales)
full.d <- bind_rows(train.d, test.d)
totalsummary(full.d)
# Variable Editing
# Convert the categorical columns to factors
full.d$Item_Identifier <- as.factor(full.d$Item_Identifier)
full.d$Item_Fat_Content <- as.factor(full.d$Item_Fat_Content)
full.d$Item_Type <- as.factor(full.d$Item_Type)
full.d$Outlet_Identifier <- as.factor(full.d$Outlet_Identifier)
#full.d$Outlet_Establishment_Year <- as.factor(full.d$Outlet_Establishment_Year)
full.d$Outlet_Size <- as.factor(full.d$Outlet_Size)
full.d$Outlet_Location_Type <- as.factor(full.d$Outlet_Location_Type)
full.d$Outlet_Type <- as.factor(full.d$Outlet_Type)
### MISSING DATA ###
#==================#
# Exploring the missing data
full.d[!complete.cases(full.d),]
# Number of incomplete Rows
nrow(full.d[!complete.cases(full.d),])
# Rows that are missing data of Item_weight #
#-------------------------------------------#
head(full.d[is.na(full.d$Item_Weight),])
# Number of rows, missing data of Item Weight
nrow(full.d[is.na(full.d$Item_Weight),])
# The data can be imputed from the mean of other same data of same item
# Taking care of missing data
# Replacing missing data with mean of same item identity
full.d[is.na(full.d$Item_Weight),'Item_Weight'] <- sapply(full.d[is.na(full.d$Item_Weight),'Item_Identifier'],function(x)mean(full.d[full.d$Item_Identifier==x, 'Item_Weight'], na.rm = TRUE))
# Have all been replaced?
nrow(full.d[is.na(full.d$Item_Weight),])==0
# Rows that are missing data of Item_Fat_Content #
#------------------------------------------------#
head(full.d[is.na(full.d$Item_Fat_Content),])
# Number of rows, missing data of Item Fat Content
nrow(full.d[is.na(full.d$Item_Fat_Content),])
# No missing data
# Exploring different types of Item_Fat_Content
levels(full.d$Item_Fat_Content)
# There are actuall two types only
table(full.d$Item_Fat_Content)
# Correcting
full.d[full.d$Item_Fat_Content =='LF'| full.d$Item_Fat_Content =='low fat', 'Item_Fat_Content'] <- 'Low Fat'
full.d[full.d$Item_Fat_Content =='reg', 'Item_Fat_Content'] <- 'Regular'
# Checking
table(full.d$Item_Fat_Content)
levels(full.d$Item_Fat_Content)
# Refactorizing
full.d$Item_Fat_Content <- as.factor(as.character(full.d$Item_Fat_Content))
# Checking
table(full.d$Item_Fat_Content)
levels(full.d$Item_Fat_Content)
# Rows that are missing data of Item_Visibility #
#------------------------------------------------#
head(full.d[is.na(full.d$Item_Visibility),])
# Number of rows, missing data of Item Visibility
nrow(full.d[is.na(full.d$Item_Visibility),])
# No missing data
# Rows that are missing data of Item_Type #
#------------------------------------------------#
head(full.d[is.na(full.d$Item_Type),])
# Number of rows, missing data of Item Type
nrow(full.d[is.na(full.d$Item_Type),])
# No missing data
# Exploring different types of Item_Type
levels(full.d$Item_Type)
table(full.d$Item_Type)
# Rows that are missing data of Item_MRP #
#------------------------------------------------#
head(full.d[is.na(full.d$Item_MRP),])
# Number of rows, missing data of Item MRP
nrow(full.d[is.na(full.d$Item_MRP),])
# No missing data
# Exploring different types of Item MRP
levels(full.d$Item_MRP)
table(full.d$Item_MRP)
# Rows that are missing data of Outlet_Establishment_Year #
#------------------------------------------------#
head(full.d[is.na(full.d$Outlet_Establishment_Year),])
# Number of rows, missing data of Outlet Establishment Year
nrow(full.d[is.na(full.d$Outlet_Establishment_Year),])
# No missing data
# Exploring different types of Outlet Establishment Year
levels(full.d$Outlet_Establishment_Year)
table(full.d$Outlet_Establishment_Year)
# Rows that are missing data of Outlet_Identifier #
#------------------------------------------------#
head(full.d[is.na(full.d$Outlet_Identifier),])
# Number of rows, missing data of Outlet Identifier
nrow(full.d[is.na(full.d$Outlet_Identifier),])
# No missing data
# Exploring the different Item and Outlet identifiers
factor(full.d$Item_Identifier)
levels(full.d$Item_Identifier)
levels(full.d$Outlet_Identifier)
table(full.d$Outlet_Identifier)
# Rows that are missing data of Outlet_Size #
#------------------------------------------------#
head(full.d[is.na(full.d$Outlet_Size),])
# Number of rows, missing data of Outlet Size
nrow(full.d[is.na(full.d$Outlet_Size),])
# No missing data
# Exploring different types of Outlet Size
levels(full.d$Outlet_Size)
table(full.d$Outlet_Size)
# Are all outlet size of the outlets same
sapply(full.d[is.na(full.d$Outlet_Size),'Outlet_Identifier'],function(x)sum(!is.na(full.d[full.d$Outlet_Identifier == x,'Outlet_Size'])))
# It seems the outlet size data is entirely absent for these store IDs.
# Check whether the outlet sizes that are present are all consistent
Aa <- table(full.d$Outlet_Identifier,full.d$Outlet_Size)
storesize.d <- data.frame(Outlet_Identifier=levels(full.d$Outlet_Identifier))
storesize.d$sumsales <- sapply(storesize.d$Outlet_Identifier,function(x)sum(full.d[full.d$Outlet_Identifier==x,'Item_Outlet_Sales'],na.rm = TRUE))
storesize.d$Outlet_size <- sapply(storesize.d$Outlet_Identifier,function(x)unique(full.d[full.d$Outlet_Identifier==x,'Outlet_Size']))
storesize.d$Outlet_Location_Type <- sapply(storesize.d$Outlet_Identifier,function(x)unique(full.d[full.d$Outlet_Identifier==x,'Outlet_Location_Type']))
storesize.d$Outlet_Type <- sapply(storesize.d$Outlet_Identifier,function(x)unique(full.d[full.d$Outlet_Identifier==x,'Outlet_Type']))
storesize.d$Number_of_Item <- sapply(storesize.d$Outlet_Identifier,function(x)length(unique(full.d[full.d$Outlet_Identifier==x,'Item_Identifier'])))
storesize.d$Outlet_Establishment_Year <- sapply(storesize.d$Outlet_Identifier,function(x)unique(full.d[full.d$Outlet_Identifier==x,'Outlet_Establishment_Year']))
storesize.d$Age <- 2017-storesize.d$Outlet_Establishment_Year
str(storesize.d)
# Imputing missing Outlet Size values by predictive imputation
# Predictive imputation
# Using 'mice' package. (Multivariate Imputation by Chained Equations)
# 'rpart' (recursive partitioning for regression) can also be used.
# Setting a random Seed
set.seed(123)
# Performing mice imputation
# Excluding certain less-than-useful variables:
mice_mod <- mice(storesize.d[,!names(storesize.d) %in% 'Outlet_Identifier'], method='rf')
# The method here is Random Forest
# Saving the complete output
mice_output <- complete(mice_mod)
# Store Size
storesize.d$Outlet_size <- mice_output$Outlet_size
# clearing mice variables
rm(mice_mod,mice_output)
# Backing up data
Backup1 <- full.d
#full.d <- Backup1
# imputing missing values
full.d[is.na(full.d$Outlet_Size),'Outlet_Size'] <- sapply(as.character(full.d[is.na(full.d$Outlet_Size),'Outlet_Identifier']),function(x)storesize.d[storesize.d$Outlet_Identifier==x,'Outlet_size'])
# as.character() is used as the factors actual value may be different
# Rows that are missing data of Outlet_Location_Type #
#----------------------------------------------------#
head(full.d[is.na(full.d$Outlet_Location_Type),])
# Number of rows, missing data of Outlet Location Type
nrow(full.d[is.na(full.d$Outlet_Location_Type),])
# No missing data
# Exploring different types of Outlet Location Type
levels(full.d$Outlet_Location_Type)
table(full.d$Outlet_Location_Type)
# Rows that are missing data of Outlet_Type #
#------------------------------------------------#
head(full.d[is.na(full.d$Outlet_Type),])
# Number of rows, missing data of Outlet Type
nrow(full.d[is.na(full.d$Outlet_Type),])
# No missing data
# Exploring different types of Outlet Type
levels(full.d$Outlet_Type)
table(full.d$Outlet_Identifier,full.d$Outlet_Type)
# Rows that are missing data of Item_Outlet_Sales #
#------------------------------------------------#
head(full.d[is.na(full.d$Item_Outlet_Sales),])
# Number of rows, missing data of Outlet Sales
nrow(full.d[is.na(full.d$Item_Outlet_Sales),])
# No missing data
# Feature Engineering #
#=====================#
# Checking if there is significant change in the outlet Types #
#-------------------------------------------------------------#
tapply(full.d[!is.na(full.d$Item_Outlet_Sales),'Item_Outlet_Sales'],full.d[!is.na(full.d$Item_Outlet_Sales),'Outlet_Type'],mean)
tapply(full.d[!is.na(full.d$Item_Outlet_Sales),'Item_Outlet_Sales'],full.d[!is.na(full.d$Item_Outlet_Sales),'Outlet_Type'],summary)
# Plotting a boxplot
ggplot(full.d[!is.na(full.d$Item_Outlet_Sales),], aes(x = Outlet_Type, y = Item_Outlet_Sales, fill = factor(Outlet_Type))) +
geom_boxplot() +
geom_hline(aes(yintercept=mean(full.d[!is.na(full.d$Item_Outlet_Sales),'Item_Outlet_Sales'])), colour='red', linetype='dashed', lwd=2) +
scale_y_continuous(labels=dollar_format()) +
theme_few()
# As the Type of stores are different they wont be altered
# Item visibility #
#-----------------#
# Number of rows where item visibility is zero
nrow(full.d[full.d$Item_Visibility==0,])
# Imputing the zero item visibility with the mean item visibility of each store
full.d[full.d$Item_Visibility==0,'Item_Visibility'] <- sapply(full.d[full.d$Item_Visibility==0,'Outlet_Identifier'],function(x)mean(full.d[(!full.d$Item_Visibility == 0 & full.d$Outlet_Identifier == x),'Item_Visibility']))
# Checking
nrow(full.d[full.d$Item_Visibility==0,])==0
# Making item category from the identifier #
#--------------------------#
# Creating a new category based on the Item Identifier
# The Item identifier that starts with FD is for Food, NC for Non Consumable and DR for Drinks
full.d[grep('^FD',as.character(full.d$Item_Identifier)),'Item_Category'] <- 'Food'
full.d[grep('^NC',as.character(full.d$Item_Identifier)),'Item_Category'] <- 'Non Consumable'
full.d[grep('^DR',as.character(full.d$Item_Identifier)),'Item_Category'] <- 'Drinks'
# Checking if all the row are filled
nrow(full.d[is.na(full.d$Item_Identifier),])==0
# Factorizing
full.d$Item_Category <- factor(full.d$Item_Category)
# Backing UP
Backup1.5 =full.d
# Determining The years of operation of a Outlet
full.d$Outler_Age <- year(now())-full.d$Outlet_Establishment_Year
# Preprocessing #
#===============#
str(full.d)
backup2 <- full.d
#full.d <- backup2
# Encoding Categorical data
full.d$Item_Fat_Content <- factor(full.d$Item_Fat_Content,levels = levels(full.d$Item_Fat_Content),labels = c(1:nlevels(full.d$Item_Fat_Content)))
full.d$Item_Type <- factor(full.d$Item_Type,levels = levels(full.d$Item_Type),labels = c(1:nlevels(full.d$Item_Type)))
#full.d$Outlet_Establishment_Year <- factor(full.d$Outlet_Establishment_Year,levels = levels(full.d$Outlet_Establishment_Year),labels = c(1:nlevels(full.d$Outlet_Establishment_Year)))
full.d$Outlet_Size <- factor(full.d$Outlet_Size,levels = levels(full.d$Outlet_Size),labels = c(1:nlevels(full.d$Outlet_Size)))
full.d$Outlet_Location_Type <- factor(full.d$Outlet_Location_Type,levels = levels(full.d$Outlet_Location_Type),labels = c(1:nlevels(full.d$Outlet_Location_Type)))
full.d$Outlet_Type <- factor(full.d$Outlet_Type,levels = levels(full.d$Outlet_Type),labels = c(1:nlevels(full.d$Outlet_Type)))
full.d$Item_Category <- factor(full.d$Item_Category,levels = levels(full.d$Item_Category),labels = c(1:nlevels(full.d$Item_Category)))
### Splitting the data back into the original test and training sets.
trainA1 <- full.d[!is.na(full.d$Item_Outlet_Sales),]
testA1 <- full.d[is.na(full.d$Item_Outlet_Sales),]
# removing the ids
full.d1 <- full.d
full.d$Item_Identifier <- NULL
full.d$Outlet_Identifier <- NULL
full.d$Outlet_Establishment_Year <- NULL
### Splitting the data back into the original test and training sets.
trainA<- full.d[!is.na(full.d$Item_Outlet_Sales),]
testA <- full.d[is.na(full.d$Item_Outlet_Sales),]
traintestA <- trainA
traintestA1 <- trainA1
# Removing Null from test set
traintestA$Item_Outlet_Sales <- NULL
traintestA1$Item_Outlet_Sales <- NULL
testA$Item_Outlet_Sales <- NULL
Test.Results <- testA1
Train.Results <- trainA1
# Write CSV
makecsvsubmission <- function(dataset1, string){
  # Write a competition submission file "<string> Submission.csv" in the
  # working directory, holding the item/outlet identifiers plus the
  # prediction column of `dataset1` named by `string`.
  submission <- data.frame(
    dataset1$Item_Identifier,
    dataset1$Outlet_Identifier,
    dataset1[string]
  )
  colnames(submission) <- c('Item_Identifier', 'Outlet_Identifier', 'Item_Outlet_Sales')
  out_file <- paste0(string, ' Submission.csv')
  write.csv(submission, file = out_file, row.names = F)
}
# Full data process complete
# Backup
backup2.5 <- full.d
backuptrain1 <- trainA1
backuptest1 <- testA1
#full.d <- backup2.5
# Setting directory of processed data
setwd("D:\\Work\\R\\R Projects\\Big mart Sales Practice Problem\\Test 1")
### Exporting DATA
write.csv(trainA1, file = 'Train_Processed1.csv', row.names = F)
write.csv(testA1, file = 'Test_Processed1.csv', row.names = F)
write.csv(full.d, file = 'full_Processed1.csv', row.names = F)
# Baseline Model #
#================#
# Mean Based
Test.Results$Overall_Mean_Sales <- mean(trainA1$Item_Outlet_Sales)
Train.Results$Overall_Mean_Sales <- mean(trainA1$Item_Outlet_Sales)
# Outlet Mean Sales Based
Outlet_mean_sales <- sapply(testA1$Outlet_Identifier,function(x)mean(trainA1[trainA1$Outlet_Identifier == x,'Item_Outlet_Sales']))
Test.Results$Outlet_Mean_Sales <- Outlet_mean_sales
Train.Results$Outlet_Mean_Sales <- sapply(traintestA1$Outlet_Identifier,function(x)mean(trainA1[trainA1$Outlet_Identifier == x,'Item_Outlet_Sales']))
rm(Outlet_mean_sales)
# Item Mean Sales Based
Item_mean_sales <- sapply(testA1$Item_Identifier,function(x)mean(trainA1[trainA1$Item_Identifier == x,'Item_Outlet_Sales']))
Test.Results$Item_Mean_Sales <- Item_mean_sales
Train.Results$Item_Mean_Sales <- sapply(traintestA1$Item_Identifier,function(x)mean(trainA1[trainA1$Item_Identifier == x,'Item_Outlet_Sales']))
rm(Item_mean_sales)
# Backup
write.csv(Test.Results, file = 'Test.Results1.csv', row.names = F)
# Overall Mean
makecsvsubmission(Test.Results,'Overall_Mean_Sales')
# Outlet Mean
makecsvsubmission(Test.Results,'Outlet_Mean_Sales')
# Item Mean
makecsvsubmission(Test.Results,'Item_Mean_Sales')
as.numeric()
head(Test.Results)
### Predictive Modeling ###
#=========================#
# Multiple Linear Model
# ---------------------
# Fitting Multiple Linear Regression to the training set.
regressor = lm(formula = Item_Outlet_Sales ~ ., data = trainA)
# Information of the regressor
summary(regressor)
# Predicting the Test set results
y_pred <- predict(regressor,newdata = testA)
# Saving Test results
Test.Results$Mlinearmodel <- y_pred
Train.Results$Mlinearmodel <- predict(regressor,newdata = traintestA)
# Saving results for submission
makecsvsubmission(Test.Results,'Mlinearmodel')
# Building the optimal model
# --------------------------
# Significance level is set to 95%
# Using Backward Elimination
# Starting with all the independent variables
# 1st attempt all variables
regressor = lm(formula = Item_Outlet_Sales ~ Item_Weight + Item_Fat_Content +
Item_Visibility + Item_Type +Item_MRP + Outlet_Size +
Outlet_Location_Type + Outlet_Type + Item_Category + Outler_Age,
data = trainA)
# Then remove the non-significant independent variable
# Using the summary function to find the non-significant independent variable
summary(regressor)
# Removing the insignificant indpendent variable
# 2nd attempt removing Item_Type
regressor = lm(formula = Item_Outlet_Sales ~ Item_Weight + Item_Fat_Content +
Item_Visibility + Item_MRP + Outlet_Size +
Outlet_Location_Type + Outlet_Type + Item_Category + Outler_Age,
data = trainA)
summary(regressor)
# Removing the insignificant indpendent variable
# 3rd attempt removing Item_Weight
regressor = lm(formula = Item_Outlet_Sales ~ Item_Fat_Content +
Item_Visibility + Item_MRP + Outlet_Size +
Outlet_Location_Type + Outlet_Type + Item_Category + Outler_Age,
data = trainA)
summary(regressor)
# Removing the insignificant indpendent variable
# 4th attempt removing Item_Category
regressor = lm(formula = Item_Outlet_Sales ~ Item_Fat_Content +
Item_Visibility + Item_MRP + Outlet_Size +
Outlet_Location_Type + Outlet_Type + Outler_Age,
data = trainA)
summary(regressor)
# Removing the insignificant indpendent variable
# 5th attempt removing Outler_Age
regressor = lm(formula = Item_Outlet_Sales ~ Item_Fat_Content +
Item_Visibility + Item_MRP + Outlet_Size +
Outlet_Location_Type + Outlet_Type,
data = trainA)
summary(regressor)
# Removing the insignificant independent variable
# 6th attempt removing Item_Visibility
regressor = lm(formula = Item_Outlet_Sales ~ Item_Fat_Content +
Item_MRP + Outlet_Size +
Outlet_Location_Type + Outlet_Type,
data = trainA)
summary(regressor)
# Final regressor produced
y_pred <- predict(regressor,newdata = testA)
# Saving Test results
Test.Results$OptimalMlinearmodel <- y_pred
Train.Results$OptimalMlinearmodel <- predict(regressor,newdata = traintestA)
# Saving results for submission
makecsvsubmission(Test.Results,'OptimalMlinearmodel')
# SUPPORT VECTOR REGRESSION #
#===========================#
# Using the e1071 package
library(e1071)
# Fitting Support Vector Regression to the dataset
regressor <- svm(formula = Item_Outlet_Sales ~., data = trainA, type = 'eps-regression')
# Info about the regressor using summary() function
summary(regressor)
# Predicting a new result with linear Regression
y_pred = predict(regressor,testA)
# Saving Test results
Test.Results$SVMRegression <- y_pred
Train.Results$SVMRegression <- predict(regressor,traintestA)
# Saving results for submission
makecsvsubmission(Test.Results,'SVMRegression')
# DECISION REGRESSION #
#=====================#
# Using the rpart package
library(rpart)
# Fitting Decision Regression to the dataset
regressor <- rpart(formula = Item_Outlet_Sales ~., data = trainA, control = rpart.control(minsplit = 50))
# Info about the regressor using summary() function
summary(regressor)
# Predicting a new result with linear Regression
y_pred = predict(regressor,testA)
# Saving Test results
Test.Results$DecisionRegression <- y_pred
Train.Results$DecisionRegression <- predict(regressor,traintestA)
# Saving results for submission
makecsvsubmission(Test.Results,'DecisionRegression')
# RANDOM FOREST REGRESSION #
#==========================#
# Using the rpart package
library(randomForest)
# Setting seed
set.seed(1234)
# Fitting Random Forest Regression to the dataset
regressor <- randomForest(x = traintestA, y = trainA$Item_Outlet_Sales, ntree = 500, nodesize = 80)
# Info about the regressor using summary() function
summary(regressor)
# Predicting a new result with linear Regression
y_pred = predict(regressor,testA)
# Saving Test results
Test.Results$RandomForestRegression <- y_pred
Train.Results$RandomForestRegression <- predict(regressor,traintestA)
# Saving results for submission
makecsvsubmission(Test.Results,'RandomForestRegression')
# MEGA Multiple Linear Model
# --------------------------
# Setting up training and test data
trainA <- Train.Results[c('Item_Outlet_Sales','Overall_Mean_Sales','Outlet_Mean_Sales','Item_Mean_Sales','Mlinearmodel','OptimalMlinearmodel','SVMRegression','DecisionRegression','RandomForestRegression')]
testA <- Test.Results[c('Overall_Mean_Sales','Outlet_Mean_Sales','Item_Mean_Sales','Mlinearmodel','OptimalMlinearmodel','SVMRegression','DecisionRegression','RandomForestRegression')]
traintestA <-Train.Results[c('Overall_Mean_Sales','Outlet_Mean_Sales','Item_Mean_Sales','Mlinearmodel','OptimalMlinearmodel','SVMRegression','DecisionRegression','RandomForestRegression')]
# Fitting Multiple Linear Regression to the training set.
regressor = lm(formula = Item_Outlet_Sales ~ ., data = trainA)
# Information of the regressor
summary(regressor)
# Predicting the Test set results
y_pred <- predict(regressor,newdata = testA)
# Saving Test results
Test.Results$Megalinearmodel <- y_pred
Train.Results$Megalinearmodel <- predict(regressor,newdata = traintestA)
# Saving results for submission
makecsvsubmission(Test.Results,'Megalinearmodel')
# Mega RANDOM FOREST REGRESSION #
#===============================#
# Setting up training and test data
trainA <- Train.Results[c('Item_Outlet_Sales','Overall_Mean_Sales','Outlet_Mean_Sales','Item_Mean_Sales','Mlinearmodel','OptimalMlinearmodel','SVMRegression','DecisionRegression','RandomForestRegression')]
testA <- Test.Results[c('Overall_Mean_Sales','Outlet_Mean_Sales','Item_Mean_Sales','Mlinearmodel','OptimalMlinearmodel','SVMRegression','DecisionRegression','RandomForestRegression')]
traintestA <-Train.Results[c('Overall_Mean_Sales','Outlet_Mean_Sales','Item_Mean_Sales','Mlinearmodel','OptimalMlinearmodel','SVMRegression','DecisionRegression','RandomForestRegression')]
# Using the rpart package
library(randomForest)
# Setting seed
set.seed(1234)
# Fitting Random Forest Regression to the dataset
regressor <- randomForest(x = traintestA, y = trainA$Item_Outlet_Sales, ntree = 500, nodesize = 1)
# Info about the regressor using summary() function
summary(regressor)
# Predicting a new result with linear Regression
y_pred = predict(regressor,testA)
# Saving Test results
Test.Results$MegaRandomForestRegression <- y_pred
Train.Results$MegaRandomForestRegression <- predict(regressor,traintestA)
# Saving results for submission
makecsvsubmission(Test.Results,'MegaRandomForestRegression')
|
1fe602c369b6bd007034fc7c96919791a6bd15e5 | 1f1b48ad3c0718328974920392537c256740f6be | /plot3.R | 52c0d8e3dd4105ab423f64fdf26bec19a5522db3 | [] | no_license | gopalkriz/EDAproject2 | 96d1115875a40d7ce2d70aeff0cfc5075963597b | 3e6559dea42235251200bc4fcfd3843cd33cba4e | refs/heads/master | 2020-12-24T14:01:45.639424 | 2015-07-22T07:21:48 | 2015-07-22T07:21:48 | 39,411,436 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,443 | r | plot3.R | ```
#Coursera #Exploratory Data Analysis # Project2 # date 20July2015
#About Project: view ReamMe.rd file for the entire Project Description
#Overview: Graphical Analysis of Ambient Air Pollution / Fine particulate matter (PM2.5)
#Database (USA): National Emissions Inventory (NEI) from Environmental Protection Agency (EPA)
#Data URL in single zipfile Data for Peer Assessment [29Mb]
```
##Stage0
#Operating System and Environment Specs
#WindowsXP Pentium4 2.8GHz Ram2GB
#R version 3.2.0 Platform: i386-w64-mingw32/i386 (32-bit)
#Rstudio Version 0.99.442
```
#Preparatory step
ls(); getwd(); dir()
setwd("C:/Documents and Settings/Divekar/R workspace"); dir()
library(base)
library(utils)
library(stats)
library(httr)
library(graphics)
library(grDevices)
library(ggplot2)
```
##Stage1: getting the files
#Download the dataset; unzip the file; review the contents
#URL: https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip
```
if(!file.exists("./NEI_EDAproject")) {
dir.create("./NEI_EDAproject")
}else {cat("Directory exist")}
if(!exists(fileURL) {
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
}else {cat("fileURL already exist in R environment")}
if(!file.exists("./NEI_EDAproject/NEIdataset.zip")){
download.file(fileURL, destfile= "./NEI_EDAproject/NEIdataset.zip")
}else {cat("NEIdataset.zip exist in folder./NEI_EDAproject")} # Windows not needed to use method="curl"
#Unzip the folder and files
if(!file.exists("./NEI_EDAproject/Source_Classification_Code.rds")){
unzip(zipfile="./NEI_EDAproject/NEIdataset.zip",exdir="./NEI_EDAproject")
}else {cat("Source_Classification_Code.rds exist in folder./NEI_EDAproject")}
if(!file.exists("./NEI_EDAproject/summarySCC_PM25.rds")){
unzip(zipfile="./NEI_EDAproject/NEIdataset.zip",exdir="./NEI_EDAproject")
}else {cat("summarySCC_PM25.rds exist in folder./NEI_EDAproject")}
```
#Verification of datafiles being available in the required folder
list.files(file.path("./NEI_EDAproject"))
#PM2.5 Emissions Data: summarySCC_PM25.rds
#Source Classification Code Table: Source_Classification_Code.rds
```
##Stage2: getting data
#Read the files to input the data
#for refernce: DataTable Size is NEI 64976516obs 6variables. SCC 11717obs 15variables.
```
NEI <- readRDS("./NEI_EDAproject/summarySCC_PM25.rds") #largesize, will take time to download
SCC <- readRDS("./NEI_EDAproject/Source_Classification_Code.rds")
```
##Stage3: understanding the data structure
```
nrow(NEI);ncol(NEI); class(NEI); names(NEI)
head(NEI, n=3); tail(NEI, n=3)
#table(NEI$year, NEI$type) #takes much time to process
range(NEI$Emissions); summary(NEI)
#
nrow(SCC);ncol(SCC); class(SCC);names(SCC)
head(SCC, n=3); tail(SCC, n=3);summary(SCC)
#table(SCC$Data.Category,SCC$SCC.Level.One) #takes much time to process
```
##Stage4
#Q3. Instructions: [use ggplot2 plotting system]
```
#Make subset for factor Baltimore City (ie. fips == "24510") of all variables
BaltCityNEI <- subset (NEI, fips == "24510")
#Below two lines are used for preliminery undestanding of the dataset
#nrow(BaltCity);ncol(BaltCity); class(BaltCity); names(BaltCity)
#table(BaltCity$type, BaltCity$year); summary(BaltCity)
#Sum the Emission by Year and Type(source)
BCemissiontype <- aggregate(Emissions ~ year + type, BaltCityNEI, sum)
#Below two lines are used for preliminery undestanding of the dataset
#nrow(BCemissiontype);ncol(BCemissiontype); class(BCemissiontype); names(BCemissiontype)
#table(BCemissiontype$type, BCemissiontype$year); summary(BCemissiontype)
```
#Preparing the Graphical Plot
#plots to default graphic device / monitor
plot3 <- ggplot(BCemissiontype,
aes(year, Emissions, color = type)) +
geom_line() +
ggtitle("Emission by Type of PM2.5 by Annual Year in Baltimore City")
plot3 #plot on scree for visual
#Open graphic device to png; create the plot; close the connection
png('plot3.png')
plot3
dev.off(); getwd()# file is in working directory
```
#Understanding and Analysis of the plot
#Q3.Type-wise Emission for Baltimore City, Maryland ( fips == "24510" ) from 1999 to 2008?
#
#A3.Point type has seen increase in emission over the period 1999 to 2008
#Non-Road, Non-Point, On-Road types have seen decrease in emission over the period 1999 to 2008
#Point source have seen an increase in emission over the period 1999 to 2008
#the rate and magnitude of change has variation for each Type as visible from the plot
### |
c0df4d86195292ffce6ea322783f920e92e6088a | 4e27985b0d0eaeb9c7188883309f5c01b08f0827 | /man/get_comp.Rd | bfd136672aa87b22c7d32690031751011dc5fc8e | [] | no_license | nxskok/poistan | 6064803d61785c7fb2ac2037ec602187d3766cc4 | b352b66d79a16d305df6c3ba371874a7fc5c08f6 | refs/heads/master | 2020-06-01T07:19:27.814275 | 2015-05-17T17:26:10 | 2015-05-17T17:26:10 | 34,591,010 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 327 | rd | get_comp.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/get_data.R
\name{get_comp}
\alias{get_comp}
\title{get competition(s) from data base}
\usage{
get_comp(id)
}
\arguments{
\item{id}{competition id (scalar)}
}
\value{
data frame of results
}
\description{
get competition(s) from data base
}
|
8cc989bd264ae195e921f13bdfc371ed9c485a91 | 4155eeadd175d2518f31d9b353396b0a94bbde60 | /tests/testthat/test-svg.R | 6b23758353952fff1078f1a87223fd847dac38a5 | [
"MIT"
] | permissive | trafficonese/weathericons | 66eaab8cc3c67ad8b4f848ca55cf30a9acfdf9d7 | c2f287f0a934b9994383577529eb4bb6ca271074 | refs/heads/master | 2022-04-07T20:23:14.221855 | 2020-03-09T09:28:46 | 2020-03-09T09:28:46 | 242,313,213 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,114 | r | test-svg.R |
test_that("Test SVGs", {
expect_error(weathersvg("wi-cloudyerr"))
x <- weathersvg("wi-cloudy", style = list(fill = "red",
width = "10%"))
expect_true(inherits(x, "html"))
expect_true(inherits(x, "character"))
x <- weathersvg("wi-wind-deg.svg", style = list(fill = "blue",
rotate = "45deg",
width = "10%"))
expect_true(inherits(x, "html"))
expect_true(inherits(x, "character"))
x <- weathersvg("wi-train.svg", style = list(fill = "yellow",
width = "10%"))
expect_true(inherits(x, "html"))
expect_true(inherits(x, "character"))
x <- weathersvg("wi-train.svg", style = list(fill = "yellow",
width = "10%"),
className = "myclass")
expect_true(inherits(x, "html"))
expect_true(inherits(x, "character"))
x <- weathersvg("wi-train.svg", className = "myclass")
expect_true(inherits(x, "html"))
expect_true(inherits(x, "character"))
})
|
46b0141d76e7feef4ba4946a9ff452ce78d8aeaf | bc7da505db39e6e5b0dcbca9e9c935dd4ca34eb3 | /R/rolling_var.R | 08f32728c25dc8962d56989da318825863c83ed1 | [] | no_license | cran/PheVis | fba5649ea6b3abbfcd0a74b7c54afd5fd2b51dc9 | cc19834d117db49595edb989c89d29a4da74aa8d | refs/heads/master | 2023-03-09T14:13:42.362852 | 2021-02-23T08:40:21 | 2021-02-23T08:40:21 | 339,444,782 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,407 | r | rolling_var.R | #' @title rolling_var
#' @description Compute rolling variables (last visit, last 5 visits, last month and last year)
#'
#' @param id Patient id numeric vector
#' @param var Variable numeric vector
#' @param start_date Time numeric vector
#' @param id_encounter Encounter id vector
#'
#' @return A dataframe containing the rolling variables.
rolling_var <- function(id, var, start_date, id_encounter){
. <- NULL
## set year and month sizes
win_size_month = 30
win_size_year = 365
## create df
df <- data.frame(id = id,
id_encounter = id_encounter,
var = var,
start_date = start_date)
## compute rolling var
df_rolling <- df %>%
filter(!is.na(df$start_date)) %>%
group_by(.data$id) %>%
arrange(start_date) %>%
mutate(last_vis = lag(.data$var),
last_5vis = cum_lag(x = .data$var, n_lag = 5),
cum = cumsum(.data$var)) %>%
replace(is.na(.),0) %>%
arrange(.data$id, .data$start_date)
df_roll_time <- roll_time_sum(id = id,
id_encounter = id_encounter,
var = var,
start_date = start_date,
win_size1 = win_size_month,
win_size2 = win_size_year)
result <- df_rolling %>%
inner_join(df_roll_time)
return(result)
}
|
eb8cf012e6222dd3389910843be76f0cf91a0a57 | 6531e2964e1938417fa6b5404a704635575d7960 | /R/qc_spot_etuff.r | 74865df3020150dd9d054b7976a25552e75211b0 | [] | no_license | galuardi/tags2etuff | 1451efba0220dd66f81796da48307d036a46db4e | 68d1b58ada8e325577c99848a5feb56a23416c8f | refs/heads/master | 2023-08-29T14:15:14.719884 | 2021-09-15T13:30:33 | 2021-09-15T13:30:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,849 | r | qc_spot_etuff.r | #' Simple quality control plots for SPOT eTUFF file
#'
#' This function is to do very simple QC on SPOT tag conversion to eTUFF and the
#' associated metadata. It does NOT do any filtering or further processing of
#' the positions. Instead, it is meant to check that the coordinates, time
#' stamps, location classes and associated metadata all make sense.
#'
#' @param etuff is etuff file
#' @param meta_row is the associated metadata
#' @param writePNG is logical indicating whether or not to write the output to file as PNG
#'
#' @return a QC plot
#' @export
#'
#' @importFrom lattice levelplot
#' @import ggplot2
#' @import ggforce
qc_spot_etuff <- function(etuff, meta_row, writePNG = FALSE){
## any where datetime and variablevalue are identical?
if (class(etuff) != 'etuff') stop('Input etuff object must be of class etuff.')
bins <- etuff$bins
meta <- etuff$meta
#etuff <- etuff$etuff
## spread etuff back to tidy format
#df <- etuff$etuff %>% dplyr::select(-c(VariableID, VariableUnits)) %>% spread(VariableName, VariableValue)
df <- etuff$etuff
df$latitude <- as.numeric(df$latitude)
df$longitude <- as.numeric(df$longitude)
df <- df[which(!is.na(df$latitude)),]
#df$DateTime <- as.POSIXct(df$DateTime, tz='UTC')
xl <- c(min(min(df$longitude), min(meta_row$geospatial_lon_start), min(meta_row$geospatial_lon_end)),
max(max(df$longitude), max(meta_row$geospatial_lon_start), max(meta_row$geospatial_lon_end)))
yl <- c(min(min(df$latitude), min(meta_row$geospatial_lat_start), min(meta_row$geospatial_lat_end)),
max(max(df$latitude), max(meta_row$geospatial_lat_start), max(meta_row$geospatial_lat_end)))
## check appropriate sign on start/end and track coords
## just send warning as possible to have diff
if (any(sign(df$latitude) != sign(as.numeric(meta_row$geospatial_lat_start)) |
sign(df$latitude) != sign(as.numeric(meta_row$geospatial_lat_end)) |
sign(as.numeric(meta_row$geospatial_lat_end)) != sign(as.numeric(meta_row$geospatial_lat_start)))){
warning('Check latitudes.')
print(paste('Lat start', meta_row$geospatial_lat_start, '.'))
print(paste('Lat end', meta_row$geospatial_lat_end, '.'))
print(paste('Sign of track lat is', unique(sign(df$latitude)), '.'))
}
if (any(sign(df$longitude) != sign(meta_row$geospatial_lon_start) |
sign(df$longitude) != sign(meta_row$geospatial_lon_end) |
sign(meta_row$geospatial_lon_end) != sign(meta_row$geospatial_lon_start))){
warning('Check longitudes.')
print(paste('Lon start', meta_row$geospatial_lon_start, '.'))
print(paste('Lon end', meta_row$geospatial_lon_end, '.'))
print(paste('Sign of track lon is', unique(sign(df$longitude)), '.'))
}
#==========
## BUILD PLOT
#==========
## get world map data
world <- map_data('world')
#if (writePNG) pdf(paste(meta_row$instrument_name, '-ggmap.pdf', sep=''), width=8, height=12)
p1 <- ggplot() + geom_polygon(data = world, aes(x=long, y = lat, group = group)) +
coord_fixed(xlim=xl, ylim=yl, ratio=1.3) + xlab('') + ylab('') +
geom_path(data = df, aes(x = longitude, y = latitude)) +
geom_point(data = df, aes(x = longitude, y = latitude, colour = DateTime)) +
geom_point(data = meta_row, aes(x = geospatial_lon_start, y = geospatial_lat_start), colour = c('green'), fill = c('green'), shape = 24) +
geom_point(data = meta_row, aes(x = geospatial_lon_end, y = geospatial_lat_end), colour = c('red'), fill = c('red'), shape = 24) +
ggtitle(paste(meta_row$instrument_name, meta_row$platform))
p2 <- ggplot(df, aes(y = latitude, x = DateTime, colour = DateTime)) + geom_point() + geom_path() +
geom_point(data = meta_row, aes(x = time_coverage_start, y = geospatial_lat_start), colour = c('green'), fill = c('green'), shape = 24) +
geom_point(data = meta_row, aes(x = time_coverage_end, y = geospatial_lat_end), colour = c('red'), fill = c('red'), shape = 24)
p3 <- ggplot(df, aes(y = longitude, x = DateTime, colour = DateTime)) + geom_point() + geom_path() +
geom_point(data = meta_row, aes(x = time_coverage_start, y = geospatial_lon_start), colour = c('green'), fill = c('green'), shape = 24) +
geom_point(data = meta_row, aes(x = time_coverage_end, y = geospatial_lon_end), colour = c('red'), fill = c('red'), shape = 24)
#layout_matrix = rbind(c(1), c(2), c(3)))
if (writePNG){
g <- gridExtra::arrangeGrob(grobs = list(p1, p2, p3), ncol=1, heights = c(6,2,2))
ggsave(file = paste(meta_row$instrument_name, '-ggmap.png', sep=''), width=8, height=8, units = 'in', g)
#dev.off()
print(paste('Maps written to ', meta_row$instrument_name, '-ggmap.png.', sep=''))
} else{
gridExtra::grid.arrange(grobs = list(p1, p2, p3), ncol=1, heights = c(6,2,2))
print('Should have output plot to graphics device.')
}
}
|
fc6ca0cf25150f96f84d2d2a49f8756c8fd12581 | cd75f45d6fa3ca02fd18954f1c977318e359325f | /25_02_2020_measles/25_02_2020_measles.R | 3b09f889d2e8cc195ec404ba72e74b9ca59bf234 | [] | no_license | camecry/ILoveTidyTuesday | 45f2bad1137aa5b76e55c274a80878864f86624a | 93691c392ebbf6fd241c65320c439e3da87e812b | refs/heads/master | 2020-12-19T14:19:37.800976 | 2020-11-17T14:09:04 | 2020-11-17T14:09:04 | 235,759,513 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,876 | r | 25_02_2020_measles.R | # TidyTuesday 25/02/2020
# Measles
# Get the Data
library(tidyverse)
measles <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-02-25/measles.csv')
# Filter only year 2018-2019
data <- measles %>%
filter(year == "2018-19")
# Select only few columns & drop na
dt <- data %>%
select(index, state, county, city, name, xper) %>%
drop_na()
# Filter schools here at least 25% of students is exempted from vaccination for personal reasons
dt <- dt %>%
arrange(desc(xper)) %>%
filter(xper > 25.00)
dt <- distinct(dt, index, .keep_all = TRUE)
# Getting shorter names for certain schools
dt[27, "name"] <- "Montessori Education Centre"
dt[35, "name"] <- "Montessori D. P. Lakeside"
dt[50, "name"] <- "Montessori Education Centre"
dt[42, "name"] <- "American Leaders A.-Gilbert"
dt[45, "name"] <- "Elem Sch for Arts & Academics"
dt[5, "name"] <- "Eagleridge E. Program"
# Start seetting the plot -> See https://www.r-graph-gallery.com/circular-barplot.html
# Set a number of 'empty bar' to add at the end of each group
empty_bar <- 2
# Add lines to the initial dataset
to_add <- matrix(NA, empty_bar, ncol(dt))
colnames(to_add) <- colnames(dt)
dt <- rbind(dt, to_add)
dt$id <- seq(1, nrow(dt))
# Get the name and the y position of each label
label_data <- dt
number_of_bar <- nrow(label_data)
angle <- 90 - 360 * (label_data$id-0.5) /number_of_bar
label_data$hjust <- ifelse(angle < -90, 1, 0)
label_data$angle <- ifelse(angle < -90, angle+180, angle)
# palette
pal <- c("#D4761F","#050AC1")
# Make the plot of students exempted from vaccination for personal reasons
# Big thanks to Jake Lawlor's Tidy tuesday submission on food-related C2
p <- ggplot(dt, aes(x=as.factor(id), y=xper))+
geom_bar(stat="identity", aes(fill=state)) +
scale_fill_manual(values=pal)+
ylim(-50,90) +
theme_bw() +
theme_minimal() +
theme(
axis.text = element_blank(),
axis.title = element_blank(),
panel.grid = element_blank(),
plot.margin = unit(rep(-1,4,), "cm")) +
annotate(geom = "text",
x=0,y=-50,
hjust=.5, vjust=1,
label="Schools where at least 25% of
students is exempted from
vaccination for personal reasons",
size=4.3, lineheight=.8,
family="Staatliches Regular",
color="black") +
annotate(geom = "text",
x=0,y=200,
vjust=1,
hjust=.5,
label = "Data: The Wallstreet Journal | Graphic: Cristina Cametti - @CameCry",
size=2.5,
color="black")+
coord_polar(start = 0) +
geom_text(data=label_data, aes(x=id, y=xper+1, label=name, hjust=hjust), color="black",
fontface="bold",alpha=0.6, size=2.6, angle=label_data$angle, inherit.aes = FALSE )
p
|
d121f9587b1a5d71fcc916b4448e25a544534e15 | a6969f000d222cbfb0853181120a40461085d67e | /navbar/ui.R | 1a5e067a5664b436707850768d50f6a52be9e51c | [] | no_license | benporter/wiredhack | a38bf081e14a1bbd066f31232f400f2e38d06f29 | 7243f6f5181b989e95e67ff424403c705088ca63 | refs/heads/master | 2021-01-02T12:56:50.741129 | 2014-06-30T01:06:50 | 2014-06-30T01:06:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,859 | r | ui.R | library(shiny)
library(sqldf)
library(ggplot2)
require(gridExtra)
library(ggmap)
#require(rCharts)
#options(RCHART_LIB = 'sankey')
#library(googleVis)
shinyUI(navbarPage("York Tech",
tabPanel("The Student Body",
sidebarPanel(
h6("Gender"),
checkboxInput("male", "Male", TRUE),
checkboxInput("female", "Female", TRUE),
h6("Race"),
radioButtons("raceFilter", "", #"Race Filter",
list("All Races"="AllRaces",
"American Indian"="AmericanIndian",
"Asian"="Asian",
"Black"="Black" ,
"Hispanic"= "Hispanic",
"More than one"= "Morethanone",
"Native Hawaiian"= "NativeHawaiian",
"Non-Resident Alien"= "Non-ResidentAlien",
"Unknown"= "Unknown",
"White"= "White"),
selected="All Races")
),
mainPanel(h3("Explore our student body"),
plotOutput("distPlot")
)
),
tabPanel("Map",
sidebarPanel(
h6("The Top 10 Feeder Schools:"),
radioButtons("HSpick", "Select Your High School",
list("All High Schools" = "AllHighSchools",
"Out of State HS" = "OutofStateHS",
"Rock Hill High School" = "RockHillHighSchool",
"Northwestern High School" = "NorthwesternHighSchool" ,
"York Comprehensive High School" = "YorkComprehensiveHighSchool",
"Clover High School" = "CloverHighSchool",
"Fort Mill High School" = "FortMillHighSchool",
"Lancaster County School" = "LancasterCountySchool",
"District Chester High School" = "DistrictChesterHighSchool",
"South Pointe High School" = "SouthPointeHighSchool",
"Nation Ford High School" = "NationFordHighSchool"),
selected="All High Schools"),
h6("Map Parameters"),
sliderInput("jitter", "Point Jitter:",
min = 0, max = 0.5, value = 0.05, step= 0.01)
),
mainPanel(
h4("Follow in their footsteps. Apply to York Technical College today!"),
plotOutput("map")
)
),
tabPanel("Explorer",
h4("Find people like you:"),
dataTableOutput("dfExplorer")
),
tabPanel("Program by Gender",
plotOutput("genderProgram")
),
tabPanel("Student Clusters",
sidebarPanel(
radioButtons("cluster", "Choose Cluster",
list("1"="1",
"2"="2",
"3"="3",
"4"="4" ,
"5"="5",
"6"="6"),
selected="1")
),
mainPanel(
h5("It is not a homogenous set of students, but of distinct groups of people.")
,plotOutput("clustPlot")
)
),
tabPanel("Debug"
,textOutput("debug")
,tableOutput("dfprint")
)
))
|
efb977b550d4b4ce3f5c7bdc5725864daa3460bf | 268d63deccd2aa6e2c16ef181d0a1d22f71ed8ed | /workout03/binomial/man/bin_cumulative.Rd | d2468e18fdbc7791b0f8eff61ba580d0945373a6 | [] | no_license | yhed10123/stat133-spring-2019 | e22977a5fbbadd524d07e2422fe601048281c9e4 | c809002e0ac676baed065a1a95a638eac7f4f537 | refs/heads/master | 2020-04-28T20:19:17.368871 | 2019-05-04T04:59:07 | 2019-05-04T04:59:07 | 175,535,954 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 463 | rd | bin_cumulative.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binomial.R
\name{bin_cumulative}
\alias{bin_cumulative}
\title{Cumulated Distribution}
\usage{
bin_cumulative(trials, prob)
}
\arguments{
\item{trials}{number of trials}
\item{prob}{probability}
}
\value{
Data frame with the Probability Value and Cumulated Value
}
\description{
Computes Cumulated distribution
}
\examples{
dis2 <- bin_cumulative(trials = 5, prob = 0.5)
plot(dis2)
}
|
ee119872ba3fffa6d13522c30dd450ac51a0887e | 4b2cd6e05b49cd6d99c5491779d947e6b95cfe78 | /Sipsa.R | e480fdb14bbc13e96f73101334902e440fcaba96 | [] | no_license | mariomicolta/sipsa | 2baa1bd164b245da004c9c2c2aae0d08db44d293 | d783a465282b6cb380c3ff81146e592edffc103a | refs/heads/main | 2023-03-01T01:26:15.843065 | 2021-02-05T17:43:49 | 2021-02-05T17:43:49 | 331,174,856 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 23,097 | r | Sipsa.R | # Date: 19 enero de 2021
# Authors: Jose Luis Gamarra Palacios - John Mario Micolta Garzon
# Attrs
# * artiId: Id del articulo
# * artiNombre: Nombre del articulo
# * fechaIni: Fecha de captura del dato
# * fuenId: Unknown
# * fuenNombre: Unknown
# * futiId: Unknown
# * maximoKg: Precio maximo por kg
# * minimoKg: Precio minimo por kg
# * promedioKg: Precio promedio por kg
###########################################
# Load libraries
#Para el formato de fechas en español Jan -> Ene
Sys.setlocale(locale = "Spanish")
set.seed(1234)
library(tidyverse)
library(lubridate)
library(xts)
library(forecast)
library(tseries)
library(rlist)
library(ForecastComb)
# Load Data
loadData <- function(pathFile = 'datamensual.csv'){
# Capturar con soap
# Funcion para cargar los datos
data <- read_csv2(pathFile, locale = locale(encoding = "Latin1"))
return(data)
}
# Filter Data
filterData <- function(productName){
# Filtramos por nombre.
data <- data %>% filter(Producto == productName)
return(data)
}
formatDate <- function(fecha){
f <- lubridate::dmy(paste0("01-", fecha))
return(f)
}
# Prepare Data
prepareData <- function(){
#CAMBIAR
data <- data %>% mutate(
Fecha = formatDate(Fecha),
Grupo = as.character(Grupo),
Producto = as.character(Producto),
Mercado = as.character(Mercado),
precio = as.double(`Precio por kilogramo`)
)
data <- data[,-c(5)]
#Sort
data <- data %>% arrange(Fecha)
return(data)
}
createTimeSeries <- function(target, start, frequency){
#target: variable objeto de estudio
data.ts <- ts(data[,target], start = c(year(start), month(start)), frequency = frequency)
return(data.ts)
}
splitData <- function(data, percentage = .3){
#Funcion para cortar los datos
#data: datos en formato ts o xts
#percentage: porcentaje de datos a utilizar como test
h <- round(length(data) * percentage)
th <- length(data.ts) - h
#train <- data.ts[1:th,]
#test <- data.ts[(th+1):(th+h),]
train <- subset(data.ts, start = 1, end = th)
test <- subset(data.ts, start = th + 1, end = th + h)
return(list(train = train, test = test, h = h, th = th))
}
trainArima <- function(ic = 'aic', data){
#ic = ('aic', 'bic', 'aicc')
significance_level <- 0.05
flag = TRUE
arima <- auto.arima(y = data, max.p = 30, max.q = 30, ic = ic, stepwise = FALSE)
orders <- data.frame(as.list(arimaorder(arima)))
model <- Arima(train, order = c(orders$p, orders$d, orders$q))
# *** No autocorrelación
# Prueba de rachas
runstest <- runs.test(factor(residuals(model) > 0))
if(runstest$p.value < significance_level){
#stop('Se rechaza la no autocorrelación')
flag = FALSE
}
# Box pierce
boxPierceTable <- tableBoxPierceOrLjungBox(residuals(model))
minBoxPierce <- min(boxPierceTable['p-valor'])
maxBoxPierce <- max(boxPierceTable['p-valor'])
if(minBoxPierce < significance_level){
#stop('Se rechaza la no autocorrelación')
flag = FALSE
}
# *** Homocedasticidad
#Ljung-Box
ljungBoxTable <- tableBoxPierceOrLjungBox(residuals(model)^2, type = "Ljung-Box")
minLjungBox <- min(ljungBoxTable['p-valor'])
maxLjungBox <- max(ljungBoxTable['p-valor'])
if(minLjungBox < significance_level){
#stop('Se rechaza la homocedasticidad')
flag = FALSE
}
# Normalidad
testNormJarqueBera <- jarque.bera.test(residuals(model))
testNormShapiro <- shapiro.test(residuals(model))
if(testNormJarqueBera$p.value < significance_level | testNormShapiro$p.value < significance_level){
#stop('Se rechaza la normalidad de los residuos')
flag = FALSE
}
return(list(flag = flag, p = orders$p, d = orders$d, q = orders$q))
}
# Da error
trainPolynomial <- function(data, degree = 2){
significance_level <- 0.05
#ERROR
# No reconoce los parámetros dentro de la función poly. Es decir, al parecer, se debe escribir directamente
#el grado del polinomio y no poner degree
model <- tslm(data ~ poly(trend, degree) + season, data = data)
flag = TRUE
# *** No autocorrelación
# Prueba de rachas
runstest <- runs.test(factor(residuals(model) > 0))
if(runstest$p.value < significance_level){
#stop('Se rechaza la no autocorrelación')
flag = FALSE
}
# Box pierce
boxPierceTable <- tableBoxPierceOrLjungBox(residuals(model))
minBoxPierce <- min(boxPierceTable['p-valor'])
maxBoxPierce <- max(boxPierceTable['p-valor'])
if(minBoxPierce < significance_level){
#stop('Se rechaza la no autocorrelación')
flag = FALSE
}
# *** Homocedasticidad
#Ljung-Box
ljungBoxTable <- tableBoxPierceOrLjungBox(residuals(model)^2, type = "Ljung-Box")
minLjungBox <- min(ljungBoxTable['p-valor'])
maxLjungBox <- max(ljungBoxTable['p-valor'])
if(minLjungBox < significance_level){
#stop('Se rechaza la homocedasticidad')
flag = FALSE
}
# Normalidad
testNormJarqueBera <- jarque.bera.test(residuals(model))
testNormShapiro <- shapiro.test(residuals(model))
if(testNormJarqueBera$p.value < significance_level | testNormShapiro$p.value < significance_level){
#stop('Se rechaza la normalidad de los residuos')
flag = FALSE
}
return(list(flag = flag))
}
trainingModelsFixed <- function(h, th, data, train){
ptm <- proc.time()
#Funcion para entrenar los modelos usando una ventana fija
#h: tamaño del horizonte (cuantos periodos usamos como test)
#th: periodos en los datos de train
#data: todos los datos
ordersArimaAIC <- trainArima(ic = 'aic', data = train)#organizar este train luego
ordersArimaBIC <- trainArima(ic = 'bic', data = train)
ordersArimaAICC <- trainArima(ic = 'aicc', data = train)
#polynomial2 <- trainPolynomial(degree = 2, data = train)
arimaAIC <- NULL
arimaBIC <- NULL
arimaAICC <- NULL
mediaMovil3 <- NULL
mediaMovil4 <- NULL
mediaMovil5 <- NULL
mediaMovil6 <- NULL
mediaMovil7 <- NULL
mediaMovil8 <- NULL
mediaMovil9 <- NULL
suavizacionExponencialSimple <- NULL
suavizacionExponencialLineal <- NULL
holtWinterAditivo <- NULL
holtWinterMultiplicativo <- NULL
if(ordersArimaAIC$flag){
arimaAIC <- forecast(Arima(train, order = c(ordersArimaAIC$p, ordersArimaAIC$d, ordersArimaAIC$q)), h = h)$mean
}
if(ordersArimaBIC$flag){
arimaBIC <- forecast(Arima(train, order = c(ordersArimaBIC$p, ordersArimaBIC$d, ordersArimaBIC$q)), h = h)$mean
}
if(ordersArimaAICC$flag){
arimaAICC <- forecast(Arima(train, order = c(ordersArimaAICC$p, ordersArimaAICC$d, ordersArimaAICC$q)), h = h)$mean
}
#mediaMovil3 <- forecast(ma(train , order = 3), h = 2)$mean
#mediaMovil4 <- forecast(ma(train , order = 4), h = (h-1))$mean
#mediaMovil5 <- forecast(ma(train , order = 5), h = 3)$mean
#mediaMovil6 <- forecast(ma(train , order = 6), h = 4)$mean
#mediaMovil7 <- forecast(ma(train , order = 7), h = 4)$mean
#mediaMovil8 <- forecast(ma(train , order = 8), h = 5)$mean
#mediaMovil9 <- forecast(ma(train , order = 9), h = 5)$mean
suavizacionExponencialSimple <- ses(train, h = h)$mean
suavizacionExponencialLineal <- holt(train, h = h)$mean
holtWinterAditivo <- hw(train, h = h, seasonal = "additive")$mean
holtWinterMultiplicativo <- hw(train, h = h, seasonal = "multiplicative")$mean
print(proc.time () - ptm)
# Estos modelos no requieren validacion, por lo tanto, entran sin compromiso
lista <- list(
#'mediaMovil3' = list('name' = 'mediaMovil3', 'forecast' = mediaMovil3),
#'mediaMovil4' = list('name' = 'mediaMovil4', 'forecast' = mediaMovil4),
#'mediaMovil5' = list('name' = 'mediaMovil5', 'forecast' = mediaMovil5),
#'mediaMovil6' = list('name' = 'mediaMovil6', 'forecast' = mediaMovil6),
#'mediaMovil7' = list('name' = 'mediaMovil7', 'forecast' = mediaMovil7),
#'mediaMovil8' = list('name' = 'mediaMovil8', 'forecast' = mediaMovil8),
#'mediaMovil9' = list('name' = 'mediaMovil9', 'forecast' = mediaMovil9),
'suavizacionExponencialSimple' = list('name' = 'suavizacionExponencialSimple', 'forecast' = suavizacionExponencialSimple),
'suavizacionExponencialLineal' = list('name' = 'suavizacionExponencialLineal', 'forecast' = suavizacionExponencialLineal),
'holtWinterAditivo' = list('name' = 'holtWinterAditivo', 'forecast' = holtWinterAditivo),
'holtWinterMultiplicativo' = list('name' = 'holtWinterMultiplicativo', 'forecast' = holtWinterMultiplicativo))
if(ordersArimaAIC$flag){
lista <- list.append(lista, 'arimaAIC' = list('name' = 'arimaAIC', 'forecast' = arimaAIC))
}
if(ordersArimaBIC$flag){
lista <- list.append(lista, 'arimaBIC' = list('name' = 'arimaBIC', 'forecast' = arimaBIC))
}
if(ordersArimaAICC$flag){
lista <- list.append(lista, 'arimaAICC' = list('name' = 'arimaAICC', 'forecast' = arimaAICC))
}
return(lista)
}
trainingModelsMoving <- function(h, th, data, train){
ptm <- proc.time()
#Funcion para entrenar los modelos usando una ventana movil
#h: tamaño del horizonte (cuantos periodos usamos como test)
#th: periodos en los datos de train
#data: todos los datos
ordersArimaAIC <- trainArima(ic = 'aic', data = train)#organizar este train luego
ordersArimaBIC <- trainArima(ic = 'bic', data = train)
ordersArimaAICC <- trainArima(ic = 'aicc', data = train)
#polynomial2 <- trainPolynomial(degree = 2, data = train)
arimaAIC <- NULL
arimaBIC <- NULL
arimaAICC <- NULL
mediaMovil3 <- NULL
mediaMovil4 <- NULL
mediaMovil5 <- NULL
mediaMovil6 <- NULL
mediaMovil7 <- NULL
mediaMovil8 <- NULL
mediaMovil9 <- NULL
suavizacionExponencialSimple <- NULL
suavizacionExponencialLineal <- NULL
holtWinterAditivo <- NULL
holtWinterMultiplicativo <- NULL
for(i in 1:h){
ventana <- subset(data, start = 1 + i, end = th - 1 + i)
if(ordersArimaAIC$flag){
arimaAIC[i] <- forecast(Arima(ventana, order = c(ordersArimaAIC$p, ordersArimaAIC$d, ordersArimaAIC$q)), h = 1)$mean
}
if(ordersArimaBIC$flag){
arimaBIC[i] <- forecast(Arima(ventana, order = c(ordersArimaBIC$p, ordersArimaBIC$d, ordersArimaBIC$q)), h = 1)$mean
}
if(ordersArimaAICC$flag){
arimaAICC[i] <- forecast(Arima(ventana, order = c(ordersArimaAICC$p, ordersArimaAICC$d, ordersArimaAICC$q)), h = 1)$mean
}
mediaMovil3[i] <- tail(forecast(ma(ventana , order = 3), h = 2)$mean, 1)
mediaMovil4[i] <- tail(forecast(ma(ventana , order = 4), h = 3)$mean, 1)
mediaMovil5[i] <- tail(forecast(ma(ventana , order = 5), h = 3)$mean, 1)
mediaMovil6[i] <- tail(forecast(ma(ventana , order = 6), h = 4)$mean, 1)
mediaMovil7[i] <- tail(forecast(ma(ventana , order = 7), h = 4)$mean, 1)
mediaMovil8[i] <- tail(forecast(ma(ventana , order = 8), h = 5)$mean, 1)
mediaMovil9[i] <- tail(forecast(ma(ventana , order = 9), h = 5)$mean, 1)
suavizacionExponencialSimple[i] <- ses(ventana, h = 1)$mean
suavizacionExponencialLineal[i] <- holt(ventana, h = 1)$mean
holtWinterAditivo[i] <- hw(ventana, h = 1, seasonal = "additive")$mean
holtWinterMultiplicativo[i] <- hw(ventana, h = 1, seasonal = "multiplicative")$mean
}
print(proc.time () - ptm)
# Estos modelos no requieren validacion, por lo tanto, entran sin compromiso
lista <- list(
'mediaMovil3' = list('name' = 'mediaMovil3 Movil', 'forecast' = mediaMovil3),
'mediaMovil4' = list('name' = 'mediaMovil4 Movil', 'forecast' = mediaMovil4),
'mediaMovil5' = list('name' = 'mediaMovil5 Movil', 'forecast' = mediaMovil5),
'mediaMovil6' = list('name' = 'mediaMovil6 Movil', 'forecast' = mediaMovil6),
'mediaMovil7' = list('name' = 'mediaMovil7 Movil', 'forecast' = mediaMovil7),
'mediaMovil8' = list('name' = 'mediaMovil8 Movil', 'forecast' = mediaMovil8),
'mediaMovil9' = list('name' = 'mediaMovil9 Movil', 'forecast' = mediaMovil9),
'suavizacionExponencialSimple' = list('name' = 'suavizacionExponencialSimple Movil', 'forecast' = suavizacionExponencialSimple),
'suavizacionExponencialLineal' = list('name' = 'suavizacionExponencialLineal Movil', 'forecast' = suavizacionExponencialLineal),
'holtWinterAditivo' = list('name' = 'holtWinterAditivo Movil', 'forecast' = holtWinterAditivo),
'holtWinterMultiplicativo' = list('name' = 'holtWinterMultiplicativo Movil', 'forecast' = holtWinterMultiplicativo))
if(ordersArimaAIC$flag){
lista <- list.append(lista, 'arimaAIC' = list('name' = 'arimaAIC Movil', 'forecast' = arimaAIC))
}
if(ordersArimaBIC$flag){
lista <- list.append(lista, 'arimaBIC' = list('name' = 'arimaBIC Movil', 'forecast' = arimaBIC))
}
if(ordersArimaAICC$flag){
lista <- list.append(lista, 'arimaAICC' = list('name' = 'arimaAICC Movil', 'forecast' = arimaAICC))
}
return(lista)
}
trainingModelsRecursive <- function(h, th, data, train){
# Train every candidate model with a recursive (expanding) window: the
# i-th one-step-ahead forecast is built from observations 1..(th - 1 + i),
# so the fitting window grows by one observation per step.
# h:     horizon size (number of periods held out as test)
# th:    number of periods in the training data
# data:  the complete time series
# train: training slice, used only to select the ARIMA orders
# Returns a named list; each entry is a list with $name (label) and
# $forecast (vector of h one-step-ahead forecasts).
ptm <- proc.time()
# Select ARIMA orders on the training data under three information
# criteria; each result carries $flag = TRUE when a usable model was found.
ordersArimaAIC <- trainArima(ic = 'aic', data = train)# TODO: tidy up this use of train later
ordersArimaBIC <- trainArima(ic = 'bic', data = train)
ordersArimaAICC <- trainArima(ic = 'aicc', data = train)
#polynomial2 <- trainPolynomial(degree = 2, data = train)
# Accumulators for each model's one-step-ahead forecasts (filled in the loop).
arimaAIC <- NULL
arimaBIC <- NULL
arimaAICC <- NULL
mediaMovil3 <- NULL
mediaMovil4 <- NULL
mediaMovil5 <- NULL
mediaMovil6 <- NULL
mediaMovil7 <- NULL
mediaMovil8 <- NULL
mediaMovil9 <- NULL
suavizacionExponencialSimple <- NULL
suavizacionExponencialLineal <- NULL
holtWinterAditivo <- NULL
holtWinterMultiplicativo <- NULL
for(i in 1:h){
# Expanding window: always anchored at observation 1.
ventana <- subset(data, start = 1, end = th - 1 + i)
# ARIMA forecasts only when order selection succeeded for that criterion.
if(ordersArimaAIC$flag){
arimaAIC[i] <- forecast(Arima(ventana, order = c(ordersArimaAIC$p, ordersArimaAIC$d, ordersArimaAIC$q)), h = 1)$mean
}
if(ordersArimaBIC$flag){
arimaBIC[i] <- forecast(Arima(ventana, order = c(ordersArimaBIC$p, ordersArimaBIC$d, ordersArimaBIC$q)), h = 1)$mean
}
if(ordersArimaAICC$flag){
arimaAICC[i] <- forecast(Arima(ventana, order = c(ordersArimaAICC$p, ordersArimaAICC$d, ordersArimaAICC$q)), h = 1)$mean
}
# Moving averages: ma(order = k) loses trailing values, so a longer
# horizon is forecast and tail(..., 1) keeps only the last value --
# apparently h = floor(k/2) + 1 so the kept value lines up with the
# next period of the original series; TODO confirm that intent.
mediaMovil3[i] <- tail(forecast(ma(ventana , order = 3), h = 2)$mean, 1)
mediaMovil4[i] <- tail(forecast(ma(ventana , order = 4), h = 3)$mean, 1)
mediaMovil5[i] <- tail(forecast(ma(ventana , order = 5), h = 3)$mean, 1)
mediaMovil6[i] <- tail(forecast(ma(ventana , order = 6), h = 4)$mean, 1)
mediaMovil7[i] <- tail(forecast(ma(ventana , order = 7), h = 4)$mean, 1)
mediaMovil8[i] <- tail(forecast(ma(ventana , order = 8), h = 5)$mean, 1)
mediaMovil9[i] <- tail(forecast(ma(ventana , order = 9), h = 5)$mean, 1)
suavizacionExponencialSimple[i] <- ses(ventana, h = 1)$mean
suavizacionExponencialLineal[i] <- holt(ventana, h = 1)$mean
holtWinterAditivo[i] <- hw(ventana, h = 1, seasonal = "additive")$mean
holtWinterMultiplicativo[i] <- hw(ventana, h = 1, seasonal = "multiplicative")$mean
}
# Report elapsed training time.
print(proc.time () - ptm)
# These models need no order validation, so they always enter the list.
lista <- list(
'mediaMovil3' = list('name' = 'mediaMovil3 Recursive', 'forecast' = mediaMovil3),
'mediaMovil4' = list('name' = 'mediaMovil4 Recursive', 'forecast' = mediaMovil4),
'mediaMovil5' = list('name' = 'mediaMovil5 Recursive', 'forecast' = mediaMovil5),
'mediaMovil6' = list('name' = 'mediaMovil6 Recursive', 'forecast' = mediaMovil6),
'mediaMovil7' = list('name' = 'mediaMovil7 Recursive', 'forecast' = mediaMovil7),
'mediaMovil8' = list('name' = 'mediaMovil8 Recursive', 'forecast' = mediaMovil8),
'mediaMovil9' = list('name' = 'mediaMovil9 Recursive', 'forecast' = mediaMovil9),
'suavizacionExponencialSimple' = list('name' = 'suavizacionExponencialSimple Recursive', 'forecast' = suavizacionExponencialSimple),
'suavizacionExponencialLineal' = list('name' = 'suavizacionExponencialLineal Recursive', 'forecast' = suavizacionExponencialLineal),
'holtWinterAditivo' = list('name' = 'holtWinterAditivo Recursive', 'forecast' = holtWinterAditivo),
'holtWinterMultiplicativo' = list('name' = 'holtWinterMultiplicativo Recursive', 'forecast' = holtWinterMultiplicativo))
# ARIMA entries are appended only when their order selection succeeded.
if(ordersArimaAIC$flag){
lista <- list.append(lista, 'arimaAIC' = list('name' = 'arimaAIC Recursive', 'forecast' = arimaAIC))
}
if(ordersArimaBIC$flag){
lista <- list.append(lista, 'arimaBIC' = list('name' = 'arimaBIC Recursive', 'forecast' = arimaBIC))
}
if(ordersArimaAICC$flag){
lista <- list.append(lista, 'arimaAICC' = list('name' = 'arimaAICC Recursive', 'forecast' = arimaAICC))
}
return(lista)
}
# Ensembles
trainandTestEnsambles <- function(data, train){
  # Fit several base models in-sample on `data`, combine their fitted values
  # with the ForecastComb ensemble schemes, and return the test-set accuracy
  # metrics of every combination method (one row per method, plus a "names"
  # column holding the method label).
  # data:  full time series (ts object)
  # train: training slice, used to patch the trailing NA of the moving average
  ptm <- proc.time()
  # --- Base models: in-sample fitted values ------------------------------
  mediaMovil3 <- ma(data, order = 3)
  # ma() leaves a trailing NA; replace the last cell with a forecast built
  # from the training data.
  # NOTE(review): h = 2 produces two values but only one cell is replaced
  # (R keeps the first, with a warning); the rolling-window trainers use
  # tail(..., 1) instead -- confirm which element was intended.
  mediaMovil3[length(mediaMovil3)] <- forecast(ma(train, order = 3), h=2)$mean
  suavizacionExponencialSimple <- ses(data)$fitted
  suavizacionExponencialLineal <- holt(data)$fitted
  holtWinterAditivo <- hw(data, seasonal = 'additive')$fitted
  holtWinterMultiplicativo <- hw(data, seasonal = 'multiplicative')$fitted
  # Bug fix: fit the ARIMA on the `data` argument; the original referenced
  # the global `data.ts`, silently ignoring the function's input.
  arima <- Arima(data, order = c(2,2,3))$fitted
  # Matrix of in-sample predictions, one column per base model.
  inSample <- cbind(mediaMovil3,
                    suavizacionExponencialSimple,
                    suavizacionExponencialLineal,
                    holtWinterAditivo,
                    holtWinterMultiplicativo,
                    arima)
  dataComb <- foreccomb(observed_vector = data, prediction_matrix = inSample, newobs = data,
                        newpreds = inSample)
  # *** Simple combination methods
  comb_SA <- rolling_combine(dataComb, comb_method ="comb_SA")
  comb_MED <- rolling_combine(dataComb, comb_method ="comb_MED")
  comb_TA <- rolling_combine(dataComb, comb_method ="comb_TA")
  comb_WA <- rolling_combine(dataComb, comb_method ="comb_WA")
  # Regression-based combination methods
  comb_OLS <- rolling_combine(dataComb, comb_method ="comb_OLS")
  comb_LAD <- rolling_combine(dataComb, comb_method ="comb_LAD")
  comb_CLS <- rolling_combine(dataComb, comb_method ="comb_CLS")
  print(proc.time () - ptm)
  # --- Test-set accuracy of each combination -----------------------------
  metricsLabels <- c("ME", "RMSE", "MAE", "MPE", "MAPE")
  names <- c('comb_SA', 'comb_MED', 'comb_TA', 'comb_WA', 'comb_OLS', 'comb_LAD', 'comb_CLS')
  metrics <- rbind(comb_SA$Accuracy_Test["Test set", metricsLabels],
                   comb_MED$Accuracy_Test["Test set", metricsLabels],
                   comb_TA$Accuracy_Test["Test set", metricsLabels],
                   comb_WA$Accuracy_Test["Test set", metricsLabels],
                   comb_OLS$Accuracy_Test["Test set", metricsLabels],
                   comb_LAD$Accuracy_Test["Test set", metricsLabels],
                   comb_CLS$Accuracy_Test["Test set", metricsLabels])
  metrics <- data.frame(metrics)
  row.names(metrics) <- names
  # Keep the method label as a column (named "names") for downstream use.
  metrics <- cbind(metrics, names)
  return(metrics)
}
evaluateModels <- function(models, test){
# Evaluate each trained model's forecasts against the held-out test data.
# models: list of model entries produced by the training functions, each a
#         list with $name (label) and $forecast (one-step-ahead predictions)
# test:   test observations the forecasts are compared with
# Returns a data frame with one row per model (ME, RMSE, MAE, MPE, MAPE)
# plus a column called "names" holding the model label.
metricsLabels <- c("ME", "RMSE", "MAE", "MPE", "MAPE")
metrics <- NULL
names <- NULL # NOTE: shadows base::names() inside this function
for (model in models){
# accuracy() (forecast pkg) returns a metrics matrix; keep the test-set row.
acc <- accuracy(model$forecast, test)["Test set", metricsLabels]
metrics <- rbind(metrics, acc)
names <- rbind(names, model$name)
}
metrics <- data.frame(metrics)
#row.names(metrics) <- names
# Append the labels; cbind names the new column "names" after the variable.
metrics <- cbind(metrics, names)
return(metrics)
}
compareModels <- function(metrics){
  # Pick the best model out of an evaluation table: the row whose RMSE is
  # smallest (ties resolve to the first occurrence, as which.min does).
  # TODO: make the selection metric a parameter instead of hard-coding RMSE.
  # metrics: data frame with one row of evaluation metrics per model
  # Returns: the single best row of `metrics` (row name preserved).
  best_row <- which.min(metrics$RMSE)
  metrics[best_row, ]
}
tableBoxPierceOrLjungBox <- function(residuo, maxLag = 20, type = "Box-Pierce"){
  # Build a lag-by-lag portmanteau test table for a residual series.
  # residuo: residual series to test for remaining autocorrelation
  # maxLag:  largest lag to test (one test per lag 1..maxLag)
  # type:    "Box-Pierce" or "Ljung-Box", forwarded to stats::Box.test
  # Returns a data frame with columns: lag ("Rezagos"), the test statistic
  # (column named after `type`) and the p-value rounded to 5 digits ("p-valor").
  lags <- seq_len(maxLag)
  # Run one Box.test per lag, then pull out statistic and p-value.
  tests <- lapply(lags, function(k) Box.test(residuo, lag = k, type = type))
  statistic <- vapply(tests, function(tst) unname(tst$statistic), numeric(1))
  pvalue <- vapply(tests, function(tst) round(tst$p.value, 5), numeric(1))
  out <- data.frame(lags, statistic, pvalue)
  names(out) <- c("Rezagos", type, "p-valor")
  out
}
#
#Start
# (Superseded parameter block kept for reference.)
#globalParams <- setParams(productName = 'Queso costeño',
#                          frequencyTs = 365.25/7,
#                          startTs = decimal_date(ymd("2020-02-01")))
# Intended entry point; still a stub -- the pipeline below runs at top level.
start <- function(){
set.seed(1234)
ptm <- proc.time()
# TODO: move the whole pipeline below in here
proc.time() - ptm
}
# --- Pipeline: load, filter and prepare the price series --------------------
data <- loadData(pathFile = 'datamensual.csv')
productName = 'Queso costeño'
#frequencyTs = 365.25/7 # weekly
frequencyTs = 12
# NOTE(review): filterData()/prepareData()/createTimeSeries() take no data
# argument here, so they presumably read shared state set up by the calls
# above -- confirm against their definitions.
data <- filterData(productName = productName)
#startTs = decimal_date(ymd("2020-02-01"))
startTs <- formatDate(min(data$Fecha))
startTs
data <- prepareData()
data.ts <- createTimeSeries('precio', start = startTs, frequency = frequencyTs)
# Hold out 30% of the series as test; the rest is training.
splitDataResult <- splitData(data = data.ts, percentage = .3)
train <- splitDataResult$train
test <- splitDataResult$test
h <- splitDataResult$h
th <- splitDataResult$th # could be replaced with length(train)
# Training models (three window schemes: moving, recursive, fixed).
modelsMoving <- trainingModelsMoving(h, th, data.ts, train)
modelsMoving
modelsRecursive <- trainingModelsRecursive(h, th, data.ts, train)
modelsRecursive
modelsFixed <- trainingModelsFixed(h, th, data.ts, train)
modelsFixed
# Metrics
metricsMoving <- evaluateModels(modelsMoving, test)
metricsMoving
metricsRecursive <- evaluateModels(modelsRecursive, test)
metricsRecursive
metricsFixed <- evaluateModels(modelsFixed, test)
metricsFixed
allMetrics <- rbind(metricsMoving, metricsRecursive, metricsFixed)
allMetrics
# Compare metrics and select model
bestModel <- compareModels(allMetrics)
bestModel
# Ensembles
metricsComb <- trainandTestEnsambles(data.ts, train)
bestModelComb <- compareModels(metricsComb)
bestModelComb
# TODO: pass this best model to a function that refits it via a chain of
# if / case-when branches.
#trainModelA()
# Finally, plot the result and return all the data of interest.
|
7c62cf51bce56e963b9a7c20c9ea994a8c407723 | afe7ea7d235c1d6477c5612a8fc35dfcff6c5430 | /R/get.idx.R | 02d7ca3de57d7012c6fbfd980231a8608de29b3f | [
"MIT"
] | permissive | schyen/MetaboMate | 575a4626f880e6e9bdff7adab97a08f32fd0851a | 2acb812b20cd752ba33073b6800227b96200ff94 | refs/heads/master | 2022-03-15T22:40:57.737116 | 2019-12-09T00:11:22 | 2019-12-09T00:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 356 | r | get.idx.R | #' Find indices in ppm vector for respective chemical shift range
#' @param range Chemical shift range (in ppm); length-two numeric, either order
#' @param ppm Ppm vector of chemical shifts
#' @return Integer vector of indices of \code{ppm} lying within \code{range}
#'   (bounds inclusive); empty when no value falls in the range.
#' @export
#' @aliases get.idx
#' @author Torben Kimhofer \email{tkimhofer@@gmail.com}
get.idx <- function(range = c(1, 5), ppm) {
  # Sort descending so range[1] is the upper bound and range[2] the lower,
  # making the function robust to the order the caller supplies.
  # (TRUE spelled out rather than the reassignable shorthand T.)
  range <- sort(range, decreasing = TRUE)
  which(ppm <= range[1] & ppm >= range[2])
}
|
de37adbcc1bc1b30b0318f655151097d77468794 | 6ef9da6cc040293b624e6680a8ff4e656be92cd6 | /man/rates.yppe.Rd | 4c3716c7fc6603dec1101c4af8f2ae7b3ba0fb7b | [
"MIT"
] | permissive | fndemarqui/YPPE | a7723566ed12225215200d5756b63fa67090bf09 | 18e95e11a8a6abd8265e8fbf5185c20d538a0522 | refs/heads/master | 2023-05-09T19:47:51.641149 | 2023-05-05T11:33:12 | 2023-05-05T11:33:12 | 209,976,128 | 2 | 1 | NOASSERTION | 2023-09-10T19:24:42 | 2019-09-21T11:48:24 | R | UTF-8 | R | false | true | 517 | rd | rates.yppe.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{rates.yppe}
\alias{rates.yppe}
\title{Estimated failure rates for the PE distribution}
\usage{
\method{rates}{yppe}(object, ...)
}
\arguments{
\item{object}{a fitted model object.}
\item{...}{further arguments passed to or from other methods.}
}
\value{
the estimated failure rates for the PE distribution.
}
\description{
Estimated failure rates for the PE distribution
}
\details{
Method only available for ML approach.
}
|
86a98d1a56f010ea09c4510f37c60ab2d22c7343 | af883594be37bf9b58d4d11c05ac685bb0919652 | /ML/ML_Project02_MLB & KBO.R | 3e13747b138c964d081f78f5d0b1ce8f899c78dd | [] | no_license | handaeho/lab_R | 7593574af1dc345c1f98f92c219c3af3d411d493 | c735aaf2cb004254bf57637ec059ee8344c3f4f9 | refs/heads/master | 2020-11-26T08:40:18.922074 | 2019-12-19T09:17:59 | 2019-12-19T09:17:59 | 229,017,638 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,925 | r | ML_Project02_MLB & KBO.R | # 필요 패키지 리스트 구성
list_of_packages = c("Lahman", "ggplot2", "tidyverse", "ggthemes", "extrafont",
                     "tibble", "rvest", "stringr", "extrafont")
# Install any listed packages (and their dependencies) that are missing
new_packages = list_of_packages[!(list_of_packages %in% installed.packages()[,"Package"])]
if(length(new_packages)) install.packages(new_packages)
# library vs. require
# library() errors when a package is missing, while require() simply returns FALSE and continues.
# sapply(): lapply() returns a list, whereas sapply() simplifies to a vector.
# Load all the packages at once
sapply(list_of_packages, require, character.only = TRUE)
# 1. MLB beta distribution of batting averages ------------------------------------
# Compute career batting averages, excluding pitchers
# H: hits / AB: at-bats
career = Batting %>%
  dplyr::filter(AB > 0) %>%
  anti_join(Pitching, by = "playerID") %>%
  group_by(playerID) %>%
  dplyr::summarize(H = sum(H), AB = sum(AB)) %>%
  mutate(average = H / AB)
# Attach player names for readability.
career = Master %>%
  tbl_df() %>%
  dplyr::select(playerID, nameFirst, nameLast) %>%
  unite(name, nameFirst, nameLast, sep = " ") %>%
  inner_join(career, by = "playerID") %>%
  dplyr::select(-playerID)
# Keep only players with at least 500 at-bats.
career_filtered = career %>%
  filter(AB >= 500)
# Fit a beta distribution to the filtered averages by maximum likelihood;
# alpha0/beta0 become the empirical-Bayes prior parameters.
m <- MASS::fitdistr(career_filtered$average, dbeta,
                    start = list(shape1 = 1, shape2 = 10))
alpha0 <- m$estimate[1]
beta0 <- m$estimate[2]
# Histogram of MLB averages with the fitted beta density overlaid.
career_filtered %>%
  dplyr::filter(AB > 500) %>%
  ggplot() +
  geom_histogram(aes(average, y = ..density..), binwidth = .005, color="darkgray", fill="gray") +
  stat_function(fun = function(x) dbeta(x, alpha0, beta0), color = "red",
                size = 1) +
  theme_bw(base_family="NanumGothic") +
  labs(x="타율", y="밀도(Density")
# 2. 2017 KBO league ------------------------------------
## 2.1. KBO statistics (as of June 28) -------------------------------
# NOTE(review): `kbo_url` below is never defined -- `kbo_career` holds a csv
# path instead; one of the two names is wrong. Confirm the intended source.
kbo_career = "data/kbo.csv"
kbo_html <- xml2::read_html(kbo_url)
Sys.setlocale("LC_ALL", "English")
kbo_hitter_tbl <- rvest::html_nodes(x=kbo_html, xpath='//*[@id="table1"]')
kbo_hitter <- rvest::html_table(kbo_hitter_tbl)[[1]]
Sys.setlocale("LC_ALL", "Korean")
# 3. KBO empirical Bayes batting averages (June 28) -------------------------------
# Shrink each hitter's raw average toward the MLB prior:
# (hits + alpha0) / (at-bats + alpha0 + beta0).
kbo_hitter_eb <- kbo_hitter %>%
  dplyr::select(`선수 (팀)`, 타수, 안타, 타율) %>%
  add_row(`선수 (팀)` = "강지광 (넥센)", 타수 = 2, 안타 = 0, 타율 =0) %>%
  add_row(`선수 (팀)` = "노경은 (롯데)", 타수 = 2, 안타 = 0, 타율 =0) %>%
  add_row(`선수 (팀)` = "최경철 (삼성)", 타수 = 1, 안타 = 1, 타율 =1) %>%
  add_row(`선수 (팀)` = "이성곤 (두산)", 타수 = 3, 안타 = 1, 타율 =0.333) %>%
  mutate(베이즈_타율 = round((안타 + alpha0) / (타수 + alpha0 + beta0), 3))
# Interactive table sorted by at-bats.
kbo_hitter_eb %>%
  dplyr::arrange(타수) %>% DT::datatable()
# Density of the raw (prior-free) KBO averages.
prior_g <- kbo_hitter_eb %>%
  dplyr::select(`선수 (팀)`, 타율) %>%
  ggplot() +
  geom_histogram(aes(타율, y = ..density..), binwidth = .01, color="darkgray", fill="gray", alpha=0.3) +
  theme_bw(base_family="NanumGothic") +
  labs(x="타율", y="밀도(Density")
## 3.2. Visualising the KBO Bayesian batting averages (June 28) -------------------------------
# Raw vs. shrunken averages side by side, with the beta prior overlaid.
posterior_g <- kbo_hitter_eb %>%
  dplyr::select(`선수 (팀)`, 타율, 베이즈_타율) %>%
  gather(구분, 타율, -`선수 (팀)`) %>%
  ggplot() +
  geom_histogram(aes(타율, y = ..density.., fill=구분), binwidth = .005, alpha=0.3) +
  theme_bw(base_family="NanumGothic") +
  stat_function(fun = function(x) dbeta(x, alpha0, beta0), color = "red", size = 1) +
  scale_x_continuous(limits=c(0.2, 0.40)) +
  labs(x="타율", y="밀도(Density") +
  theme(legend.position = "top")
gridExtra::grid.arrange(prior_g, posterior_g, nrow=1)
c57f8f8cce56c277ff3352dddcc52134676d9d67 | 3b0df78cf861c93962a2c76dc023b37b5f2c4a83 | /2020/week4_spotify_songs/spotify_songs.R | 7f5688d52b7ab47b4738db078e0a4e8cbd90e276 | [] | no_license | AmitLevinson/TidyTuesday | e84b238c9e656340c95145ddeebaff4fd9c5f992 | 68fa50eb61ee1fca2254c27364314056b83cc986 | refs/heads/master | 2021-08-03T13:37:26.694477 | 2021-07-24T08:29:38 | 2021-07-24T08:29:38 | 209,969,163 | 25 | 8 | null | null | null | null | UTF-8 | R | false | false | 2,668 | r | spotify_songs.R | library(tidyverse)
library(stopwords)
library(tidytext)
library(textdata)
library(scales)
library(showtext)
# TidyTuesday 2020 week 4: sentiment analysis of Spotify song titles,
# plotted as top-15 positive/negative word frequencies on a log scale.
#Read the data
spotify_songs <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-01-21/spotify_songs.csv')
#unnesting words: one row per word of every track title
word_track <- spotify_songs %>%
  select(track_name) %>%
  unnest_tokens(word, track_name) %>%
  select(word)
#searching for sentiment matches
word_count <- word_track %>%
  count(word, sort = T) %>%
  #Matching with sentiment data.frame
  inner_join(get_sentiments("bing")) %>%
  #reordering factor according to frequency
  mutate(word = fct_reorder(word, n)) %>%
  #If you decide to use 'nrc' sentiment; not necessary for 'bing'
  filter(sentiment %in% c("positive", "negative"), word != "feat") %>%
  #grouping so we can slice the top 15 from each group
  group_by(sentiment) %>%
  slice(1:15)
#Let's add a fun font for the plot:
font_add_google("Boogaloo", "Boogaloo")
showtext_auto()
ggplot(word_count,aes(word, n))+
  #Create the bars to match spotify logo
  geom_col(fill = "black")+
  coord_flip()+
  #'free_y' helps in organizing the facet_wrap neatly
  facet_wrap(. ~ sentiment,scales = "free_y")+
  #creating a log10 scale
  scale_y_log10(breaks = c(1,10,100,1000),
                labels = trans_format("log10", math_format(10^.x)))+
  labs(title = "Song Title Sentiment Analysis",
       subtitle = paste0("Analysis was conducted on 5,000 songs from spotify.",
                         " Top 15 most frequent words are shown."),
       caption = "Data: @kaylinquest | plot: @Amit_Levinson")+
  theme_minimal()+
  theme(text = element_text(family = "Boogaloo"),
        axis.title = element_blank(),
        axis.text.y = element_text(size = 32),
        axis.text.x = element_text(size = 26),
        strip.text = element_text(size = 30),
        plot.caption = element_text(size = 18, face = "italic"),
        plot.title = element_text(size = 46),
        plot.subtitle = element_text(size = 26),
        #adding a nice background to match spotify logo
        panel.background = element_rect(fill = "lightgreen"),
        plot.background = element_rect(fill = "lightgreen"),
        panel.grid.major = element_line(color = "grey70", size = 0.2),
        panel.grid.minor = element_line(color = "grey70", size = 0.2))
ggsave("spotify.png", height = 4, width = 6)
#instead of the unnest_token you can use the stringr::str_extract_all approach
#(for some reason it gives us 10 less words, plus it seems a little more complicated):
#word_track_stringr <- tibble(word = unlist(stringr::str_extract_all(spotify_songs$track_name, boundary("word"))))
ba451929e98a72410d1b41c7f04f2356d48ae1e0 | 577fb24e1f77b1c9b923ee138f6ba8014e18910e | /markdowns/01datawrangling.R | cbfa59d958925cbd18c3624596167320cbf23889 | [] | no_license | hthomps/FinalProject | 76202099e2835ace84400a2aacf52eabf88dfb8b | 4c1bde5ff0b95177c0e84e7ecbbf09fd1e2bdc14 | refs/heads/master | 2021-02-16T09:59:39.915345 | 2020-04-17T15:45:29 | 2020-04-17T15:45:29 | 244,992,844 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,416 | r | 01datawrangling.R | # Data wrangling for Biol812 final project
# done by Regan Cross, Hayden Wainwright, Marco Lee, and Hana Thompson
# March 19, 2020
# load in transcriptome data
dat <- read.csv("data/MothData.csv", header=T)
# right now each column is a transcriptome and each row is a gene,
# with expression levels in the data cells
# the transcriptome names include their tissue type and life stage
# load packages
library(tidyverse)
library(reshape2)
# we need to melt the dataset so each row is a unique gene - transcriptome combination
# we'll use the melt function in the reshape2 package
dat2 <- dat %>% melt(id.vars = c("gene_id", "gene_description"),
                     measure.vars = c("H.L2.D1", "H.L3.D1", "H.L4.12h", "H.L4.Late",
                                      "H.L5.12hS", "H.L5.D2S", "H.L5.preWS", "H.P.Late",
                                      "H.A.D1", "H.A.D3", "H.A.D7", "F.L4.Late", "F.L5.D1",
                                      "F.L5.preW", "F.L5.W", "F.P.D13S", "F.P.D1518", "F.A.D13",
                                      "F.A.D79", "W.E.3hS", "W.E.LateS", "W.L1.D1S", "W.L2.D1S",
                                      "W.L3.D1S", "G.L2.unk", "G.L3.LateS", "G.L4.0h", "G.L4.12hS", "G.L4.LateS",
                                      "G.L5.13hS", "G.L5.D1S", "G.L5.preWS", "G.L5.WS", "G.L5.W",
                                      "G.P.D1", "G.P.D1518", "G.A.D35", "MT.L5.preW", "MT.A.D1",
                                      "MT.A.D3", "M.L4.Late", "M.L5.12h", "M.L5.12hS", "M.L5.preW",
                                      "M.L5.preWS","M.L5.W", "M.L5.WS", "T.P.D3", "T.P.D1518",
                                      "T.A.D13", "O.P.D1518", "O.A.D1", "H.A.D1F1S", "H.A.D1F2S",
                                      "H.A.D1F3S", "H.A.D1F4S", "H.A.D1M1S", "H.A.D1M2S", "H.A.D1M3S",
                                      "H.A.D1M4S", "An.L5.1S", "An.L5.2S", "An.L5.3S", "An.A.F1S", "An.A.F2S", "An.A.F3S", "An.A.MS"))
# let's rename the "variable" and "value" columns to something more informative
colnames(dat2)[3] <- "transcriptome"
colnames(dat2)[4] <- "copynumber"
# let's pull apart the transcriptome names into the tissue type (which is the code before
# the first . ), life stage (the code between .'s), and whatever is on the end
# we use the separate command in dplyr
dat2 <- dat2 %>% separate(transcriptome, into = c("tissue", "life_stage", "note"),
                          remove = FALSE, extra = "drop")
# remove = FALSE so it keeps the transcriptome column
# let's just grab the data for the life stages (adult and 5th instar larva)
# and tissue types (fatbody, head, and midgut) that we want
subdat <- dat2 %>% filter(life_stage == "L5" | life_stage == "A") %>% filter(tissue == "H" | tissue == "G" | tissue == "F")
# change them to factors for easier graphing later on
subdat$life_stage <- as.factor(subdat$life_stage)
subdat$tissue <- as.factor(subdat$tissue)
# now lets make separate datasets for each tissue type, and get the mean expression
# level for each lifestage
headdat <- subdat %>% filter(tissue == "H") %>% group_by(gene_id, life_stage) %>% summarise(mean = round(mean(copynumber), digits = 3))
gutdat <- subdat %>% filter(tissue == "G") %>% group_by(gene_id, life_stage) %>% summarise(mean = round(mean(copynumber), digits = 3))
fatdat <- subdat %>% filter(tissue == "F") %>% group_by(gene_id, life_stage) %>% summarise(mean = round(mean(copynumber), digits = 3))
# now we cast it back out so there is a column for adult (A) and a column for larva (L5)
# and then take the difference between those two columns
# (the second positional argument `mean` is dcast's fun.aggregate)
headdat2 <- headdat %>% dcast(gene_id ~ life_stage, mean) %>% mutate(diff = abs(A - L5)) %>% arrange(desc(diff))
# now if we click the "diff" column in the data frame (opened through the environment)
# it'll order them so we can see the max diff's
# the head genes with the greatest difference between adult and L5 are:
# Msex2.07524, Msex2.15420, Msex2.14343
gutdat2 <- gutdat %>% dcast(gene_id ~ life_stage, mean) %>% mutate(diff = abs(A - L5)) %>% arrange(desc(diff))
# the midgut genes with the greatest difference between adult and L5 are:
# Msex2.04431, Msex2.15420, Msex2.14343
fatdat2 <- fatdat %>% dcast(gene_id ~ life_stage, mean) %>% mutate(diff = abs(A - L5)) %>% arrange(desc(diff))
# the fatbody genes with the greatest difference between adult and L5 are:
# Msex2.15420, Msex2.01694, Msex2.10735

b0defca9a45667dfcaceacdbb02794e37206dae2 | 70afa58e6ad00a88d745e72512c39f6b57a6c5f4 | /Plot1.R | f73fd768627398fb7dd1704e07db0913c65b067a | [] | no_license | mattymoo/ExData_Plotting1 | 1193dbd18705c020e28c9bbb1e2fdd09cbadbdfc | b177539a2294f096d2390f76a97788b981569c5c | refs/heads/master | 2021-01-23T21:34:35.746923 | 2014-06-06T23:21:55 | 2014-06-06T23:21:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 682 | r | Plot1.R | powertest <-read.table("household_power_consumption.txt",header=TRUE,sep = ";", stringsAsFactors=FALSE, na.strings="?")
# Convert the date column and the power reading to proper types
# (na.strings = "?" in the read.table call above already mapped the
# dataset's missing-value markers to NA).
powertest$Date <- as.Date(powertest$Date, format="%d/%m/%Y")
powertest$Global_active_power <- as.numeric(powertest$Global_active_power)
# Keep only the two days of interest: 2007-02-01 and 2007-02-02.
powersub <- powertest[(powertest$Date >= "2007-02-01") & (powertest$Date <= "2007-02-02"), ]
# Full timestamp per observation (kept for parity with the course's other
# plots; not used by this histogram).
x <- strptime(paste(powersub$Date, powersub$Time), format = "%Y-%m-%d %H:%M:%S")
powersub[,3] <- as.numeric(powersub[,3])
# Histogram of global active power over the two selected days.
hist(powersub$Global_active_power, col="red", xlab="Global Active Power (kilowatts)", main="Global Active Power")
# Bug fix: removed the stray
#   axis(1, at=c(0,1000,2000,3000), labels=c("0","2","4","6"))
# call -- it placed tick marks at 0/1000/2000/3000 on an axis whose data
# range is only a few kilowatts, overdrawing the correct labels.  Also
# dropped the trailing comma in the hist() call above.
dev.copy(png, file="Plot1.png")
dev.off()
fb0b1560860265314f3b6de7f9d4066f686d4f08 | d44475655dbe61df07aca3c6417d53b88f84ee52 | /basic/11~/r_study14_word_cloud.R | dbbd5b4035dcb6ac712cdba2c25c0c128568a127 | [] | no_license | bjh1646/R_data | ad564701e651e3e88a0bd91621dbb39ecc283efc | dd46c37cdd8070a4ddab1c7f376fde010b170d68 | refs/heads/master | 2023-08-21T16:05:22.773594 | 2021-08-29T13:08:46 | 2021-08-29T13:08:46 | 392,208,622 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,377 | r | r_study14_word_cloud.R | #워드클라우드 - 텍스트 마이닝
# Word cloud - text mining with Korean noun extraction (KoNLP).
install.packages("rJava")
install.packages("remotes")
remotes::install_github('haven-jeon/KoNLP',upgrade = "never",
                        INSTALL_opts=c("--no-multiarch"))
library(KoNLP)
# Quick sanity test of extractNoun on a sample Korean sentence
txt = "문장에 대한 이해가 필요하다"
extractNoun(txt)
# 1. Load the data (hip-hop lyrics, one line per record)
txt = readLines("hiphop.txt")
head(txt)
# 2. Set up packages for special-character removal
install.packages("stringr")
library(stringr)
library(KoNLP)
# 3. Remove special characters (every non-word character becomes a space)
txt = str_replace_all(txt, "\\W", " ")
# 4. Extract nouns
nouns = extractNoun(txt)
# 5. Flatten the noun list into a character vector and
#    build a per-word frequency table
wordcount = table(unlist(nouns))
library(dplyr)
# 6. Convert to a data frame
df_word = as.data.frame(wordcount, stringsAsFactors = F)
# 7. Rename the variables
df_word = rename(df_word,
                 word=Var1,
                 freq = Freq)
# 8. Keep only words of two or more characters
df_word = filter(df_word, nchar(word) >= 2)
# Top 20 by frequency (for inspection only)
top_20 = df_word %>%
  arrange(desc(freq)) %>%
  head(20)
top_20
# 9. Install and load the word-cloud packages
install.packages("wordcloud")
library(wordcloud)
library(RColorBrewer)
# 10. Choose the colour palette
pal = brewer.pal(8, "Dark2")
# 11. Fix the random seed (the cloud layout is randomised)
set.seed(1234)
# 12. Draw the word cloud
wordcloud(words = df_word$word, # words
          freq = df_word$freq, # frequencies
          min.freq = 2, # minimum word frequency
          max.words = 200, # max words displayed
          random.order = F, # place high-frequency words in the centre
          rot.per = .1, # proportion of rotated words
          scale = c(4, 0.3), # word size range
          colors = pal) # colour list
# Second variant: same cloud with a blue palette (overwrites pal and redraws)
# 10. Choose the colour palette
pal = brewer.pal(9, "Blues")
# 11. Fix the random seed
set.seed(1234)
# 12. Draw the word cloud
wordcloud(words = df_word$word, # words
          freq = df_word$freq, # frequencies
          min.freq = 2, # minimum word frequency
          max.words = 200, # max words displayed
          random.order = F, # place high-frequency words in the centre
          rot.per = .1, # proportion of rotated words
          scale = c(4, 0.3), # word size range
          colors = pal) # colour list

1f939aed100cd46e195f812e6bad323d2ca6763b | a6d09284bf52147a3795196061442bafbe52e4ee | /man/mort2020.Rd | 59938414e4cceeae76b5aa3e073c0d4ba54109af | [] | no_license | Owain-S/COVIDYPLL | 2e904e0e767bb17ee4977b210425b1a116bee193 | 83e9fd98bfbc4942ca9ff118b58bca6f6b3ccb2a | refs/heads/master | 2023-08-11T05:07:28.146195 | 2021-09-19T04:43:29 | 2021-09-19T04:43:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 440 | rd | mort2020.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{mort2020}
\alias{mort2020}
\title{mort2020 (2020 national and state-level covid-19 and total deaths)}
\format{
An object of class \code{data.table} (inherits from \code{data.frame}) with 371 rows and 6 columns.
}
\usage{
mort2020
}
\description{
mort2020 (2020 national and state-level covid-19 and total deaths)
}
\keyword{datasets}
|
e21a79cc6f14de98374a2eaafbd28c427bb623fc | 7cda0c765cf3a7d3d7c7af07929195e3a9e48976 | /scicar/05run_bot.R | 61516f30ae3d70d424d71b49fe14d3f005953ce6 | [] | no_license | JonasRieger/corona100d | 0e597987445aa636c950c6a3b8f6f2814836b120 | 08dde14d72ea612cfde2d7535411edc42fd5af86 | refs/heads/main | 2023-07-13T17:45:40.007494 | 2021-08-18T05:32:53 | 2021-08-18T05:32:53 | 307,416,747 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,045 | r | 05run_bot.R | message("run_bot.R")
# Scrape article bodies for candidate URLs via the Diffbot API and update the
# bookkeeping: `cand` (a data.table) tracks per-URL scrape attempts.
starttime = Sys.time()
library(data.table)
# TRUE -> use the paid ("pro") API token file.
pro = TRUE
# NOTE(review): setwd() inside a script makes it non-reentrant; consider
# building paths with file.path() instead.
setwd("scraping_articles")
cand = readRDS("diffbot_candidates.rds")
# URLs still eligible: scraped fewer than 4 times so far.
to_diffbot = cand$to_scrape[cand$times_scraped < 4]
if("botdata.rds" %in% list.files()){
  # Skip URLs whose article text was already fetched in a previous run.
  tmp = readRDS("botdata.rds")
  tmp = tmp[!is.na(tmp$url_text),]
  to_diffbot = setdiff(to_diffbot, tmp$url_new_expanded)
}
if(pro){
  token = readLines("tokenpro.txt")
}else{
  token = readLines("token.txt")
}
#Sys.setlocale("LC_ALL","English")
source("diffbot_function.R")
message(length(to_diffbot), " URLs")
# Fetch every remaining URL; diffbot() comes from diffbot_function.R.
system.time(botdata <- do.call(rbind, lapply(to_diffbot, diffbot, token = token)))
if("botdata.rds" %in% list.files()){
  # Merge with previously fetched rows (`tmp` exists iff the earlier branch
  # ran -- assumes the file was not created in between).
  botdata = rbind(tmp, botdata)
}
# data.table update-by-reference: bump the attempt counter for scraped URLs...
cand[to_scrape %in% to_diffbot, times_scraped := times_scraped+1]
# ...and mark URLs that still have no text after 4 attempts as exhausted (5).
cand[to_scrape %in% botdata[is.na(url_text), url_new_expanded]
     & times_scraped == 4, times_scraped := 5]
saveRDS(cand, file = "diffbot_candidates.rds")
saveRDS(botdata, file = "botdata.rds")
message(difftime(Sys.time(), starttime, units = "hours"), " hours")
gc()
|
af3cfd71f275aecba24943936874bf070e61d4ca | a0830531052bd2330932c3a2c9750326cf8304fc | /vmstools/R/maxRangeCI.r | 7e29653038e416bd4185350c63f0c7d84bbd6ec5 | [] | no_license | mcruf/vmstools | 17d9c8f0c875c2a107cfd21ada94977d532c882d | 093bf8666cdab26d74da229f1412e93716173970 | refs/heads/master | 2021-05-29T20:57:18.053843 | 2015-06-11T09:49:20 | 2015-06-11T09:49:20 | 139,850,057 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,750 | r | maxRangeCI.r |
maxRangeCI <- function(x,y,Time,speed){
  # Compute the ellipse of positions reachable between two successive
  # position registrations (cf. Mills et al. 2006; Pfoser & Jensen 1999):
  # the two pings act as the foci and the maximum travel distance within
  # the elapsed time bounds the ellipse.
  # x, y:  longitudes and latitudes of the two pings (length-two vectors,
  #        decimal degrees)
  # Time:  elapsed time between pings -- apparently minutes, given the /60
  #        conversion below (TODO confirm)
  # speed: speed estimates at the two pings -- the 1.852 factor suggests
  #        knots converted to km (TODO confirm)
  # Returns list([[1]] 361x2 matrix of ellipse lon/lat points, [[2]] dmax
  # in km, [[3]] warn flag: 1 when the pings are farther apart than the
  # speed estimate allows).
  # Relies on package helpers distance() and km2Degree() defined elsewhere.
  #Pre-Calculation to speed up the code
  pi180 <- pi/180
  cosy1 <- cos(y[1]*pi180)
  cosy2 <- cos(y[2]*pi180)
  #Calculate maximum distance in km (mean of the two speeds over the interval)
  dmax <- Time/60*sum(speed,na.rm=TRUE)/2*1.852
  #Calculate d from Haversine function
  d <- distance(x[1],y[1],x[2],y[2])
  #Calculate a and b as in Mills et al. 2006 paper
  warn<- 0
  if(d >= dmax){
    # Inconsistent input: collapse the ellipse onto the segment and flag it.
    warning(paste("Distance too far to reach with current speed estimate ",round(x,3)," ",round(y,3),"\n"))
    dmax <- d
    warn <- 1
  }
  # Semi-major axis a and semi-minor axis b of the ellipse (km).
  a <- dmax/2
  b <- sqrt((dmax^2 - d^2)/4)
  if(d == 0){
    o <- 0
  } else {
      # Bearing between the pings, converted to a math-style rotation angle.
      # NOTE(review): dy is computed but never used.
      dx <- (x[2] - x[1])*pi180
      dy <- (y[2] - y[1])*pi180
      o <- atan2(sin(dx)*cosy2,cosy1*sin(y[2]*pi180)-sin(y[1]*pi180)*cosy2*cos(dx))
      angles <- (o*(180/pi)) %% 360
      angle2 <- ifelse(angles >= 0 & angles < 180, 90 - angles,270-angles)
      o <- angle2*(pi180)
    }
  # Midpoint between the two pings (ellipse centre).
  #See also: http://www.movable-type.co.uk/scripts/latlong.html
  Bx <- cosy2*cos((x[2]-x[1])*pi180)
  By <- cosy2*sin((x[2]-x[1])*pi180)
  mid.x <- (x[1]*pi180) + atan2(By,cosy1+Bx)
  mid.y <- atan2(sin(y[1]*pi180) + sin(y[2]*pi180),sqrt((cosy1+Bx)^2 + By^2))
  mid.x <- mid.x*180/pi
  mid.y <- mid.y*180/pi
  # Convert the axes from km to degrees (lon via km2Degree, lat via ~111.2 km/deg).
  a <- c(km2Degree(mid.x,mid.y,a),a/111.2)
  b <- c(km2Degree(mid.x,mid.y,b),b/111.2)
  #See also Pfoser and Jensen 1999 Capturing the Uncertainty of Moving-Object representation
  # Parametrise the rotated ellipse over 0..360 degrees.
  u <- 0:360*pi180
  xres <- mid.x + a[1] * cos(o) * cos(u) - b[1] * sin(o) * sin(u)
  yres <- mid.y + a[2] * sin(o) * cos(u) + b[2] * cos(o) * sin(u)
  return(list(matrix(c(xres,yres),ncol=2),dmax,warn))}
c49bfd7e37c0f13068c1231087bcc7283194d453 | a327678044c68a5ce637085c25136666b8d52969 | /JConst.R | dc67538a657f76395657bedac30a4b0445961574 | [] | no_license | BeiyuLin/caabChange | 02abed4e48e533105e5da3ea62573b6e15d95042 | 36099744348e1bb3449df85bb76522ec8323d69b | refs/heads/master | 2021-01-01T03:51:42.784582 | 2016-05-13T21:37:56 | 2016-05-13T21:37:56 | 58,773,300 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,526 | r | JConst.R |
# Define psychology and smart home sensor variables
#
# Author: pdawadi
###############################################################################

# Fixed seed so any downstream sampling is reproducible
set.seed(100)

#---------------------------------------------------Significance level
SIG_LEVEL <- 0.05

#---------------------------------------------------Smart Home Activities
# "Work" is excluded from this list since some participants do not "work".
# NOTE(review): "ACTITIVIES" is a typo, but the name is kept unchanged because
# other scripts may reference it.
SMARTHOME.ACTITIVIES <- c("Sleep", "Bed_Toilet_Transition", "Leave_Home", "Relax",
                          "Cook", "Eat", "Personal_Hygiene", "FunctionalHealth")

# Standard clinical biannual variables (sensor channel names).
# NOTE(review): "Outm45_ppb" (missing underscore) is kept as-is so the strings
# keep matching the column names in the source data.
orig.std.var <- c("In_PM_ug_m3", "In_Dylos_small", "In_Dylos_large", "In_O3_ppb", "In_NO2_ppb", "In_NO_ppb", "In_Nox_ppb", "In_CO2_ppm",
                  "In_CH4_ppm", "In_H2O_ppm", "In_Temp_C", "Out_PM25_ug_m3", "Out_CO2_ppm", "Out_H2O_ppt", "Out_NO_ppb", "Out_NO2_ppb",
                  "Out_Nox_ppb", "Out_O3_ppb", "Garage_Temp_C", "WS_pressure_mbar", "WS_temp_C", "WS_RH", "WS_Dew_C", "WS_WD_true_deg",
                  "WS_Windspeed_m_s", "In_m31_ppb", "In_m33_ppb", "In_m42_ppb", "In_m45_ppb", "In_m59_ppb", "In_m69_ppb", "In_m71_ppb",
                  "In_m73_ppb", "In_m79_ppb", "In_m81_ppb", "In_m93_ppb", "In_m105_ppb", "In_m107_ppb", "In_m121_ppb", "In_m135_ppb",
                  "In_m137_ppb", "Out_m31_ppb", "Out_m33_ppb", "Out_m42_ppb", "Outm45_ppb", "Out_m59_ppb", "Out_m69_ppb", "Out_m71_ppb",
                  "Out_m73_ppb", "Out_m79_ppb", "Out_m81_ppb", "Out_m93_ppb", "Out_m105_ppb", "Out_m107_ppb", "Out_m121_ppb", "Out_m135_ppb", "Out_m137_ppb")
# Alternative variable subsets kept from earlier experiments:
#orig.std.var = c("In_CO2_ppm","In_Nox_ppb","tugtrial","DepStotal")#,"rbansisdelmem"
#orig.std.var = c("rbansistor","MMAAtotal","tugtrial","DepStotal")#,"rbansisdelmem"
psych.std.var <- orig.std.var

#----------------------------------------------------SH.VAR
# Sleep metrics
sleep.features <- c("Sleep.duration", "Sleep.freqn")
bed.features <- c("Bed_Toilet_Transition.duration")  # "Bed_Toilet_Transition.freqn" excluded
# ADL metrics.
# Note: most frequency metrics for ADL are zeros, so they were removed from
# the feature vectors (only durations are kept).
leave_home.features <- c("Leave_Home.duration")  # "Leave_Home.freqn" excluded
relax.features <- c("Relax.duration")  # "Relax.freqn" excluded
cook.features <- c("Cook.duration")  # "Cook.freqn" excluded
eat.features <- c("Eat.duration")  # "Eat.freqn" excluded
personal_hygiene.features <- c("Personal_Hygiene.duration")  # "Personal_Hygiene.freqn" excluded
adl.features <- c(leave_home.features, relax.features, cook.features,
                  eat.features, personal_hygiene.features)
# Mobility metrics.
# Technically these are counts and distance covered; kept under the
# "FunctionalHealth" names to match the feature tables.
mobility.features <- c("FunctionalHealth.duration", "FunctionalHealth.freqn")
SH.features <- c(sleep.features, bed.features, adl.features, mobility.features)

#----------------------------------------------------Change Features
sleep.change <- c("sleep_durn.change", "sleep_freq.change", "bed.change")
mobility.change <- c("mobility.durn.change", "mobility.freqn.change")
adl.change <- c("leave_home.change", "relax.change", "cook.change", "eat.change",
                "personal_hygiene.change")
sh.change <- c(sleep.change, adl.change, mobility.change)

#----------------------------------------------------Skewness Features
sleep.sk <- c("sleep_durn.sk", "sleep_freq.sk", "bed.sk")
mobility.sk <- c("mobility.durn.sk", "mobility.freqn.sk")
adl.sk <- c("leave_home.sk", "relax.sk", "cook.sk", "eat.sk", "personal_hygiene.sk")
sh.sk <- c(sleep.sk, adl.sk, mobility.sk)

#----------------------------------------------------Autocorrelation features
sleep.acf <- c("sleep_durn.acf", "sleep_freq.acf", "bed.acf")
mobility.acf <- c("mobility.durn.acf", "mobility.freqn.acf")
adl.acf <- c("leave_home.acf", "relax.acf", "cook.acf", "eat.acf", "personal_hygiene.acf")
sh.acf <- c(sleep.acf, adl.acf, mobility.acf)

#----------------------------------------------------Standard deviation features
# NOTE(review): first entry is "sleep.var" (not "sleep_durn.var" as the other
# families would suggest); kept as-is because the strings must match columns
# produced elsewhere.
sleep.var <- c("sleep.var", "sleep_freq.var", "bed.var")
mobility.var <- c("mobility.durn.var", "mobility.freqn.var")
adl.var <- c("leave_home.var", "relax.var", "cook.var", "eat.var", "personal_hygiene.var")
sh.var <- c(sleep.var, adl.var, mobility.var)

#----------------------------------------------------Kurtosis features
sleep.kurt <- c("sleep.kurt", "sleep_freq.kurt", "bed.kurt")
mobility.kurt <- c("mobility.durn.kurt", "mobility.freqn.kurt")
adl.kurt <- c("leave_home.kurt", "relax.kurt", "cook.kurt", "eat.kurt", "personal_hygiene.kurt")
sh.kurt <- c(sleep.kurt, adl.kurt, mobility.kurt)

# Feature set used to build the modelling dataset.
# NOTE(review): sh.kurt is defined above but not included here — confirm that
# the exclusion is intentional.
dataset.features <- c(sh.change, sh.sk, sh.acf, sh.var)
|
304d1b02ac1a28091f48edce97db711825d419c3 | c36077f78bc38c9304898f5b5e0a7e3295b11588 | /offline_odin_mapping_INTERPOLATE.R | dd9d5fdef0a1e859b76dbb42e3328b1cbfe69388 | [
"MIT"
] | permissive | guolivar/motueka2019-odin-analysis | 569e763032c5855d71b94a9c60d3bb1b75764e1f | 2614269d71f3f25c63171e92e7dc09cfe4235115 | refs/heads/master | 2020-05-25T12:58:59.908766 | 2019-12-22T21:36:36 | 2019-12-22T21:36:36 | 187,810,894 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,109 | r | offline_odin_mapping_INTERPOLATE.R | ###################################################
# Spatial averages and plots for ODIN data from
# Motueka
# Author: Gustavo Olivares
###################################################
#' ---
#' title: "Mapping ODIN-SD data from Motueka 2019"
#' author: "Gustavo Olivares"
#' ---
# Libraries setup
library(readr)
library(openair)
library(ggmap)
library(ggplot2)
library(sp)
library(rgdal)
library(raster)
library(scales)
library(gstat)
library(RNetCDF)
# library(reshape2)
# library(automap)
# library(RJSONIO)
# library(curl)
# library(base64enc)
# library(zoo)
# library(stringi)
# library(viridis)
# library(dplyr)
# library(RColorBrewer)
# library(purrr)
# library(magick)
##### Register google's mapping key
# WARNING(review): a Google Maps API key is hard-coded here and committed to
# source control — it should be rotated and loaded from an environment
# variable (e.g. Sys.getenv("GMAPS_KEY")) instead.
register_google(key = "AIzaSyACi3pNvPQTxZWx5u0nTtke598dPqdgySg")
##### Load data ####
##### Set the working directory DB ####
# NOTE(review): setwd() inside a script makes it machine-specific; every path
# below is relative to this directory.
work_path <- path.expand("~/repositories/motueka2019-odin-analysis/mapping/")
setwd(work_path)
data_path <- path.expand("./")
# Some constants #####
# NZTM (EPSG:2193, metres) and WGS84 lat/lon (EPSG:4326) CRS objects, plus a
# transverse-mercator proj4 string used by rgdal::project() further down.
proj4string_NZTM <- CRS('+init=epsg:2193')
proj4string_latlon <- CRS('+init=epsg:4326')
proj4string <- "+proj=tmerc +lat_0=0.0 +lon_0=173.0 +k=0.9996 +x_0=1600000.0 +y_0=10000000.0 +datum=WGS84 +units=m"
# Get the list of locations
# Tab-separated file: one row per ODIN device with its ID and NZTM X/Y.
odin_locations <- read_delim(paste0(data_path,"odin_locations.txt"),
                             "\t",
                             escape_double = FALSE,
                             trim_ws = TRUE)
# Get ODIN LONG dataset (long format: one row per site/timestamp PM2.5 value;
# col_types 'cTn' = character site, POSIX timestamp, numeric concentration)
odin.1hr <- read_delim(paste0(data_path,"data/tdc_hourly_longPM25.csv"),
                       ",",
                       escape_double = FALSE,
                       trim_ws = TRUE,
                       col_types = 'cTn')
# Match location: attach NZTM coordinates and lat/lon to each device's rows
odin.1hr$X <- NA
odin.1hr$Y <- NA
odin.1hr$lat <- NA
odin.1hr$lon <- NA
# FIX: seq_along() instead of 1:length() (the latter yields c(1, 0) when empty)
for (odin in seq_along(odin_locations$ID)) {
  indx_id <- which(odin.1hr$site == paste0("SD",odin_locations$ID[odin]))
  odin.1hr$X[indx_id] <- odin_locations$X[odin]
  odin.1hr$Y[indx_id] <- odin_locations$Y[odin]
  # project(..., inv = TRUE) converts NZTM metres back to lon/lat degrees
  p <- project(cbind(odin_locations$X[odin],odin_locations$Y[odin]),proj = proj4string,inv = TRUE)
  odin.1hr$lat[indx_id] <- p[2]
  odin.1hr$lon[indx_id] <- p[1]
}
# Drop rows without a site id.
# NOTE(review): the original comment said "Remove data without location", but
# the filter is on `site`; rows for unknown sites keep NA lat/lon and are only
# removed just before the interpolation step.
odin.1hr <- subset(odin.1hr,!is.na(odin.1hr$site))
names(odin.1hr) <- c('site','date','pm','X','Y','lat','lon')
# Build average map: per-device campaign mean/min/max PM2.5 plus position
curr_data <- data.frame(ODIN = unique(odin.1hr$site))
curr_data$lat <- NA
curr_data$lon <- NA
curr_data$PM2.5 <- NA
curr_data$PM2.5min <- NA
curr_data$PM2.5max <- NA
ndev <- length(curr_data$ODIN)
for (i in seq_len(ndev)){
  # Device ids look like "SDxxxx"; characters 3-6 are the numeric ID
  loc_id <- which(odin_locations$ID==substr(curr_data$ODIN[i],3,6))
  if (length(loc_id)>0){
    p <- project(cbind(odin_locations$X[loc_id],odin_locations$Y[loc_id]),proj = proj4string,inv = TRUE)
    curr_data$lon[i] <- p[1]
    curr_data$lat[i] <- p[2]
    # mean/min/max over all-NA or empty vectors give NaN/±Inf (with warnings);
    # coerce every non-finite summary to NaN so plots can drop them uniformly
    curr_data$PM2.5[i] <- mean(subset(odin.1hr,site == curr_data$ODIN[i])$pm,na.rm = TRUE)
    if(!is.finite(curr_data$PM2.5[i])){
      curr_data$PM2.5[i] <- NaN
    }
    curr_data$PM2.5min[i] <- min(subset(odin.1hr,site == curr_data$ODIN[i])$pm,na.rm = TRUE)
    if(!is.finite(curr_data$PM2.5min[i])){
      curr_data$PM2.5min[i] <- NaN
    }
    curr_data$PM2.5max[i] <- max(subset(odin.1hr,site == curr_data$ODIN[i])$pm,na.rm = TRUE)
    if(!is.finite(curr_data$PM2.5max[i])){
      curr_data$PM2.5max[i] <- NaN
    }
  }
}
# Map centre = centroid of the device positions
centre_lat <- mean(curr_data$lat,na.rm = TRUE)
centre_lon <- mean(curr_data$lon,na.rm = TRUE)
## Prepare the map to plot animations #####
# Get the basemap
# Static Google basemap centred on the device centroid (zoom 14, 2x scale)
ca <- get_googlemap(c(centre_lon,centre_lat),
                    zoom = 14,
                    scale = 2)
# Plot campaign average map
# One point per device, coloured by its campaign-mean PM2.5
map_out <- ggmap(ca) + 
  geom_point(data=curr_data,
             aes(x=lon,
                 y=lat,
                 colour = PM2.5),
             size = 5) +
  scale_color_gradient(low="white",
                       high="red",
                       limits=c(min(curr_data$PM2.5,na.rm = TRUE),
                                max(curr_data$PM2.5,na.rm = TRUE)),
                       name = "PM2.5")
ggsave(filename=paste0(data_path,
                       'campaign_average_Motueka.png'),
       plot=map_out,
       width=10,
       height=10,
       units = "in")
# Plot campaign max map (per-device maximum of the hourly averages)
map_out <- ggmap(ca) + 
  geom_point(data=curr_data,
             aes(x=lon,
                 y=lat,
                 colour = PM2.5max),
             size = 5) +
  scale_color_gradient(low="white", high="red",
                       limits=c(min(curr_data$PM2.5max,na.rm = TRUE),
                                max(curr_data$PM2.5max,na.rm = TRUE)),
                       name = "PM2.5")
ggsave(filename=paste0(data_path,
                       'HourlyAVG_maximum_Motueka.png'),
       plot=map_out,
       width=10,
       height=10,
       units = "in")
# Plot campaign min map (per-device minimum of the hourly averages)
map_out <- ggmap(ca) + 
  geom_point(data=curr_data,
             aes(x=lon,
                 y=lat,
                 colour = PM2.5min),
             size = 5) +
  scale_color_gradient(low="white", high="red",
                       limits=c(min(curr_data$PM2.5min,na.rm = TRUE),
                                max(curr_data$PM2.5min,na.rm = TRUE)),
                       name = "PM2.5")
ggsave(filename=paste0(data_path,
                       'HourlyAVG_minimum_Motueka.png'),
       plot=map_out,
       width=10,
       height=10,
       units = "in")
# Plot time series: hourly PM2.5, one line per site
# NOTE(review): assumes the "timeseries/" subdirectory already exists;
# ggsave() does not create it.
plot_tseries <- ggplot(data.frame(odin.1hr),
                       aes(x=date)) +
  geom_line(aes(y=pm,
                colour=site)) +
  ylab('PM2.5')
ggsave(filename = paste0(data_path,
                         'timeseries/t_series.png'),
       plot = plot_tseries,
       width = 12,
       height = 6,
       units = 'in')
## Plot hourly maps INTERPOLATED #####
# Remove NAs from curr_data and odin.1hr (devices with no known position)
curr_data <- subset(curr_data,!is.na(curr_data$lat))
odin.1hr <- subset(odin.1hr,!is.na(odin.1hr$lat))
# Set the coordinates for the objects (promote to SpatialPointsDataFrame)
coordinates(curr_data) <- ~ lon + lat
proj4string(curr_data) <- proj4string_latlon
coordinates(odin.1hr) <- ~ lon + lat
proj4string(odin.1hr) <- proj4string_latlon
# Re-project to NZTM so grid cell sizes/distances below are in metres #####
odin.1hr <- spTransform(odin.1hr,proj4string_NZTM)
print("Starting the interpolation")
# Setting the prediction grid properties #####
cellsize <- 50 # pixel size in projection units (NZTM, i.e. metres)
min_x <- odin.1hr@bbox[1,1] - cellsize - 500 # minimum x coordinate, 500m buffer
min_y <- odin.1hr@bbox[2,1] - cellsize - 500 # minimum y coordinate, 500m buffer
max_x <- odin.1hr@bbox[1,2] + cellsize + 500 # maximum x coordinate, 500m buffer
max_y <- odin.1hr@bbox[2,2] + cellsize + 500 # maximum y coordinate, 500m buffer
x_length <- max_x - min_x # easting amplitude
y_length <- max_y - min_y # northing amplitude
ncol <- round(x_length/cellsize,0) # number of columns in grid
nrow <- round(y_length/cellsize,0) # number of rows in grid
grid <- GridTopology(cellcentre.offset=c(min_x,min_y),cellsize=c(cellsize,cellsize),cells.dim=c(ncol,nrow))
# Convert GridTopology object to SpatialPixelsDataFrame object. #####
grid <- SpatialPixelsDataFrame(grid,
                               data=data.frame(id=seq_len(prod(ncol,nrow))),
                               proj4string=proj4string_NZTM)
# Get rid of NA containing rows (missing PM readings)
odin.1hr <- subset(odin.1hr,!is.na(pm))
all_dates <- sort(unique(odin.1hr$date))
# Track which hourly slices could actually be interpolated.
# FIX: was `FALSE * (1:length(all_dates))` — numeric zeros built with the
# 1:length anti-pattern; a logical vector states the intent and behaves
# identically in the later `valid_dates == 1` test (TRUE == 1).
valid_dates <- logical(length(all_dates))
# limits for colorscales #####
cmin <- 0
cmax <- 120
ndates <- length(all_dates)
i <- 0 # flag: 0 until the first raster layer has been created
# FIX: removed the dead `d_slice <- 3` debug leftover; the loop variable
# overwrote it immediately.
for (d_slice in seq_len(ndates)){
  c_data <- subset(odin.1hr,subset = (date==all_dates[d_slice]))
  # Timestamps are shifted +12h for local (NZST) display
  print(format(all_dates[d_slice]+12*3600,
               format = "%Y-%m-%d %H:%M"))
  # Need at least two reporting sites to interpolate a surface
  if (length(unique(c_data$site))<2){
    next
  }
  valid_dates[d_slice] <- TRUE
  # Inverse distance weighting (power 1) of PM2.5 onto the 50 m grid
  surf.idw <- idw(pm ~ 1,newdata = grid, locations = c_data, idp = 1,na.action = na.omit)
  surf.idw$timestamp <- d_slice
  proj4string(surf.idw) <- proj4string_NZTM
  if (i==0){
    # First valid slice initialises the raster stack
    to_rast.idw <- surf.idw
    r0.idw <- rasterFromXYZ(cbind(surf.idw@coords,surf.idw$var1.pred))
    crs(r0.idw) <- '+init=epsg:2193'
    raster_cat.idw <- r0.idw
    i <- 1
  } else {
    # Subsequent slices are appended as named layers
    to_rast.idw <- surf.idw
    r0.idw <- rasterFromXYZ(cbind(surf.idw@coords,surf.idw$var1.pred))
    names(r0.idw) <- as.character(all_dates[d_slice])
    crs(r0.idw) <- '+init=epsg:2193'
    raster_cat.idw <- addLayer(raster_cat.idw,r0.idw)
  }
  # Re-project the slice to lat/lon polygons so it can overlay the basemap
  rtp <- rasterToPolygons(projectRaster(r0.idw,crs = "+proj=longlat +datum=WGS84"))
  points <- data.frame(spTransform(c_data,CRS('+init=epsg:4326')))
  points$label <- substr(points$site,3,6)
  map_out <- ggmap(ca) + geom_polygon(data = rtp,aes(x = long, y = lat, group = group,
                                                     fill = rep(rtp[[1]], each = 5)),
                                      size = 0,
                                      alpha = 0.85) +
    scale_fill_gradient(low="white", high="red",limits=c(0, cmax), name = "PM2.5", oob=squish) +
    # geom_point(data=points,aes(x=lon,y=lat),colour = "black", size = 3) +
    # geom_text(data=points,aes(x=lon,y=lat,label=label), hjust=0, colour = "gray") +
    ggtitle(paste(as.character(all_dates[d_slice]+12*3600),"NZST"))
  ggsave(filename=paste0(data_path,
                         'idw/',
                         format(all_dates[d_slice]+12*3600,
                                format = "%Y-%m-%d %H:%M"),
                         '.png'),
         plot=map_out,
         width=6,
         height=6,
         units = "in")
}
save('raster_cat.idw',file = paste0(data_path,'raster_cat.idw.RData'))
print("Done with interpolating ...")
# Lat/lon copy of the raster stack for export
raster_cat_idw_LL <- projectRaster(raster_cat.idw,crs = "+proj=longlat +datum=WGS84")
save(list = c('raster_cat_idw_LL'),file = paste0(data_path,"raster_odin_LL.RData"))
print("Writing NetCDF files")
print("IDW")
# Write NetCDF files ####
# IDW
lat_dim <- unique(coordinates(raster_cat_idw_LL)[,2])
lon_dim <- unique(coordinates(raster_cat_idw_LL)[,1])
# Only the slices that were actually interpolated get a time coordinate
tim_dim <- all_dates[valid_dates ==1 ]
nc.idw <- create.nc("odin_PM25.nc")
# Dimensions specifications
dim.def.nc(nc.idw, "time", unlim=TRUE)
dim.def.nc(nc.idw, "latitude",length(lat_dim))
dim.def.nc(nc.idw, "longitude",length(lon_dim))
# Variable specifications (CF-style coordinate variables + pm2p5 data cube)
var.def.nc(nc.idw,"time","NC_INT","time")
att.put.nc(nc.idw,"time","units","NC_CHAR","seconds since 1970-01-01 00:00:0.0")
att.put.nc(nc.idw,"time","long_name","NC_CHAR","time")
var.def.nc(nc.idw,"latitude","NC_FLOAT","latitude")
att.put.nc(nc.idw,"latitude","units","NC_CHAR","degrees_north")
att.put.nc(nc.idw,"latitude","long_name","NC_CHAR","latitude")
att.put.nc(nc.idw,"latitude","standard_name","NC_CHAR","latitude")
var.def.nc(nc.idw,"longitude","NC_FLOAT","longitude")
att.put.nc(nc.idw,"longitude","units","NC_CHAR","degrees_east")
att.put.nc(nc.idw,"longitude","long_name","NC_CHAR","longitude")
att.put.nc(nc.idw,"longitude","standard_name","NC_CHAR","longitude")
var.def.nc(nc.idw,"pm2p5","NC_FLOAT",c("longitude","latitude","time"))
att.put.nc(nc.idw,"pm2p5","units","NC_CHAR","ug m**-3")
att.put.nc(nc.idw,"pm2p5","long_name","NC_CHAR","Mass concentration of PM2.5 ambient aerosol particles in air")
att.put.nc(nc.idw,"pm2p5","standard_name","NC_CHAR","mass_concentration_of_pm2p5_ambient_aerosol_particles_in_air")
# NOTE(review): cell_methods claims 15-minute averaging intervals, but the
# input file is hourly ("tdc_hourly_longPM25.csv") — verify which is correct.
att.put.nc(nc.idw,"pm2p5","cell_methods","NC_CHAR","time: mean (interval: 15 minutes)")
att.put.nc(nc.idw,"pm2p5","missing_value","NC_FLOAT",-999.9)
# Global attributes
# NOTE(review): the title says "Inverse Square Distance" but the interpolation
# above used idp = 1 (inverse distance, power 1) — confirm which is intended.
att.put.nc(nc.idw,"NC_GLOBAL","title","NC_CHAR","PM2.5 interpolated surface (Inverse Square Distance)")
att.put.nc(nc.idw,"NC_GLOBAL","Conventions","NC_CHAR","CF-1.7")
att.put.nc(nc.idw,"NC_GLOBAL","Institution","NC_CHAR","NIWA (National Institute of Water and Atmospheric Research, Auckland, New Zealand)")
att.put.nc(nc.idw,"NC_GLOBAL","project_id","NC_CHAR","CONA - 2018")
att.put.nc(nc.idw,"NC_GLOBAL","history","NC_CHAR",paste0(format(max(odin.1hr$date),format = "%Y%m%d"),
                                                         " Data generated and formatted"))
att.put.nc(nc.idw,"NC_GLOBAL","comment","NC_CHAR","Data for visualisation only")
# Load data into the file
var.put.nc(nc.idw,"latitude",lat_dim)
var.put.nc(nc.idw,"longitude",lon_dim)
var.put.nc(nc.idw,"time",as.numeric(tim_dim))
# Reshape the flat layer matrix into a lon x lat x time cube
# (assumes raster layers are in the same order as tim_dim)
rast_data <- getValues(raster_cat_idw_LL)[,(1:length(tim_dim))]
dim(rast_data) <- c(length(lon_dim),
                    length(lat_dim),
                    length(tim_dim))
var.put.nc(nc.idw,"pm2p5",rast_data)
# Close the file and save
close.nc(nc.idw)
## Create MP4 video ####
print("Create videos")
# Stitch the hourly idw/*.png frames into an MP4 at 6 fps with ffmpeg.
# NOTE(review): the shell command is built by string paste0(); it relies on
# data_path containing no spaces or shell metacharacters.
system(paste0("ffmpeg -f image2 -r 6 -pattern_type glob -i '",
              data_path,
              "idw/",
              "*.png' ",
              data_path,
              "idw/all_PM25.mp4"))
## Upload to youtube ####
print("Upload IDW to youtube")
# NOTE(review): the video title says "Motueka PM10" while the data and file
# names are PM2.5 — confirm which pollutant label is intended.
system(paste0("youtube-upload --title=\"Motueka PM10\" --privacy=unlisted --client-secrets=./client_secrets.json ",
              data_path,
              "idw/all_PM25.mp4",
              " --playlist=\"Motueka 2019 - ODIN\""))
# Upload files
print("Upload NC files")
# Push the NetCDF product to the NIWA incoming FTP drop
RCurl::ftpUpload(paste0(data_path,"odin_PM25.nc"),
                 "ftp://ftp.niwa.co.nz/incoming/GustavoOlivares/odin_motueka/odin_PM25_1hr_MOTUEKA.nc")
|
772122fc69a8b0b546599eb391fc0324947d0df7 | aa061f979e2e17d16231ab7d9f49a64fc6dc2f5e | /man/assign_parameters.curves.Rd | 39e0089291f52a8b3731246f762d31687994336d | [] | no_license | SumathyS/swmmr | 6f48ea2529bc021c3c8dd9da78a2c57c7f14311b | 333f8f46451b7c5ac398b9cdbcd0ed9a08626f70 | refs/heads/master | 2020-05-29T09:36:39.343829 | 2019-03-26T11:08:41 | 2019-03-26T11:08:41 | 189,070,425 | 2 | 0 | null | 2019-05-28T17:07:14 | 2019-05-28T17:07:13 | null | UTF-8 | R | false | true | 421 | rd | assign_parameters.curves.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assign_parameters.R
\name{assign_parameters.curves}
\alias{assign_parameters.curves}
\title{conversion helper}
\usage{
\method{assign_parameters}{curves}(x, infiltration = NULL,
subcatchment = NULL, subcatchment_typologies = NULL,
conduit_material = NULL, junction_parameters = NULL)
}
\description{
conversion helper
}
\keyword{internal}
|
1d9f4be4301cca8644f464f3c2499c22dd7c8f7f | 3fc3964396f8010aae9345d37f551c4431c52ff9 | /R/nu_correct.R | 1a6e1ff35215b5cb3c26fa77430229a9db714ae3 | [] | no_license | muschellij2/freesurfer | ff96f465ebbfbb0b7ce18644be5f4c5ea753fc45 | 7d70f616e760d8d3a453a652d98756e34877fed7 | refs/heads/master | 2021-06-24T00:57:12.644687 | 2020-12-08T18:41:34 | 2020-12-08T18:41:34 | 67,370,835 | 9 | 8 | null | 2020-11-15T23:42:38 | 2016-09-04T22:12:47 | R | UTF-8 | R | false | false | 2,046 | r | nu_correct.R | #' @title Use Freesurfers Non-Uniformity Correction
#' @description This function calls \code{nu_correct}
#' to correct for non-uniformity
#' @param file (character) input filename
#' @param mask (character or nifti) Mask to use for correction.
#' @param opts (character) additional options to \code{mri_segment}
#' @param verbose print diagnostic messages
#' @param ... additional arguments passed to \code{\link{fs_cmd}}.
#' @return Object of class nifti depending on \code{retimg}
#' @importFrom neurobase parse_img_ext
#' @export
#' @examples \dontrun{
#' if (have_fs()){
#' nu_correct("/path/to/T1.nii.gz")
#' }
#' }
nu_correct = function(
file,
mask = NULL,
opts = "",
verbose = TRUE,
...){
file = checkimg(file)
ext = neurobase::parse_img_ext(file)
infile = file
if (ext %in% c("nii", "nii.gz")) {
infile = nii2mnc(file)
}
# no.outfile = FALSE
# if (is.null(outfile)) {
outfile = tempfile(fileext = ".nii")
# no.outfile = TRUE
# }
out_ext = neurobase::parse_img_ext(outfile)
if ( !(ext %in% c("nii", "mnc"))) {
stop("outfile extension must be nii/nii.gz or mnc")
}
tmpfile = tempfile(fileext = ".mnc")
opts = trimws(opts)
if (!is.null(mask)) {
mask = ensure_mnc(mask)
opts = paste0(opts, " -mask ", shQuote(mask))
}
if (!verbose) {
opts = paste0(opts, " -quiet")
}
fs_cmd(
func = "nu_correct",
file = infile,
outfile = tmpfile,
frontopts = opts,
retimg = FALSE,
samefile = FALSE,
add_ext = FALSE,
verbose = verbose,
bin_app = "mni/bin",
...)
if (out_ext == "nii") {
outfile = mnc2nii(tmpfile, outfile = outfile)
outfile = readnii(outfile)
} else {
file.copy(from = tmpfile, to = outfile, overwrite = TRUE)
}
return(outfile)
}
#' @title Non-Uniformity Correction Help
#' @description Displays Freesurfer's help page for \code{nu_correct}.
#'
#' @return Result of \code{fs_help}
#' @export
nu_correct.help = function(){
  # Delegate to the generic Freesurfer help wrapper for the MNI binary
  fs_help(
    func_name = "nu_correct",
    help.arg = "-help",
    bin_app = "mni/bin")
}
19ba5ee0ffd907dfe944499861b0a92f3f309cd4 | ac087dd303df1cbf93b471f8256f17bd3bb32556 | /R/FactoGPA.R | 88c821524aa5668a527005fcb60b12bbb18528ac | [] | no_license | cran/RcmdrPlugin.FactoMineR | ca29bbce1af452bc4573c72c0a014f2b2a97ecd3 | 8f7b54c71d76c1e363f616832f1470b833c6061c | refs/heads/master | 2021-01-22T03:39:42.975682 | 2020-02-03T07:00:02 | 2020-02-03T07:00:02 | 17,718,054 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 38,954 | r | FactoGPA.R | FactoGPA <-
function()
{
# Fonction pour la gestion des noms ############################################
nom.correct<-function(text, liste=NULL)
{
text<-chartr("^\ ", "...", text)
if(!is.null(liste)) {
while(text %in% liste) {
text<-paste(text, ".bis", sep="")
}
}
return(text)
}
################################################################################
# Création des fonctions pour les options via nouvelle fenêtre graphique
#! suppression de groupes quantitatif
supprimeQuanti.funct<-defmacro(label, expr=
{
env<-environment()
OnSGQ<-function()
{
grpeActSupprime<-listQuantiAct.nom[as.numeric(tkcurselection(listQuantiAct))+1]
if(length(grpeActSupprime)>=1)
{
listQuantiAct.nom.tmp<-listQuantiAct.nom[-which(listQuantiAct.nom %in% grpeActSupprime)]
assign("listQuantiAct.nom",listQuantiAct.nom.tmp, envir=env)
tkdelete(listQuantiAct,"0","end")
if(length(listQuantiAct.nom)>=1) {
for (grpe in listQuantiAct.nom) tkinsert(listQuantiAct, "end", grpe)
}
}
nb.grpe<-length(listQuantiAct.nom)
if (nb.grpe>1) {
tclvalue(label.quantiFrame.var)<-paste(nb.grpe, .Facto_gettext("groups"), sep=" ")
tkconfigure(label.quantiFrame)
}
else
{
tclvalue(label.quantiFrame.var)<-paste("0", .Facto_gettext("group"), sep=" ")
tkconfigure(label.quantiFrame)
}
}
SupGpeQuantiFrame<-tkframe(ListeQuantiFrame)
SupGpeQuanti.but<-tkbutton(SupGpeQuantiFrame, textvariable=tclVar(label), command=OnSGQ, borderwidth=3)
tkgrid(SupGpeQuanti.but, sticky="ew")
})
#! Ajout d'un groupe quantitatif
ajoutQuanti.funct<-defmacro(label, firstLabel, expr=
{
env<-environment()
compteur.GQ<-1
.AjoutQuantiLabel<-tclVar(paste(firstLabel, "", sep=" "))
OnAGQ<-function()
{
AjoutGpeQuantiWin<-tktoplevel()
tkwm.title(AjoutGpeQuantiWin,.Facto_gettext("Definition of a group"))
#création de la fonction AGA.OK
AGQ.OK<-function()
{
assign("compteur.GQ", compteur.GQ+1, envir=env)
nom.groupe<-nom.correct(tclvalue(nomGrpeQuanti.val), liste=c(listQuantiAct.nom))
if (nom.groupe=="") tkmessageBox(message=.Facto_gettext("Name for the group"), icon="warning", type="ok")
else {
varGroupe<-listVarQuanti.nom[as.numeric(tkcurselection(listVarQuanti))+1]
if (length(varGroupe)>=1) {
assign(paste(nom.groupe,".var", sep=""), c(varGroupe), envir=env)
tkinsert(listQuantiAct,"end",nom.groupe)
assign("listQuantiAct.nom", c(listQuantiAct.nom, nom.groupe),envir=env)
if (length(listQuantiAct.nom)==1) tclvalue(label.quantiFrame.var)<-paste(.Facto_gettext("1 group"), sep=" ")
else tclvalue(label.quantiFrame.var)<-paste(length(listQuantiAct.nom) , .Facto_gettext("groups"), sep=" ")
tkconfigure(label.quantiFrame)
tkdestroy(AjoutGpeQuantiWin)
}
}
}
# choix du nom du groupe
nomGrpeQuanti.lab<-tklabel(AjoutGpeQuantiWin,text=.Facto_gettext("Name of the group: "))
nomGrpeQuanti.val<-tclVar(paste("Gc", compteur.GQ, sep=""))
nomGrpeQuanti<-tkentry(AjoutGpeQuantiWin,width=15,textvariable=nomGrpeQuanti.val)
# création de la liste pour le choix des variables acives
listVarQuanti<-tklistbox(AjoutGpeQuantiWin, selectmode="extended",exportselection="FALSE",yscrollcommand=function(...)tkset(scrVarQuanti,...))
scrVarQuanti<-tkscrollbar(AjoutGpeQuantiWin,repeatinterval=5,command=function(...)tkyview(listVarQuanti,...))
listVarQuanti.nom<-NULL
for (i in (1:ncol(donnee))) {
if (is.numeric(donnee[,i])) {
tkinsert(listVarQuanti,"end",vars[i])
listVarQuanti.nom<-c(listVarQuanti.nom, vars[i])
}
}
AGQ.but<-tkbutton(AjoutGpeQuantiWin, text="OK", width=16, command=AGQ.OK)
tkgrid(nomGrpeQuanti.lab, nomGrpeQuanti)
tkgrid.configure(nomGrpeQuanti.lab, column=0, columnspan=2, sticky="w")
tkgrid.configure(nomGrpeQuanti, column=2, columnspan=3)
tkgrid(tklabel(AjoutGpeQuantiWin, text=""))
tkgrid(tklabel(AjoutGpeQuantiWin, text = .Facto_gettext("Select the variables for the group"), fg = "blue"), column=0, columnspan = 5, sticky = "w")
tkgrid(listVarQuanti, scrVarQuanti, sticky = "nw")
tkgrid.configure(scrVarQuanti, sticky = "wns", column=4,columnspan=1)
tkgrid.configure(listVarQuanti, sticky = "ew", column=0, columnspan=4)
tkgrid(tklabel(AjoutGpeQuantiWin, text=""))
tkgrid(AGQ.but, column=2,columnspan=1, sticky="ew")
tkgrid.columnconfigure(AjoutGpeQuantiWin,0, minsize=55)
tkgrid.columnconfigure(AjoutGpeQuantiWin,1, minsize=55)
tkgrid.columnconfigure(AjoutGpeQuantiWin,2, minsize=55)
tkgrid.columnconfigure(AjoutGpeQuantiWin,3, minsize=55)
tkgrid.columnconfigure(AjoutGpeQuantiWin,4, minsize=55)
}
GpeQuantiFrame<-tkframe(ListeQuantiFrame)
AjoutGpeQuanti.but<-tkbutton(GpeQuantiFrame, textvariable=.AjoutQuantiLabel, command=OnAGQ, borderwidth=3)
tkgrid(AjoutGpeQuanti.but, sticky="ew")
})
#! Modification d'un groupe quantitatif
modifQuanti.funct<-defmacro(label, firstLabel, expr=
{
env<-environment()
.ModifQuantiLabel<-tclVar(paste(firstLabel, "", sep=" "))
OnMGQ<-function() {
ModifGpeQuantiWin<-tktoplevel()
tkwm.title(ModifGpeQuantiWin,.Facto_gettext("Modification of a group"))
#création de la fonction AGA.OK
MGQ.OK<-function() {
nom.groupe<-nom.correct(tclvalue(nomModifGrpeQuanti.val), liste=c(listQuantiAct.nom))
if (nom.groupe=="") tkmessageBox(message=.Facto_gettext("Name for the group"), icon="warning", type="ok")
else {
listQuantiAct.nom.tmp<-listQuantiAct.nom[-which(listQuantiAct.nom== grpeAModifier)]
assign("listQuantiAct.nom",listQuantiAct.nom.tmp, envir=env)
tkdelete(listQuantiAct,"0","end")
for (grpe in listQuantiAct.nom) tkinsert(listQuantiAct, "end", grpe)
varGroupe<-listModifVarQuanti.nom[as.numeric(tkcurselection(listModifVarQuanti))+1]
if (length(varGroupe)>=1) {
assign(paste(nom.groupe,".var", sep=""), c(varGroupe), envir=env)
tkinsert(listQuantiAct,"end",nom.groupe)
assign("listQuantiAct.nom", c(listQuantiAct.nom, nom.groupe),envir=env)
tkdestroy(ModifGpeQuantiWin)
}
}
}
if(length(as.numeric(tkcurselection(listQuantiAct)))>=1) grpeAModifier<-listQuantiAct.nom[as.numeric(tkcurselection(listQuantiAct))+1][1]
else {
tkdestroy(ModifGpeQuantiWin)
return()
}
eval(parse(text=paste("grpeAModifier.var<-",paste(grpeAModifier,".var", sep=""),sep="")))
# choix du nom du groupe
nomModifGrpeQuanti.lab<-tklabel(ModifGpeQuantiWin,text=.Facto_gettext("Name of the group: "))
nomModifGrpeQuanti.val<-tclVar(grpeAModifier)
nomModifGrpeQuanti<-tkentry(ModifGpeQuantiWin,width=15,textvariable=nomModifGrpeQuanti.val)
# création de la liste pour le choix des variables acives
listModifVarQuanti<-tklistbox(ModifGpeQuantiWin, selectmode="extended",exportselection="FALSE",yscrollcommand=function(...)tkset(scrModifVarQuanti,...))
scrModifVarQuanti<-tkscrollbar(ModifGpeQuantiWin,repeatinterval=5,command=function(...)tkyview(listModifVarQuanti,...))
listModifVarQuanti.nom<-NULL
indice.num<-0
for (i in (1:ncol(donnee))) {
if (is.numeric(donnee[,i])) {
tkinsert(listModifVarQuanti,"end",vars[i])
listModifVarQuanti.nom<-c(listModifVarQuanti.nom, vars[i])
if(vars[i] %in% grpeAModifier.var) tkselection.set(listModifVarQuanti, indice.num)
indice.num<-indice.num+1
}
}
MGQ.but<-tkbutton(ModifGpeQuantiWin, text="OK", width=16, command=MGQ.OK)
tkgrid(nomModifGrpeQuanti.lab, nomModifGrpeQuanti)
tkgrid.configure(nomModifGrpeQuanti.lab, column=0, columnspan=2, sticky="w")
tkgrid.configure(nomModifGrpeQuanti, column=2, columnspan=3)
tkgrid(tklabel(ModifGpeQuantiWin, text=""))
tkgrid(tklabel(ModifGpeQuantiWin, text = .Facto_gettext("Select the variables of the group"), fg = "blue"), column=0, columnspan = 5, sticky = "w")
tkgrid(listModifVarQuanti, scrModifVarQuanti, sticky = "nw")
tkgrid.configure(scrModifVarQuanti, sticky = "wns", column=4,columnspan=1)
tkgrid.configure(listModifVarQuanti, sticky = "ew", column=0, columnspan=4)
tkgrid(tklabel(ModifGpeQuantiWin, text=""))
tkgrid(MGQ.but, column=2,columnspan=1, sticky="ew")
tkgrid.columnconfigure(ModifGpeQuantiWin,0, minsize=55)
tkgrid.columnconfigure(ModifGpeQuantiWin,1, minsize=55)
tkgrid.columnconfigure(ModifGpeQuantiWin,2, minsize=55)
tkgrid.columnconfigure(ModifGpeQuantiWin,3, minsize=55)
tkgrid.columnconfigure(ModifGpeQuantiWin,4, minsize=55)
}
ModifGpeQuantiFrame<-tkframe(ListeQuantiFrame)
ModifGpeQuanti.but<-tkbutton(ModifGpeQuantiFrame, textvariable=.ModifQuantiLabel, command=OnMGQ, borderwidth=3)
tkgrid(ModifGpeQuanti.but, sticky="ew")
})
#! fonction pour la réinitialisation des paramètre
Reinitializ.funct<-function()
{
tkdestroy(top)
FactoGPA()
}
#! fonction pour le choix des éléments de sortie
Sortie.funct<-defmacro(label, firstLabel, expr=
{
env<-environment()
compteur.sortie<-0
#déclaration des variables
RFichier <- ""
Rdep<-FALSE
RRVs<-FALSE
Rsimi<-FALSE
Rscaling<-FALSE
Rconsensus<-TRUE
RPANOVA<-FALSE
RXfin<-FALSE
Rcorrelations<-FALSE
RRV<-TRUE
.SortieLabel<-tclVar(paste(firstLabel, "", sep=" "))
OnSortie<-function()
{
SortieWin<-tktoplevel()
tkwm.title(SortieWin,.Facto_gettext("Output options"))
#création de la fonction onOKsub
onOK.sortie<-function()
{
assign("compteur.sortie", compteur.sortie+1, envir=env)
if(compteur.sortie>0) tclvalue(.SortieLabel)<-paste(label, "", sep=" ")
tkconfigure(Sortie.but, fg="blue")
if(tclvalue(depValue)=="1") assign("Rdep", TRUE, envir=env)
else assign("Rdep", FALSE, envir=env)
if(tclvalue(RVsValue)=="1") assign("RRVs", TRUE, envir=env)
else assign("RRVs", FALSE, envir=env)
if(tclvalue(simi.Value)=="1") assign("Rsimi", TRUE, envir=env)
else assign("Rsimi", FALSE, envir=env)
if(tclvalue(scalingValue)=="1") assign("Rscaling", TRUE, envir=env)
else assign("Rscaling", FALSE, envir=env)
if(tclvalue(consensusValue)=="1") assign("Rconsensus", TRUE, envir=env)
else assign("Rconsensus", FALSE, envir=env)
if(tclvalue(PANOVAValue)=="1") assign("RPANOVA", TRUE, envir=env)
else assign("RPANOVA", FALSE, envir=env)
if(tclvalue(XfinValue)=="1") assign("RXfin", TRUE, envir=env)
else assign("RXfin", FALSE, envir=env)
if(tclvalue(correlationsValue)=="1") assign("Rcorrelations", TRUE, envir=env)
else assign("Rcorrelations", FALSE, envir=env)
if(tclvalue(RVValue)=="1") assign("RRV", TRUE, envir=env)
else assign("RRV", FALSE, envir=env)
if (tclvalue(Fichier)=="") assign("RFichier", NULL, envir=env)
assign("RFichier", tclvalue(Fichier), envir=env)
tkdestroy(SortieWin)
}
RV.lab<-tklabel(SortieWin, text=.Facto_gettext("RV coefficients between partial configurations"))
RV.check<-tkcheckbutton(SortieWin)
if(RRV) RVValue<-tclVar("1")
else RVValue<-tclVar("0")
tkconfigure(RV.check,variable=RVValue)
RVs.lab <-tklabel(SortieWin, text=.Facto_gettext("Standardized RV coefficients between partial configurations"))
RVs.check <- tkcheckbutton(SortieWin)
if(RRVs) RVsValue <- tclVar("1")
else RVsValue <- tclVar("0")
tkconfigure(RVs.check,variable=RVsValue)
simi.lab<-tklabel(SortieWin,text=.Facto_gettext("Procrustes similarity indexes between partial configurations"))
simi.check <- tkcheckbutton(SortieWin)
if(Rsimi) simi.Value <- tclVar("1")
else simi.Value <- tclVar("0")
tkconfigure(simi.check,variable=simi.Value)
scaling.lab<-tklabel(SortieWin,text=.Facto_gettext("Isotropic scaling factors"))
scaling.check <- tkcheckbutton(SortieWin)
if(Rscaling) scalingValue <- tclVar("1")
else scalingValue <- tclVar("0")
tkconfigure(scaling.check,variable=scalingValue)
dep.lab <-tklabel(SortieWin, text=.Facto_gettext("Initial partial configurations"))
dep.check <- tkcheckbutton(SortieWin)
if(Rdep) depValue <- tclVar("1")
else depValue <- tclVar("0")
tkconfigure(dep.check,variable=depValue)
consensus.lab<-tklabel(SortieWin,text=.Facto_gettext("Consensus configuration"))
consensus.check <- tkcheckbutton(SortieWin)
if(Rconsensus) consensusValue <- tclVar("1")
else consensusValue <- tclVar("0")
tkconfigure(consensus.check,variable=consensusValue)
Xfin.lab<-tklabel(SortieWin,text=.Facto_gettext("Partial configurations after transformations"))
Xfin.check <- tkcheckbutton(SortieWin)
if(RXfin) XfinValue <- tclVar("1")
else XfinValue <- tclVar("0")
tkconfigure(Xfin.check,variable=XfinValue)
PANOVA.lab<-tklabel(SortieWin,text=.Facto_gettext("Procrustes Analysis of Variance tables, per configuration, per objet, per dimension"))
PANOVA.check <- tkcheckbutton(SortieWin)
if(RPANOVA) PANOVAValue <- tclVar("1")
else PANOVAValue <- tclVar("0")
tkconfigure(PANOVA.check,variable=PANOVAValue)
correlations.lab<-tklabel(SortieWin,text=.Facto_gettext("Correlations between initial partial configurations and consensus dimensions"))
correlations.check <- tkcheckbutton(SortieWin)
if(Rcorrelations) correlationsValue <- tclVar("1")
else correlationsValue <- tclVar("0")
tkconfigure(correlations.check,variable=correlationsValue)
RFichierFrame<-tkframe(SortieWin,borderwidth=2)
if (is.null(RFichier)) Fichier <- tclVar("")
else Fichier<-tclVar(RFichier)
Fichier.entry <-tkentry(RFichierFrame,width="40",textvariable=Fichier)
SortieOK.but<-tkbutton(SortieWin,text="OK",width=16,command=onOK.sortie)
tkgrid(tklabel(SortieWin, text = .Facto_gettext("Select output options"), fg ="blue"), columnspan = 2, sticky = "w")
tkgrid(tklabel(SortieWin, text = " "))
tkgrid(consensus.lab,consensus.check,sticky="w")
tkgrid(RV.lab,RV.check,sticky="w")
tkgrid(RVs.lab,RVs.check,sticky="w")
tkgrid(simi.lab,simi.check,sticky="w")
tkgrid(scaling.lab,scaling.check,sticky="w")
tkgrid(dep.lab,dep.check,sticky="w")
tkgrid(Xfin.lab,Xfin.check,sticky="w")
tkgrid(correlations.lab,correlations.check,sticky="w")
tkgrid(PANOVA.lab,PANOVA.check,sticky="w")
tkgrid(tklabel(SortieWin, text = " "))
tkgrid(tklabel(RFichierFrame,text=.Facto_gettext("Print results on a 'csv' file")),Fichier.entry)
tkgrid(RFichierFrame)
tkgrid(SortieOK.but)
}
SortieFrame<-tkframe(IlluFrame)
Sortie.but<-tkbutton(SortieFrame, textvariable=.SortieLabel, command=OnSortie, borderwidth=3)
tkgrid(Sortie.but, sticky="ew")
})
#! Button factory for the "Graphical options" sub-dialog of the GPA GUI.
# Built with defmacro so `label`/`firstLabel` are substituted literally and
# the R* option variables created here live in this environment, where the
# enclosing dialog's OnAppliquer() reads them back.
PLOT.GPA<-defmacro(label, firstLabel, expr=
{
env<-environment()
# Number of confirmations of the options dialog; after the first one the
# button text switches from `firstLabel` to `label`.
compteur.graph<-0
.PlotLabel<-tclVar(paste(firstLabel, "", sep=" "))
# Default plotting options (consumed later by OnAppliquer).
Rchoix<-TRUE
RTitle<-NULL
Rlabel.indMoy<-TRUE
Rhabillage<-"group"
Rpartial<-"all"
RpartialSouris<-FALSE
Rchrono<-FALSE
RXlimInd<-NULL
RYlimInd<-NULL
# Opens the options window and wires up all widgets.
OnPlot<-function()
{
PlotWin<-tktoplevel()
tkwm.title(PlotWin,.Facto_gettext("Graphical options"))
tkwm.geometry(PlotWin, "-100+50")
PlotWin2<-tkframe(PlotWin)
# OK callback: copy the widget states back into the macro environment.
onOKsub<-function()
{
assign("compteur.graph", compteur.graph+1, envir=env)
if(compteur.graph>0) tclvalue(.PlotLabel)<-paste(label, .Facto_gettext(""), sep=" ")
tkconfigure(Plot.but, fg="blue")
# Read back the entries of the individuals-graph section.
if(tclvalue(ind.check.value)==1) assign("Rchoix", TRUE, envir=env)
else assign("Rchoix", FALSE, envir=env)
if(Rchoix)
{
# NOTE(review): the NULL assigned here is immediately overwritten by the
# unconditional assignment on the next line; the second line was probably
# meant as an else-branch. Left as-is pending confirmation.
if (tclvalue(Titre)==" ") assign("RTitle", NULL, envir=env)
assign("RTitle", tclvalue(Titre), envir=env)
label.tmp.indMoy<-tclvalue(label.indMoy.checkValue)
if(label.tmp.indMoy==1) assign("Rlabel.indMoy", TRUE, envir=env)
else assign("Rlabel.indMoy", FALSE, envir=env)
habillage.tmp<-listgraph.nom[as.numeric(tkcurselection(listgraph))+1]
if(length(habillage.tmp)==0) assign("Rhabillage","none", envir=env)
else assign("Rhabillage", habillage.tmp, envir=env)
# Axis limits are kept only when both bounds are filled in.
if(tclvalue(XlimIndMin)=="" | tclvalue(XlimIndMax)=="") assign("RXlimInd", NULL, envir=env)
else assign("RXlimInd", c(as.numeric(tclvalue(XlimIndMin)), as.numeric(tclvalue(XlimIndMax))), envir=env)
if(tclvalue(YlimIndMin)=="" | tclvalue(YlimIndMax)=="") assign("RYlimInd", NULL, envir=env)
else assign("RYlimInd", c(as.numeric(tclvalue(YlimIndMin)), as.numeric(tclvalue(YlimIndMax))), envir=env)
partial.tmp<-listpartial.nom[as.numeric(tkcurselection(listpartial))+1]
if(length(partial.tmp)==0) assign("Rpartial",NULL, envir=env)
else assign("Rpartial", partial.tmp, envir=env)
chrono.tmp<-tclvalue(partial.chrono.checkValue)
if(chrono.tmp=="1") assign("Rchrono", TRUE, envir=env)
else assign("Rchrono", FALSE, envir=env)
souris.tmp<-tclvalue(partial.souris.checkValue)
if(souris.tmp=="1") assign("RpartialSouris", TRUE, envir=env)
else assign("RpartialSouris", FALSE, envir=env)
}
tkdestroy(PlotWin)
}
# Build the "graphical options" interface.
##########################
# Section: widgets for the graph of the individuals.
PlotIndFrame<-tkframe(PlotWin, borderwidth=5, relief="groove")
RchoixFrame<-tkframe(PlotIndFrame,borderwidth=2)
ind.check<-tkcheckbutton(RchoixFrame)
if(Rchoix) ind.check.value<-tclVar("1")
else ind.check.value<-tclVar("0")
tkconfigure(ind.check, variable=ind.check.value)
tkgrid(tklabel(RchoixFrame, text=.Facto_gettext("Graph of the individuals"), font=font2),ind.check)
tkgrid(tklabel(RchoixFrame, text=" "))
RTitleFrame<-tkframe(PlotIndFrame,borderwidth=2)
if (is.null(RTitle)) Titre <- tclVar(" ")
else Titre<-tclVar(RTitle)
Titre.entry <-tkentry(RTitleFrame,width="40",textvariable=Titre)
tkgrid(tklabel(RTitleFrame,text=.Facto_gettext("Title of the graph")),Titre.entry)
RlabelFrame<-tkframe(PlotIndFrame,borderwidth=2)
label.indMoy.check<-tkcheckbutton(RlabelFrame)
if (Rlabel.indMoy) label.indMoy.checkValue<-tclVar("1")
else label.indMoy.checkValue<-tclVar("0")
tkconfigure(label.indMoy.check, variable=label.indMoy.checkValue)
tkgrid(tklabel(RlabelFrame, text=.Facto_gettext("Labels for the mean individuals")),label.indMoy.check)
RhabillageFrame<-tkframe(PlotIndFrame,borderwidth=2)
listgraph<-tklistbox(RhabillageFrame,height=3, selectmode="single",exportselection="FALSE",yscrollcommand=function(...) tkset(scrgraph,...))
scrgraph <-tkscrollbar(RhabillageFrame,repeatinterval=5,command=function(...)tkyview(listgraph,...))
listgraph.nom<-c("group","ind")
tkinsert(listgraph,"end",.Facto_gettext("by.group"))
tkinsert(listgraph,"end",.Facto_gettext("by.individual"))
if(Rhabillage=="group") tkselection.set(listgraph,0)
if(Rhabillage=="ind") tkselection.set(listgraph,1)
# Commented-out legacy code kept for reference; it appears to have offered
# coloring by any factor column of the dataset.
## indice<-2
## for (i in 1:ncol(donnee))
## {
## if(is.factor(donnee[,i]))
## {
## tkinsert(listgraph,"end",vars[i])
## listgraph.nom<-c(listgraph.nom,vars[i])
## if(Rhabillage==vars[i]) tkselection.set(listgraph, indice)
## indice<-indice+1
## }
## }
tkgrid(tklabel(RhabillageFrame, text=.Facto_gettext("Select drawing for the individuals")))
tkgrid(listgraph, scrgraph, sticky = "nw")
tkgrid.configure(scrgraph, sticky = "wns")
tkgrid.configure(listgraph, sticky = "ew")
RlimFrame<-tkframe(PlotIndFrame,borderwidth=2)
if(is.null(RXlimInd)) XlimIndMin<-tclVar("")
else XlimIndMin<-tclVar(paste(RXlimInd[1]))
XlimIndMin.entry <-tkentry(RlimFrame,width="5",textvariable=XlimIndMin)
if (is.null(RXlimInd)) XlimIndMax<- tclVar("")
# FIX: previously re-populated the x-max field with RXlimInd[1] (the min);
# use the second element, consistent with the y-axis handling below.
else XlimIndMax<-tclVar(paste(RXlimInd[2]))
XlimIndMax.entry <-tkentry(RlimFrame,width="5",textvariable=XlimIndMax)
tkgrid(tklabel(RlimFrame,text=.Facto_gettext("x limits of the graph:")),XlimIndMin.entry, XlimIndMax.entry)
if(is.null(RYlimInd)) YlimIndMin<- tclVar("")
else YlimIndMin<-tclVar(paste(RYlimInd[1]))
YlimIndMin.entry <-tkentry(RlimFrame,width="5",textvariable=YlimIndMin)
if (is.null(RYlimInd)) YlimIndMax<- tclVar("")
else YlimIndMax<-tclVar(paste(RYlimInd[2]))
YlimIndMax.entry <-tkentry(RlimFrame,width="5",textvariable=YlimIndMax)
tkgrid(tklabel(RlimFrame,text=.Facto_gettext("y limits of the graph:")),YlimIndMin.entry,YlimIndMax.entry)
RpartialFrame<-tkframe(PlotIndFrame,borderwidth=2)
listpartial<-tklistbox(RpartialFrame,height=7, selectmode="extended",exportselection="FALSE",yscrollcommand=function(...) tkset(scrpartial,...))
scrpartial<-tkscrollbar(RpartialFrame,repeatinterval=5,command=function(...)tkyview(listpartial,...))
listpartial.nom<-NULL
indice<-0
for (i in 1:nrow(donnee)) {
tkinsert(listpartial,"end",rows[i])
listpartial.nom<-c(listpartial.nom,rows[i])
if(rows[i] %in% Rpartial) tkselection.set(listpartial, indice)
indice<-indice+1
}
partial.souris.check<-tkcheckbutton(RpartialFrame)
if (RpartialSouris) partial.souris.checkValue<-tclVar("1")
else partial.souris.checkValue<-tclVar("0")
partial.chrono.check<-tkcheckbutton(RpartialFrame)
if (Rchrono) partial.chrono.checkValue<-tclVar("1")
else partial.chrono.checkValue<-tclVar("0")
tkconfigure(partial.souris.check, variable=partial.souris.checkValue)
tkconfigure(partial.chrono.check, variable=partial.chrono.checkValue)
tkgrid(tklabel(RpartialFrame, text=.Facto_gettext("Select the individuals for which partial points are drawn")))
tkgrid(listpartial, scrpartial, sticky = "nw")
tkgrid.configure(scrpartial, sticky = "wns")
tkgrid.configure(listpartial, sticky = "ew")
tkgrid(tklabel(RpartialFrame, text=.Facto_gettext("Interactive selection of the individuals")), partial.souris.check)
tkgrid(tklabel(RpartialFrame, text=.Facto_gettext("Chronologic representation of the partial points")), partial.chrono.check)
# Lay out the frames inside PlotIndFrame.
tkgrid(RchoixFrame)
tkgrid(RTitleFrame)
tkgrid(tklabel(PlotIndFrame, text=" "))
tkgrid(RlabelFrame)
tkgrid(tklabel(PlotIndFrame, text=" "))
tkgrid(RhabillageFrame)
tkgrid(tklabel(PlotIndFrame, text=" "))
tkgrid(RpartialFrame)
tkgrid(tklabel(PlotIndFrame, text=" "))
tkgrid(RlimFrame)
tkgrid(tklabel(PlotIndFrame, text=" "))
# Lay out PlotWin itself.
subOKCancelHelp(PlotWin, "plot.GPA")
tkgrid(PlotIndFrame, PlotWin2, sticky="ns")
tkgrid(subButtonsFrame, sticky="ew", columnspan=2)
}
PlotFrame<-tkframe(IlluFrame)
Plot.but<-tkbutton(PlotFrame, textvariable=.PlotLabel, command=OnPlot, borderwidth=3)
tkgrid(Plot.but, sticky="ew")
})
#! Apply-button handler: runs the GPA analysis without closing the dialog.
# Reads the widget values of the main window, builds a temporary
# "<activeDataSet>.GPA" data frame, generates and logs the GPA() call as a
# command string, optionally draws the plot and prints/exports results.
# NOTE(review): the R* option variables (Rchoix, Rhabillage, RFichier, RRV,
# ...) are assumed to come from the PLOT.GPA / Sortie.funct macro
# environments defined alongside this function -- confirm against the full
# file.
OnAppliquer<-function()
{
# Read back the settings of the main window.
nom.res<-tclvalue(resu.val)
if (length(which(ls(envir = .GlobalEnv, all.names = TRUE)==nom.res))>0) justDoIt(paste('remove (',nom.res,')')) #if object res already exists, it's removed
nbiter<-as.numeric(tclvalue(nbiter.val))
scaling<-as.logical(as.numeric(tclvalue(scale.bool)))
tolerance<-as.numeric(tclvalue(tolerance.val))
Axe<-c(as.numeric(tclvalue(Axe1)), as.numeric(tclvalue(Axe2)))
# Assemble the description of the data table used for the GPA.
group<-NULL
type<-NULL
name.group<-NULL
num.group.sup<-NULL
variables<-NULL
indice.grpe<-1
# Collect the active quantitative groups declared in the dialog; each group
# "<name>" is described by a companion "<name>.var" vector built elsewhere.
nb.GQA<-length(listQuantiAct.nom)
if(nb.GQA>=1) {
name.group<-c(name.group, listQuantiAct.nom)
for(i in 1:nb.GQA) {
eval(parse(text=paste("liste.var.GQA<-", listQuantiAct.nom[i], ".var", sep="")))
type<-c(type,liste.var.GQA[1])
## change of 7 June (previous behavior kept for reference):
## variables<-c(variables, liste.var.GQA[-1])
## group<-c(group, length(liste.var.GQA)-1)
group<-c(group, length(liste.var.GQA))
variables<-c(variables, liste.var.GQA)
indice.grpe<-indice.grpe+1
}
}
# Build the temporary <activeDataSet>.GPA data frame holding the selection.
commande.data<-paste(activeDataSet(),'.GPA', '<-', activeDataSet(),'[ , c("',paste(variables, collapse='", "'), '")]', sep="")
justDoIt(commande.data)
logger(commande.data)
donnee.depart<-activeDataSet()
activeDataSet(paste(activeDataSet(),'.GPA', sep=""))
# Build and run the GPA() command string; non-default nbiteration/tolerance
# are only appended when they differ from the GUI defaults.
commande.GPA<-paste(nom.res, '<-GPA(', activeDataSet(), ', group=c(',paste(group, collapse=", "), '), name.group=c("',paste(name.group, collapse='", "'), '"), scale=',scaling,', graph=FALSE',sep="")
if (nbiter!=200) commande.GPA<-paste(commande.GPA, ', nbiteration =',nbiter,sep='')
if (tolerance!=1e-10) commande.GPA<-paste(commande.GPA, ', tolerance =',tolerance,sep='')
commande.GPA<-paste(commande.GPA, ')',sep='')
justDoIt(commande.GPA)
logger(commande.GPA)
# Plot only when requested and when the result object was actually created.
if((Rchoix)&(length(which(ls(envir = .GlobalEnv, all.names = TRUE)==nom.res))>0)){
# A non-keyword habillage is translated to a column index; keyword values
# are quoted so they survive the string-built call below.
if ((Rhabillage!="none") & (Rhabillage!="ind") & (Rhabillage!="group")) {
Rhabillage<-which(colnames(get(getRcmdr(".activeDataSet")))==Rhabillage)
if(length(Rhabillage)==0) Rhabillage<-"none"
}
if (Rhabillage=="none") Rhabillage<-paste('"', Rhabillage, '"', sep="")
if (Rhabillage=="ind") Rhabillage<-paste('"', Rhabillage, '"', sep="")
if (Rhabillage=="group") Rhabillage<-paste('"', Rhabillage, '"', sep="")
# Interactive (mouse) selection uses plotGPApartial(), otherwise plot.GPA().
if(RpartialSouris){
commande.plotI<-paste('plotGPApartial(', nom.res, ', axes=c(', paste(Axe, collapse=", "), '), lab.ind.moy=', Rlabel.indMoy, ', habillage=', Rhabillage, sep="")
if (!is.null(RXlimInd)) commande.plotI<-paste(commande.plotI, ', xlim=c(', paste(RXlimInd, collapse=", "), ')')
if (!is.null(RYlimInd)) commande.plotI<-paste(commande.plotI, ', ylim=c(', paste(RYlimInd, collapse=", "), ')')
if (Rchrono) commande.plotI<-paste(commande.plotI, ', chrono=', Rchrono, sep='')
if (is.null(RTitle)) commande.plotI <- paste(commande.plotI,')', sep="")
else {
if (RTitle ==" ") commande.plotI <- paste(commande.plotI,')', sep="")
else commande.plotI <- paste(commande.plotI,', title="', RTitle,'")', sep="")
}
}
else {
commande.plotI<-paste('plot.GPA(', nom.res, ', axes=c(', paste(Axe, collapse=", "), '), lab.ind.moy=', Rlabel.indMoy, ', habillage=', Rhabillage, sep="")
if (!is.null(RXlimInd)) commande.plotI<-paste(commande.plotI, ', xlim=c(', paste(RXlimInd, collapse=", "), ')')
if (!is.null(RYlimInd)) commande.plotI<-paste(commande.plotI, ', ylim=c(', paste(RYlimInd, collapse=", "), ')')
if (!is.null(Rpartial)) commande.plotI<-paste(commande.plotI, ', partial=c("', paste(Rpartial, collapse='", "'),'")', sep='')
if (Rchrono) commande.plotI<-paste(commande.plotI, ', chrono=', Rchrono, sep='')
if (is.null(RTitle)) commande.plotI <- paste(commande.plotI,')', sep="")
else {
if (RTitle ==" ") commande.plotI <- paste(commande.plotI,')', sep="")
else commande.plotI <- paste(commande.plotI,', title="', RTitle,'")', sep="")
}
}
justDoIt(commande.plotI)
logger(commande.plotI)
}
# Print the requested result components, or export them to a csv file when
# a file name was given in the Outputs dialog.
if (RFichier==""){
if(RRV) doItAndPrint(paste(nom.res, '$RV', sep=""))
if(RRVs) doItAndPrint(paste( nom.res, '$RVs', sep=""))
if(Rsimi) doItAndPrint(paste( nom.res, '$simi', sep=""))
if(Rscaling) doItAndPrint(paste( nom.res, '$scaling', sep=""))
if(Rdep) doItAndPrint(paste( nom.res, '$dep', sep=""))
if(Rconsensus) doItAndPrint(paste( nom.res, '$consensus', sep=""))
if(RXfin) doItAndPrint(paste( nom.res, '$Xfin', sep=""))
if(Rcorrelations) doItAndPrint(paste( nom.res, '$correlations', sep=""))
if(RPANOVA) doItAndPrint(paste( nom.res, '$PANOVA', sep=""))
}
else {
# Ensure the file name is quoted exactly once, then append each requested
# component; only the first write truncates the file.
Fich = RFichier
if (substr(Fich,1,1)!='"') Fich = paste('"',Fich,sep='')
if (substr(Fich,nchar(Fich),nchar(Fich))!='"') Fich = paste(Fich,'"',sep='')
append = FALSE
if(RRV){
doItAndPrint(paste('write.infile(', nom.res, '$RV, file =',Fich,',append=',append,')', sep=""))
append = TRUE
}
if(RRVs){
doItAndPrint(paste('write.infile(', nom.res, '$RVs, file =',Fich,',append=',append,')', sep=""))
append = TRUE
}
if(Rsimi){
doItAndPrint(paste('write.infile(', nom.res, '$simi, file =',Fich,',append=',append,')', sep=""))
append = TRUE
}
if(Rscaling){
doItAndPrint(paste('write.infile(', nom.res, '$scaling, file =',Fich,',append=',append,')', sep=""))
append = TRUE
}
if(Rdep){
doItAndPrint(paste('write.infile(', nom.res, '$dep, file =',Fich,',append=',append,')', sep=""))
append = TRUE
}
if(Rconsensus){
doItAndPrint(paste('write.infile(', nom.res, '$consensus, file =',Fich,',append=',append,')', sep=""))
append = TRUE
}
if(RXfin){
doItAndPrint(paste('write.infile(', nom.res, '$Xfin, file =',Fich,',append=',append,')', sep=""))
append = TRUE
}
if(Rcorrelations){
doItAndPrint(paste('write.infile(', nom.res, '$correlations, file =',Fich,',append=',append,')', sep=""))
append = TRUE
}
if(RPANOVA) doItAndPrint(paste('write.infile(', nom.res, '$PANOVA, file =',Fich,',append=',append,')', sep=""))
}
# Restore the original active dataset and drop the temporary .GPA table.
activeDataSet(donnee.depart)
justDoIt(paste('remove(',activeDataSet(),'.GPA)',sep=""))
logger(paste('remove(',activeDataSet(),'.GPA)',sep=""))
}
#! OK-button handler: runs the analysis, then destroys the dialog.
# Delegates to OnAppliquer() so that OK and Apply cannot drift apart; the
# only difference is that OK also tears down the top-level Tk window.
onOK<-function()
{
OnAppliquer()
tkdestroy(top)
}
# Création de la fenêtre top #
################################################################################
top<-tktoplevel(borderwidth=10)
tkwm.title(top,.Facto_gettext("GPA"))
tkwm.geometry(top, "-50+50")
# définition des polices
font2<-tkfont.create(family="times",size=12,weight="bold")
fontheading<-tkfont.create(family="times",size=11,weight="bold")
# récupération du jeu de données actif
donnee<-get(getRcmdr(".activeDataSet"))
vars<-colnames(donnee)
rows<-rownames(donnee)
# création du frame contenant les listes groupes quanti
ListeQuantiFrame<- tkframe(top, borderwidth=2, relief="groove")
label.quantiFrame.var<-tclVar(.Facto_gettext("group"))
label.quantiFrame<-tklabel(ListeQuantiFrame, textvariable=label.quantiFrame.var,fg = "darkred", font=fontheading)
# liste des groupes de variables quanti Actives
listQuantiAct<-tklistbox(ListeQuantiFrame,selectmode="extended",exportselection="FALSE", height=4, yscrollcommand=function(...)tkset(scrQuantiAct,...))
scrQuantiAct<-tkscrollbar(ListeQuantiFrame,repeatinterval=5,command=function(...)tkyview(listQuantiAct,...))
listQuantiAct.nom<-NULL
# boutons d'action groupes quantitative
supprimeQuanti.funct(label=.Facto_gettext("Delete"))
ajoutQuanti.funct(label=.Facto_gettext("Add 1 group"), firstLabel=.Facto_gettext("Add 1 group"))
modifQuanti.funct(label=.Facto_gettext("Modify 1 group"), firstLabel=.Facto_gettext("Modify 1 group"))
# mise en forme de ListeQuantiFrame
tkgrid(label.quantiFrame, columnspan=11, sticky = "ew")
tkgrid(listQuantiAct, scrQuantiAct)
tkgrid.configure(scrQuantiAct, column=3, sticky="wns")
tkgrid.configure(listQuantiAct, sticky = "ew", column=4, columnspan=2)
tkgrid.configure(tklabel(ListeQuantiFrame, text=" "))
tkgrid.configure(GpeQuantiFrame,ModifGpeQuantiFrame, SupGpeQuantiFrame)
tkgrid.configure(GpeQuantiFrame, sticky = "ew", column=1, columnspan=2)
tkgrid.configure(ModifGpeQuantiFrame, sticky = "ew", column=4, columnspan=2)
tkgrid.configure(SupGpeQuantiFrame, sticky = "ew", column=7, columnspan=2)
tkgrid.columnconfigure(ListeQuantiFrame,0, minsize=25)
tkgrid.columnconfigure(ListeQuantiFrame,10, minsize=25)
tkgrid.columnconfigure(ListeQuantiFrame,3, minsize=25)
tkgrid.columnconfigure(ListeQuantiFrame,9, minsize=25)
tkgrid.columnconfigure(ListeQuantiFrame,4, minsize=35)
tkgrid.columnconfigure(ListeQuantiFrame,5, minsize=35)
# création de tous les boutons d'options dans IlluFrame
IlluFrame<- tkframe(top, borderwidth=2)
# mise en page de IlluFrame
PLOT.GPA(label=.Facto_gettext("Graphical options"), firstLabel=.Facto_gettext("Graphical options"))
Sortie.funct(label=.Facto_gettext("Outputs"), firstLabel=.Facto_gettext("Outputs"))
tkgrid(PlotFrame, SortieFrame, columnspan=7)
tkgrid.configure(PlotFrame, column=1, columnspan=1)
tkgrid.configure(SortieFrame, column=3, columnspan=1)
tkgrid.columnconfigure(IlluFrame,0, minsize=25)
tkgrid.columnconfigure(IlluFrame,2, minsize=40)
tkgrid.columnconfigure(IlluFrame,4, minsize=25)
# création des options dans OptionFrame
OptionFrame<-tkframe(top, borderwidth=2, relief="groove")
resu.lab<-tklabel(OptionFrame,text=.Facto_gettext("Name of the result object: "))
resu.val<-tclVar("res")
resu<-tkentry(OptionFrame,width=10,textvariable=resu.val)
scale.check <- tkcheckbutton(top)
scale.bool <- tclVar("1")
tkconfigure(scale.check,variable=scale.bool)
scale.lab<-tklabel(OptionFrame,text=.Facto_gettext("Scale the variables:"))
nbiter.lab<-tklabel(OptionFrame,text=.Facto_gettext("Maximum number of iteration for the algorithm:"))
nbiter.val<-tclVar("200")
nbiter<-tkentry(OptionFrame,width=5,textvariable=nbiter.val)
tolerance.lab<-tklabel(OptionFrame,text=.Facto_gettext("Stopping threshold for the algorithm:"))
tolerance.val<-tclVar("1e-10")
tolerance<-tkentry(OptionFrame,width=5,textvariable=tolerance.val)
Axe.label<-tklabel(OptionFrame,text=.Facto_gettext("Select the dimensions for the graphs:"))
Axe1<-tclVar("1")
Axe2<-tclVar("2")
Axe1.entry <-tkentry(OptionFrame,width="5",textvariable=Axe1)
Axe2.entry <-tkentry(OptionFrame,width="5",textvariable=Axe2)
# mise en page de OptionFrame
tkgrid(tklabel(OptionFrame,text=.Facto_gettext("Main options"), fg = "darkred"), columnspan=8, sticky="we")
tkgrid(tklabel(OptionFrame,text=""))
tkgrid(scale.lab,scale.check,sticky="w")
tkgrid(Axe.label,Axe1.entry , Axe2.entry, sticky="w")
tkgrid(nbiter.lab, nbiter)
tkgrid(tolerance.lab, tolerance)
tkgrid(resu.lab, resu)
tkgrid.configure(scale.lab, nbiter.lab, tolerance.lab, resu.lab, Axe.label, column=1, columnspan=4, sticky="w")
tkgrid.configure(scale.check, tolerance, nbiter, resu, column=6, columnspan=2, sticky="e")
tkgrid.configure(Axe1.entry, column=6, columnspan=1, sticky="w")
tkgrid.configure(Axe2.entry, column=7, columnspan=1, sticky="e")
tkgrid.columnconfigure(OptionFrame,0, minsize=25)
tkgrid.columnconfigure(OptionFrame,5, minsize=40)
tkgrid.columnconfigure(OptionFrame,8, minsize=25)
appliquer.but<-tkbutton(top, text=.Facto_gettext("Apply"),width=12,command=OnAppliquer, borderwidth=3, fg="#690f96")
OKCancelHelp(helpSubject="GPA",reset="Reinitializ.funct", apply ="OnAppliquer")
# Mise en page de top
tkgrid(tklabel(top, text=.Facto_gettext("General Procrustes Analysis (GPA)"),font=fontheading), columnspan=3)
tkgrid(tklabel(top,text=""))
tkgrid(ListeQuantiFrame, column=1, columnspan=1, sticky="ew")
tkgrid(tklabel(top,text=""))
tkgrid(IlluFrame, column=1, columnspan=1)
tkgrid(tklabel(top,text=""))
tkgrid(OptionFrame, column=1, columnspan=1)
tkgrid(tklabel(top,text="")) # Ligne de blanc
# tkgrid(appliquer.but, column=1, columnspan=1)
# tkgrid(tklabel(top,text="")) # Ligne de blanc
# tkgrid(buttonsFrame, column=1, columnspan=1, sticky="ew" )
tkgrid(buttonsFrame, appliquer.but)
tkgrid.configure(buttonsFrame, column=1,sticky="e")
tkgrid.configure(appliquer.but, column=2,sticky="w")
}
|
dfd29d831672d1a4e494c6609fa264eecbef5847 | e6cdcd551bc68c7b51ada507f7afb2d898f69a69 | /R/dc_cn.R | ae63ac38e6040971565483b0bea8803c423a775d | [
"MIT"
] | permissive | ropensci/rdatacite | aefaa9eeaab16b0379da0d7299b071805d266d87 | b86e18346d60ebaa279c047e6d27276051aed282 | refs/heads/master | 2023-05-22T13:37:21.493783 | 2023-01-02T23:01:57 | 2023-01-02T23:01:57 | 2,521,192 | 20 | 3 | NOASSERTION | 2020-01-02T19:57:41 | 2011-10-05T18:38:29 | R | UTF-8 | R | false | false | 2,158 | r | dc_cn.R | #' DataCite content negotation
#'
#' @export
#' @param dois (character) one or more DOIs
#' @param format Name of the format. One of "rdf-xml", "turtle",
#' "citeproc-json", "schemaorg", "codemeta", "text", "ris", "bibtex"
#' (default), "datacite-xml", "datacite-json", "bibentry", or
#' "jats".
#' @param style a CSL style (for text format only). See
#' ‘rcrossref::get_styles()’ for options. Default: 'apa'. If there's
#' a style that DataCite doesn't support you'll get a
#' (500) Internal Server Error
#' @param locale Language locale. See ‘?Sys.getlocale’
#' @param ... curl options passed on to [crul::verb-GET]
#' @references
#' https://support.datacite.org/docs/datacite-content-resolver
#' @seealso see also `rcrossref::cr_cn` for a more general purpose
#' content negotation interface
#' @examples \dontrun{
#' dc_cn("10.5281/zenodo.50213")
#' dc_cn(c("10.5281/zenodo.50213", "10.5281/zenodo.57081"), "text")
#' dc_cn(c("a-bad-doi", "10.5281/zenodo.50213", "10.5281/zenodo.57081"), "text")
#' }
dc_cn <- function(dois, format = "bibtex", style = "apa", locale = "en-US",
                  ...) {
  assert(dois, "character")
  assert(format, "character")
  assert(style, "character")
  assert(locale, "character")
  # Map user-facing format names to the media types understood by the
  # DataCite/Crossref content-negotiation service.
  pick <- c(
    "rdf-xml" = "application/rdf+xml",
    "turtle" = "text/turtle",
    "citeproc-json" = "application/vnd.citationstyles.csl+json",
    "schemaorg" = "application/ld+json",
    "codemeta" = "application/vnd.codemeta.ld+json",
    "text" = "text/x-bibliography",
    "ris" = "application/x-research-info-systems",
    "bibtex" = "application/x-bibtex",
    "datacite-xml" = "application/vnd.datacite.datacite+xml",
    "datacite-json" = "application/vnd.datacite.datacite+json",
    "bibentry" = "application/x-bibtex",
    "jats" = "application/vnd.jats+xml")
  # Fail early with an informative message rather than the opaque
  # "subscript out of bounds" that pick[[format]] raises for a bad name.
  if (!format %in% names(pick)) {
    stop("'format' must be one of: ", paste(names(pick), collapse = ", "),
      call. = FALSE)
  }
  type <- pick[[format]]
  # `style` and `locale` were previously accepted but never used; content
  # negotiation honors them as media-type parameters of the plain-text
  # citation format (e.g. "text/x-bibliography; style=apa; locale=en-US").
  if (format == "text") {
    type <- sprintf("%s; style=%s; locale=%s", type, style, locale)
  }
  con <- crul::HttpClient$new(
    url = "https://data.crosscite.org",
    opts = c(list(followlocation = 1), ...),
    headers = list(
      Accept = type,
      `User-Agent` = rdatacite_ua(),
      `X-USER-AGENT` = rdatacite_ua()
    )
  )
  # One GET per DOI, warning (not erroring) on non-success status, and
  # returning the UTF-8 body of each response in input order.
  lapply(dois, function(w) {
    res <- con$get(w)
    warn_stat(res)
    res$parse("UTF-8")
  })
}
|
cd8c218198c208e2e5d6188f3ee182d9f30f6610 | a2737db3578b6574f7e0994ead5bdc5a3c7ff6aa | /whole_Trans/combine/Global/multi_RNA_exp_barplot.R | 3d8f3a94370a896829907b34a984d084522b5200 | [] | no_license | zm-git-dev/research_pipline | 4c7df0e0750d9e92bead8ce5dbc96a0c570f2059 | 543263053aa11dc538c5559509dc15c5ddab1163 | refs/heads/master | 2022-08-18T01:18:44.871373 | 2019-05-15T10:42:46 | 2019-05-15T10:42:46 | 192,702,478 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,566 | r | multi_RNA_exp_barplot.R | args<-commandArgs(TRUE)
# Validate the command-line arguments; previously the usage message was
# printed but execution continued and crashed later on the missing args.
if (length(args) != 2) {
  print("1. input file")
  print("2. output prefix")
  stop("two arguments are required: <input file> <output prefix>",
    call. = FALSE)
}
############## main codes ###############
# Input: tab-separated table; first column = chromosome names (row names),
# remaining columns = per-RNA-type expression ratios (assumed numeric).
data <- as.matrix(read.csv(args[1], sep = "\t", header = TRUE, row.names = 1))
chr <- rownames(data)

# Mean chromosome-name length decides whether the x labels get rotated.
mean_name_len <- mean(nchar(chr))

# One fill color per RNA type (up to 4 columns supported, as before).
colors <- c("green", "yellow", "purple", "blue")[seq_len(ncol(data))]

# Shared plotting routine so the PNG and PDF outputs cannot drift apart
# (the original duplicated this block verbatim for each device).
draw_ratio_barplot <- function() {
  position <- barplot(t(data), beside = TRUE,
    main = "Different RNA expression Ratio in different chromosomes",
    ylim = c(0, max(data) * 1.3), ylab = "Ratio", col = colors,
    axisnames = FALSE)
  legend("topright", colnames(data), cex = 1.0, fill = colors, bty = "n")
  # Center each group label between the first two bars of its group
  # (assumes at least two RNA-type columns, as the original did).
  x <- (position[1, ] + position[2, ]) / 2 + 2
  y <- par("usr")[3] - 0.05
  # Rotate long chromosome names by 45 degrees to avoid overlap.
  srt <- if (mean_name_len > 3) 45 else 0
  text(x, y, font = 10, pos = 2, labels = chr, adj = 1, srt = srt, xpd = TRUE)
  box()
}

# Render to PNG and PDF, with device width scaled by chromosome count.
png(paste0(args[2], ".png"), width = 1440 * nrow(data) / 30, height = 480)
draw_ratio_barplot()
dev.off()

pdf(paste0(args[2], ".pdf"), width = 21 * nrow(data) / 30, height = 7)
draw_ratio_barplot()
dev.off()
656f791974f4965ad7b3ecf9650c2ede8351171f | 1291f9c4909cb5236dca9b2f8f4f6560c1040b94 | /man/assertCharacteristics.Rd | c868d13eca75445ee62fea2bea63dec2f4e34a89 | [] | no_license | cran/IBMPopSim | 4718088f6da032fa83fe534cdc82eb49ddf22c55 | 40f6c5ae8b045d98340b9a7b168b9f4df8ff40c5 | refs/heads/master | 2023-01-07T17:18:30.302973 | 2023-01-07T11:50:02 | 2023-01-07T11:50:02 | 305,096,593 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 286 | rd | assertCharacteristics.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{assertCharacteristics}
\alias{assertCharacteristics}
\title{Function for internal use}
\usage{
assertCharacteristics(characteristics)
}
\description{
Check characteristics.
}
\keyword{internal}
|
985e0c226af93c1c23e8e8b36dd728191845cca8 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/SCORPIUS/examples/infer_trajectory.Rd.R | fc10bc14fc49a9b1b13521d65113d1e0dc2ce38c | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 649 | r | infer_trajectory.Rd.R | library(SCORPIUS)
### Name: infer_trajectory
### Title: Infer linear trajectory through space
### Aliases: infer_trajectory
### ** Examples
# NOTE(review): this script appears to be auto-extracted from the
# SCORPIUS::infer_trajectory help page; all functions below come from the
# SCORPIUS package loaded above.
## Generate an example dataset and visualise it
dataset <- generate_dataset(type = "poly", num_genes = 500, num_samples = 1000, num_groups = 4)
# Project the expression matrix to 2 dimensions using correlation distance.
space <- reduce_dimensionality(dataset$expression, correlation_distance, ndim=2)
draw_trajectory_plot(space, progression_group = dataset$sample_info$group_name)
## Infer a trajectory through this space
traj <- infer_trajectory(space)
## Visualise the trajectory
draw_trajectory_plot(space, path=traj$path, progression_group=dataset$sample_info$group_name)
|
12ab374189b7f938d6f247c22cfc72d6d16f3acc | 84e96d529ccdb74e16e9b516cd69cbb0759520ed | /R/wrangling-session.R | c7d023c974824476db7f20cd5f6d2a0df3f999d7 | [] | no_license | Katjaingstrup/learning-r | bb8cfc744c5ef6960345a4ba8ea1537992dc1f78 | 680057fcd31f349b41267ea8caac5b0233d6128d | refs/heads/master | 2020-04-27T00:14:00.190606 | 2019-03-05T10:30:30 | 2019-03-05T10:30:30 | 173,927,888 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,179 | r | wrangling-session.R | # run everytime you start a new sesion
# Load the session packages (run every time you start a new session).
source(here::here("R/package_loading.R"))

# Create the exercise script for this session.
usethis::use_r("exercises-wrangling")

glimpse(NHANES)
view(NHANES)

# Exercise 1 --------------------------------------------------------------

# Load the packages
library(tidyverse)
library(NHANES)

# Check column names
colnames(NHANES)

# Look at contents (structure)
str(NHANES)
glimpse(NHANES)

# See summary of the whole dataset
summary(NHANES)

# Look over the dataset documentation
?NHANES

##########

# The pipe: a standard way of chaining functions together
NHANES %>% colnames()

NHANES %>%
  head() %>%
  glimpse()

# Mutate function ---------------------------------------------------------

NHANES_Changed <- NHANES %>%
  mutate(height = Height / 100)

# na.rm = TRUE: Height contains missing values, so the plain sum is NA.
sum(NHANES_Changed$height, na.rm = TRUE)
str(NHANES_Changed$height)

# Create or replace multiple variables by separating them with ","
NHANES_Changed <- NHANES_Changed %>%
  mutate(new_column = "only one variable",
         height = Height / 100)

# Exercise 2 --------------------------------------------------------------
# 1 Create a new variable called "UrineVolAverage" by calculating the
#   average urine volume (from "UrineVol1" and "UrineVol2").
# 2 Modify/replace the "Pulse" variable to beats per second (currently is
#   beats per minute).
# 3 Create a new variable called "YoungChild" when age is less than 6 years.

# Check the names of the variables
colnames(NHANES)

# Pipe the data into mutate function and:
NHANES_modified <- NHANES %>% # dataset
  mutate(
    # 1. Average urine volume; note the parentheses around the sum
    #    (the original `UrineVol1 + UrineVol2 / 2` divided only UrineVol2).
    UrineVolAverage = (UrineVol1 + UrineVol2) / 2,
    # 2. Modify Pulse variable
    PulseSeconds = Pulse / 60,
    # 3. Create YoungChild variable using a condition
    YoungChild = if_else(Age < 6, TRUE, FALSE)
  )

NHANES_modified

# Selecting columns --------------------------------------------------------

# Select columns / variables by name, without quotes
NHANES_characteristics <- NHANES %>%
  select(Age, Gender, BMI)

# To drop a variable, use minus
NHANES %>%
  select(-HeadCirc)

# To select similar names, use "matching" helpers
NHANES %>%
  select(starts_with("BP"), contains("Vol"))

?select_helpers

# rename() renames specific columns, using the form: newname = oldname
NHANES %>%
  rename(NumberBabies = nBabies)

# filter() = filtering/subsetting the data by row
NHANES %>%
  filter(Gender == "female")

NHANES %>%
  filter(Gender != "female")

# When BMI is equal to 25
NHANES %>%
  filter(BMI == 25)

# When BMI is less than or equal to 25
NHANES %>%
  filter(BMI <= 25)

# When BMI is 25 and gender is female
NHANES %>%
  filter(BMI == 25 & Gender == "female")

# When BMI is 25 or gender is female
NHANES %>%
  filter(BMI == 25 | Gender == "female")

# arrange() sorts or rearranges the data by columns ------------------------

NHANES %>%
  arrange(Age) %>%
  select(Age)

# Order descending
NHANES %>%
  arrange(desc(Age)) %>%
  select(Age)

# Exercise 3 --------------------------------------------------------------
# Exercise: Filtering and logic, arranging, and selecting
# Then start replacing the ___ with the appropriate code to complete the
# tasks below:
# Filter so only those with BMI more than 20 and less than 40 and keep only
# those with diabetes.
# Filter to keep those who are working ("Work") or those who are renting
# ("HomeOwn") and those who do not have diabetes. Select the variables age,
# gender, work status, home ownership, and diabetes status.
# Using sorting and selecting, find out who has had the most number of
# babies and how old they are.

# To see values of categorical data
summary(NHANES)

# 1. BMI between 20 and 40 and who have diabetes
NHANES %>%
  # format: variable >= number
  filter(BMI >= 20 & BMI <= 40 & Diabetes == "Yes")

# 2. Working or renting, and not diabetes (fill in the blanks)
NHANES %>%
  filter(Work == ___ | ___ == ___ & ___ == ___) %>%
  select(___)

# 3. How old is the person with the most number of children? (fill in)
___ %>%
  ___(___) %>%
  ___(___)

# summarise() ------------------------------------------------------------

# summarise() by itself
NHANES %>%
  summarise(MaxAge = max(Age, na.rm = TRUE),
            MeanBMI = mean(BMI, na.rm = TRUE))

# combined with group_by()
NHANES %>%
  group_by(Gender, Diabetes) %>%
  summarise(MaxAge = max(Age, na.rm = TRUE),
            MeanBMI = mean(BMI, na.rm = TRUE))

# gather(): converting from wide to long form ------------------------------
table4b
table4b %>%
  gather(key = year, value = population, -country)
# this does the same
table4b %>%
  gather(key = year, value = population, `1999`, `2000`)

# Keep only variables of interest
nhanes_chars <- NHANES %>%
  select(SurveyYr, Gender, Age, Weight, Height, BMI, BPSysAve)
nhanes_chars

# Convert to long form, excluding SurveyYr and Gender
nhanes_long <- nhanes_chars %>%
  gather(Measure, Value, -SurveyYr, -Gender)
nhanes_long

# Calculate mean on each measure, by gender and year
nhanes_long %>%
  group_by(SurveyYr, Gender, Measure) %>%
  summarise(MeanValues = mean(Value, na.rm = TRUE))

# spread(): long back to wide ----------------------------------------------
table2
table2 %>%
  spread(key = type, value = count)
0800953a07215a7c854c57006b2ba5d2e065ad92 | e8b9b4b3d81f83d09ac04528b2050908cf58be30 | /R/fn_convert_to_HS10.R | 51331495199438f42bf440e7e63f814875a42579 | [] | no_license | thefactmachine/high_medium_tech_exports | 9f158f58d1a172d0247f4c82413eb6426f146f0a | b3b91ef617d3c7a500058c331f9bf9af6eaf845e | refs/heads/master | 2021-01-09T20:08:43.062509 | 2016-06-07T03:06:08 | 2016-06-07T03:06:08 | 60,559,424 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 360 | r | fn_convert_to_HS10.R | fn_convert_to_HS10 <- function(a_string) {
# this function takes a value such as: "0101.10.00.24"
# and removes the periods (i.e ".") and returns: "0101100024"
str1 <- substr(a_string, 1, 4)
str2 <- substr(a_string, 6, 7)
str3 <- substr(a_string, 9, 10)
str4 <- substr(a_string, 12, 13)
str_return <- paste0(str1, str2, str3, str4)
return(str_return)
}
|
fed17f79fdae98d7259715874877f4cbe9a9b721 | 9625cc9d6566714328fa66f8e9f32976d0a3025f | /R/ols-data-hsb.R | 995a2e80b9b40ce4b313b7f5e2b089d6d5dd435a | [] | no_license | topepo/olsrr | 8007f3a60d5829dfb7faf6a2f8690f3473b7cd87 | db9b842f3eb13b8f4c8ea38d3d1afcd31442448d | refs/heads/master | 2020-04-06T12:40:57.904811 | 2018-11-14T01:50:26 | 2018-11-14T01:50:26 | 157,465,680 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 23 | r | ols-data-hsb.R | #' Test Data Set
#'
#' A sample data set bundled with the package for use in examples and
#' tests.  (Variable definitions are not documented in this source file;
#' see the package's data documentation.)
"hsb"
|
c0d8111b4bda26716d3d5d95ce34669338497b3a | 6ba493ca9129518a3a9d52826beb6d3404b140da | /R/CAAElementsPlanetaryOrbit_VenusSemimajorAxis.R | 5f66a4ae3cf90e44b45c38357184ac38e5118fd4 | [] | no_license | helixcn/skycalc | a298e7e87a46a19ba2ef6826d611bd9db18e8ee2 | 2d338b461e44f872ceee13525ba19e17926b2a82 | refs/heads/master | 2021-06-16T08:54:46.457982 | 2021-03-25T02:15:38 | 2021-03-25T02:15:38 | 35,885,876 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 122 | r | CAAElementsPlanetaryOrbit_VenusSemimajorAxis.R | CAAElementsPlanetaryOrbit_VenusSemimajorAxis <-
function() {
  # Thin R wrapper: delegates to the compiled AA+ routine of the same name
  # and returns the semimajor axis of Venus's orbit as computed from the
  # library's planetary-orbit elements (units per the AA+ library -- confirm
  # against its documentation).
  .Call("CAAElementsPlanetaryOrbit_VenusSemimajorAxis" )
}
|
c76d423c7860242bf9a82733539d55c924605e16 | 8c7dc1facdc0672df7fed535258a1d446b419c9d | /03_covariate_selection/01_lasso_functions.R | aed7ff3c8668de3ac1663556269a67bdc68d6a47 | [
"MIT"
] | permissive | boyercb/mmc | 2b0dbebe5d6f0fcce60c9fedadd520a6ddc26e7c | 654fe8d8e59a503b0ac835433b270a21aaa7f47b | refs/heads/master | 2020-06-26T18:11:50.404833 | 2020-01-21T03:30:42 | 2020-01-21T03:30:42 | 199,709,960 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,372 | r | 01_lasso_functions.R | # Core functions for the lasso analysis -----------------------------------
# Runs k-fold cross-validated lasso regression for one outcome and returns
# the fitted cv.glmnet object, from which the tuning parameter (lambda)
# with the lowest mean cross-validated error can be read downstream.
lasso_cv <- function(outcome_name, covariates, data, N_folds = 30, ...){
  # glmnet requires matrix inputs, so pull the outcome and covariate
  # columns out of the data frame first
  y_mat <- as.matrix(data[ ,outcome_name])
  x_mat <- as.matrix(data[ ,covariates])
  cv.glmnet(
    x = x_mat,
    y = y_mat,
    type.measure = "deviance",  # CV error measured as deviance
    nfolds = N_folds,           # number of cross-validation folds
    alpha = 1,                  # alpha = 1 selects the lasso penalty
    ...
  )
}
# This function takes a cross-validated lasso model and returns its
# positive, non-intercept coefficients at the lambda that minimizes the
# mean cross-validated error (or at a user-supplied lambda value).
tidy_lasso_covariates <- function(lasso_fit, lambda = "lambda.min"){
  coefs <- coef(lasso_fit, s = lambda)
  # BUG FIX: the original used the *vectorized* ifelse() on this scalar
  # condition; ifelse() truncates its result to the length of the test, so
  # a multi-response coefficient list was silently cut to one element.
  coefs <- if (is.list(coefs)) as.list(coefs) else list(coefs)
  coefs %>%
    do.call(what = cbind) %>%
    rowMeans() %>%
    data.frame() %>%
    rownames_to_column() %>%
    select(rowname, ".") %>%
    rename(term = rowname, estimate = ".") %>%
    # NOTE(review): `> 0` also drops *negative* coefficients, not just
    # zeros -- confirm that `!= 0` wasn't intended; kept as-is.
    filter(estimate > 0) %>%
    filter(term != "(Intercept)")
}
# This function is a wrapper for lasso_cv and tidy_lasso_covariates that
# performs cross-validated lasso `sims` times, averages the lambdas that
# minimize CV error across the runs, and returns the covariates selected
# using that average lambda.
select_covariates <- function(outcome_name, covariates, data, N_folds = 30, sims = 10,...){
  # status output, not data -- message() (stderr) instead of print(paste0())
  message("Selecting covariates for ", outcome_name)
  # Do k-fold CV `sims` times, returning `sims` fitted cv.glmnet models
  cv_fits <- lapply(seq_len(sims),
                    function(i) lasso_cv(outcome_name = outcome_name,
                                         covariates = covariates,
                                         data = data,
                                         N_folds = N_folds,
                                         ...))
  # vapply() (not sapply()) guarantees a numeric vector of lambda minima
  min_lambdas <- vapply(cv_fits, function(fit) fit$lambda.min, numeric(1))
  # Use the first model (any would do) evaluated at the averaged lambda
  # to extract the selected covariates
  tidy_lasso_covariates(
    lasso_fit = cv_fits[[1]],
    lambda = mean(min_lambdas)) %>%
    mutate(outcome = outcome_name) %>%
    select(outcome, term, estimate)
}
|
54a4ada73624e1c646af63da3db2bbf9bee42ebf | c73d18ab97654306effe3696a5df5982ce56845d | /PipelineInitiator.R | 4ae436aef467aa043effd72392f56aba4a82a976 | [
"Apache-2.0"
] | permissive | SEED-research-lab/edX-clustering | 0234f99b8dc04c91d2cf8d56f87a379d4defedc3 | a7c9e90ff3d7d96fdfe5d0aff23daaa0877af7d5 | refs/heads/master | 2021-03-27T15:51:51.862784 | 2020-02-03T20:53:27 | 2020-02-03T20:53:27 | 89,016,040 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 12,999 | r | PipelineInitiator.R | ## ===================================================== ##
# Title: Pipeline Initiator ####
# Project: edX data pipeline for course user clustering analytics
# https://tzwilliams.github.io/edX-clustering/
#
# Copyright 2017 Krishna Madhavan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# Authors: Krishna Madhavan, Kerrie Douglas, Doipayan Roy, and Taylor Williams
# Affiliation: Purdue University
#
# Description: Initiates the clustering pipeline, sequentially calling the pipeline scripts
# and providing continuity between them.
#
# File input stack:
# {org}-{course}-{date}-course_structure-prod-analytics.json (source: edX)
# {org}-{course}-{date}-courseware_studentmodule-prod-analytics.sql (source: edX)
# {org}-{course}-{date}-auth_userprofile-prod-analytics.sql (source: edX)
#
# Package dependencies: cluster, e1071, tidyverse, readxl, jsonlite, readr, tcltk, beepr, data.table
#
# Changelog:
# 2017.07.14. Initial version
# 2017.08.06. Update to comments; spell check
# 2018.11.04. Automation updates
#
#
# Feature wish list: (*: planned but not complete)
# * remove unnecessary packages from the PackRat library
# * allow user to give a timeframe of events to include
## ===================================================== ##
## _Clean the environment ####
# NOTE(review): rm(list=ls()) wipes the *caller's* global environment when
# this script is sourced -- a known anti-pattern; kept because the
# downstream pipeline scripts assume a clean workspace.
rm(list=ls())
######### Internal functions ##########
#Function: Check to see if the current working directory contains an expected file.
# If not then prompt user to select the correct directory
WorkingDirectoryCheck <- function(expectedFile) {
  # Returns TRUE when `expectedFile` can be found, searching (in order)
  # the current working directory, its "analytics" subdirectory, and its
  # parent directory.  Side effect: when the file is found in the
  # subdirectory or the parent, the working directory is moved there
  # (matching the original behavior).
  if (file.exists(file.path(getwd(), expectedFile))) {
    return(TRUE)
  }
  # likely alternative locations, tried in order
  candidates <- c(file.path(getwd(), "analytics"), dirname(getwd()))
  for (candidate in candidates) {
    if (file.exists(file.path(candidate, expectedFile))) {
      setwd(candidate)
      return(TRUE)
    }
  }
  FALSE
}
######### Setup ##########
## _Check for correct working directory ##########
#check for correct expected working directory, inform user if incorrect and stop running script
current.dir <- getwd()
thisFile <- "PipelineInitiator.R"       # fixed: use <- (not =) for assignment
expectedFile <- file.path(thisFile)
if(!WorkingDirectoryCheck(expectedFile)){
  message("\nThe current working directory is NOT CORRECT.
          It is currently set to '", current.dir, "'
          Please set it to the directory containing the '", thisFile,
          "' file and rerun this script.\n")
  # fixed: a bare top-level `break` only halts by *erroring*
  # ("no loop for break/next"); stop() halts the script explicitly
  stop("Incorrect working directory.", call. = FALSE)
}
## _Get and create working directory variables ##########
#set working directory to correct subdirectory
orig.dir <- getwd()
analyticsPath <- file.path(orig.dir, "analytics")
setwd(analyticsPath)
## _Load required libraries ##########
# fixed: library() errors immediately if the package is missing, whereas
# require() only warns and lets the script fail later
library(stringr)
## _External function sourcing ##########
#load external functions
source("R/file-structure-functions.R")
######### Main ##########
#start a timer to track how long the pipeline takes to execute
start <- proc.time() #save the time (to report the pipeline's running time at the end of the script)
## Check for pre-defined starting directory and course prefix ####
if(!exists("filenamePrefix")) filenamePrefix <- NULL
if(!exists("dataFolderPath")) dataFolderPath <- NULL
if(!exists("courseName")) courseName <- NULL
## get data file locations from user ####
#get JSON
#Locate the JSON course structure data file to process (with sanitized user input)
filenameJSON <-
  SelectFile(prompt = "*****Select the JSON COURSE STRUCTURE file.***** (It should end with 'course_structure-prod-analytics.json')",
             defaultFilename = "course_structure-prod-analytics.json",
             filenamePrefix = ifelse(exists("filenamePrefix") & !is.null(filenamePrefix),
                                     yes = filenamePrefix, no = ""),
             fileTypeMatrix = matrix(c("JSON", ".json"), 1, 2, byrow = TRUE),
             dataFolderPath = ifelse(exists("dataFolderPath") & !is.null(dataFolderPath),
                                     yes = dataFolderPath, no = ""))
#extract course prefix; extract the folder path
# (prefix = everything before the standard edX filename suffix; course name
#  = the first two hyphen-separated fields, e.g. "org-course")
filenamePrefix <- str_extract(string = basename(filenameJSON),
                              pattern = ".*(?=course_structure-prod-analytics.json$)")
courseName <- str_extract(string = filenamePrefix,
                          pattern = "^[^-]*-[^-]*(?=-)")
dataFolderPath <- dirname(filenameJSON)
#try to automatically get the other files (ask for them if fails)
#Locate the clickstream data file to process (with sanitized user input)
filename_moduleAccess <-
  SelectFile(prompt = "*****Select the SQL CLICKSTREAM data file.***** (It should end with 'courseware_studentmodule-prod-analytics.sql')",
             defaultFilename = "courseware_studentmodule-prod-analytics.sql",
             filenamePrefix = ifelse(exists("filenamePrefix") & !is.null(filenamePrefix),
                                     yes = filenamePrefix, no = ""),
             fileTypeMatrix = matrix(c("SQL", ".sql"), 1, 2, byrow = TRUE),
             dataFolderPath = ifelse(exists("dataFolderPath") & !is.null(dataFolderPath),
                                     yes = dataFolderPath, no = ""))
#Locate the USER PROFILE data file to process (with sanitized user input)
filenameUserProfile <-
  SelectFile(prompt = "*****Select the SQL USER PROFILE data file.***** (It should end with 'auth_userprofile-prod-analytics.sql')",
             defaultFilename = "auth_userprofile-prod-analytics.sql",
             filenamePrefix = ifelse(exists("filenamePrefix") & !is.null(filenamePrefix),
                                     yes = filenamePrefix, no = ""),
             fileTypeMatrix = matrix(c("SQL", ".sql"), 1, 2, byrow = TRUE),
             dataFolderPath = ifelse(exists("dataFolderPath") & !is.null(dataFolderPath),
                                     yes = dataFolderPath, no = ""))
#import data files ####
data_courseStructure <- jsonlite::fromJSON(filenameJSON)
data_moduleAccess <- readr::read_tsv(filename_moduleAccess)
# only the profile columns the pipeline needs are read, to limit memory use
dataUserProfile <-
  data.table::fread(filenameUserProfile,
                    select = c("id", "user_id", "gender",
                               "year_of_birth", "level_of_education", "country"),
                    quote = "")
DirCheckCreate(subDir = courseName)
#ask user if they want to pre-specify the clustering technique and number of clusters
# Interactive loop: repeats until the user answers Y or N.  On "Y" it
# collects all clustering choices up front and saves them for 3_Clustering.R.
repeat{
  beepr::beep(sound = 10) #notify user to provide input
  pre_specify <- readline(prompt="Would you like to PRE-SPECIFY the clustering technique, population, and number of clusters? (Y/N):
                          (CAUTION: not recommended if ideal number of clusters for data is not yet known)");
  if(pre_specify == "y" || pre_specify == "Y"){
    pre_specify <- TRUE
    ######### User providing dataset details #####
    beepr::beep(sound = 10) #notify user to provide input
    cat("\nEnter a description of this datasest (to be included on graphs).
        (suggested format: [Data source, e.g., edX], [Course number, e.g., nano515x], [Data date, e.g., Data from 2016.11.18])")
    dataSetDescription <- readline(prompt="Description: ");
    ## get clustering technique
    repeat{
      clusterTypeSelection <- readline(prompt="\nEnter '1' or {nothing} for K-means clustering,
                                      '2' for c-means (fuzzy) clustering: ");
      #exit loop and continue script if input valid
      if(clusterTypeSelection == 1 |
         clusterTypeSelection == "" |
         clusterTypeSelection == 2 ){
        break
      }
      beepr::beep(sound = 10) #notify user to provide input
    } #repeat if none of the conditions were met (i.e., user input was invalid)
    ##Get user input for number of clusters
    repeat{
      K <- readline("\nEnter the desired number of clusters (maximum 10): ");
      K <- as.integer(K);
      # NOTE(review): `break` works here only because ifelse() evaluates its
      # `yes` argument lazily -- an unusual but functional construct
      ifelse(!is.na(K) & (K > 0) & (K <= 10),
             yes = break,
             no = print("Please enter a valid number.", quote=FALSE))
    }
    ## get population
    repeat{
      userSubsetSelection <- readline(prompt="\n Who to cluster?:
                                      Enter '1' or {nothing} for all learners,
                                      '2' or 'f' for female learners,
                                      '3' or 'm' for male learners,
                                      '4' live learners,
                                      '5' late learners,
                                      '6' archive learners,
                                      '7' or 'c' custom ID list");
      # set if the user subgroups should be calculated based on selection
      if(userSubsetSelection == 4 |
         userSubsetSelection == 5 |
         userSubsetSelection == 6){
        inputLLA <- '1' #set to find live, late, and archive groups,
      }else{
        inputLLA <- '2' #don't find live, late, and archive groups,
      }
      #exit loop and continue script if input valid
      if(userSubsetSelection == 1 | userSubsetSelection == "" |
         userSubsetSelection == 2 | userSubsetSelection == 'f' | userSubsetSelection == 'F' |
         userSubsetSelection == 3 | userSubsetSelection == 'm' | userSubsetSelection == 'M' |
         userSubsetSelection == 4 |
         userSubsetSelection == 5 |
         userSubsetSelection == 6 |
         userSubsetSelection == 7 | userSubsetSelection == 'c'){
        break
      }
      beepr::beep(sound = 10) #notify user to provide input
    } #repeat if none of the conditions were met (i.e., user input was invalid)
    #save selections to file to be recalled in 3_Clustering.R
    save(list = c("dataSetDescription", "clusterTypeSelection", "K", "userSubsetSelection", "inputLLA"),
         file = "initiator_userPreselectionValues.RData")
    break
  }else if(pre_specify == "n" || pre_specify == "N"){
    # "N": save NULL selections so 3_Clustering.R knows to prompt interactively
    pre_specify <- FALSE
    dataSetDescription <- NULL
    clusterTypeSelection <- NULL
    K <- NULL
    userSubsetSelection <- NULL
    inputLLA <- NULL
    save(list = c("dataSetDescription", "clusterTypeSelection", "K", "userSubsetSelection", "inputLLA"),
         file = "initiator_userPreselectionValues.RData")
    break
  }
  else{
    message("Please enter either 'Y' or 'N'.\n")
  }
} # repeat if invalid input provided
#source (run) the pipeline script files in sequence ####
source("1_extractModules.R")
source("2_Preprocessing.R")
source("2b_genderedSubsets.R")
source("3_Clustering.R")
message("\n**** Cluster graph created! ****\n")
#ask user if additional cluster charts are desired, if so run '3_Clustering.R' again
repeat{
  beepr::beep(sound = 10) #notify user to provide input
  continueClustering <- readline(prompt="Would you like to create another cluster graph from this data? (Y/N): ");
  # delete the pre-selection data file
  # (so every rerun of 3_Clustering.R prompts for fresh choices)
  if (file.exists("initiator_userPreselectionValues.RData")) file.remove("initiator_userPreselectionValues.RData")
  #if user selected to create an additional cluster graph
  if(continueClustering == "y" || continueClustering == "Y"){
    source("3_Clustering.R")
    message("\n**** Cluster graph created! ****\n")
  }
  else if(continueClustering == "n" || continueClustering == "N"){
    break
  }
  else{
    message("Please enter either 'Y' or 'N'.\n")
  }
  #repeat unless if user indicated to end
}
# TW:bug: this variable appears to be deleted with the rm(list=ls()) commands within the
#   pipeline.  Find a more robust way to save this directory and the start time.
#return working directory to where it began when the script was called
# setwd(orig.dir)
#print the amount of time the script required
# cat("\n\n\nPipeline processing runtime details (in sec):\n")
# print(proc.time() - start)
#Indicate pipeline completion
message("\n**** Clustering pipeline complete! ****\n")
#Clear environment variables
rm(list=ls())
|
2ab985ad7f7e9c5f412a21131963f1ee3b8a8755 | d8774b9b2c2e759f9b20aeef51b81c31e92d7103 | /urban_mobility_research/random_traffic_distribution/Changi_2.R | 7d65f0b42268f61e8209ab9c7b62bc4a52cedbea | [] | no_license | ziqiguo/Guo-Ziqi-Undergrad | 78e33beefabf5f4442a3fc90656e965fe34e05f5 | eaacd92e8046bf229ccd7376b86ef06f51575927 | refs/heads/master | 2021-06-23T15:41:11.435911 | 2017-07-28T04:25:16 | 2017-07-28T04:25:16 | 64,583,012 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,667 | r | Changi_2.R | # Preparation of framework
# Read firm locations (buffered) and network nodes; attach firm counts to
# nodes so that, within each traffic zone, trips can later be assigned to
# nodes with probability proportional to firm activity.
firm <- read.csv('firm_buffer.csv')
firm <- firm[c(1,2,3,4,5,6,8)]
points <- read.csv('nodes.csv')
points <- merge(points,firm,by.x='ID',by.y='id',all.x=TRUE)
points <- subset(points,MTZ_1169!=549060)
results <- points[,c(1,2,8)]
names(results) <- c('ID','Zone','Num')
results <- results[results$Zone!=0,]
results <- results[order(results$Zone),]
# nodes without firm data get weight 1 so every node remains selectable
results[is.na(results$Num),3] <- 1
results[results$Num==0,3] <- 1
library(plyr)
changi <- read.csv('changi_zones.csv')
changi <- changi[3]
names(changi) <- 'zone'
changi <- changi$zone
changi <- changi[changi!=549060]
boundary <- read.csv('boundary_nodes.csv')
boundary <- boundary[2]
# hand-assigned crossing weights for the 12 boundary nodes (they sum to 1:
# seven nodes share 0.4, five nodes share 0.6)
boundary$weight <- c(0.4/7,0.6/5,0.4/7,0.4/7,0.6/5,0.4/7,0.4/7,0.6/5,0.6/5,0.6/5,0.4/7,0.4/7)
delete <- c(21171, 1380033601, 1380027709, 14505, 1380029086, 17903, 14378)
results <- subset(results,!(ID %in% delete))
# convert per-node firm counts into within-zone selection probabilities
results_sum <- ddply(results,'Zone',transform,Num=sum(Num))
results_sum <- subset(results_sum,!duplicated(Zone))
results <- merge(results,results_sum[c(2,3)],by='Zone')
results <- results[c(2,1,3,4)]
names(results) <- c('ID','Zone','Num','Rate')
results$Rate <- results$Num/results$Rate
results <- results[c(2,1,3,4)]
# cumulative probabilities per zone enable inverse-CDF node sampling below
a <- aggregate(results$Rate,by=list(Zone=results$Zone),FUN=cumsum)
results <- results[order(results$Zone),]
results$Cumsum <- unlist(a$x)
boundary$Cumsum <- cumsum(boundary$weight)
# Preparation of trips data
# Name the OD matrix as InputOD.csv
dat <- read.table('InputOD.csv',sep='',dec='.',stringsAsFactors=FALSE,head=T)
hour <- 2   # OD rates are per hour; we simulate a 2-hour peak period

# Round each element of x down or up at random so that the expected value
# equals x (probability of rounding up = fractional part of x).
# Vectorized replacement for the original per-row for-loops.
randomized_round <- function(x) {
  rd <- runif(length(x), min = floor(x), max = ceiling(x))
  ifelse(rd >= x, floor(x), ceiling(x))
}

# Append randomly-rounded AM/PM trip counts to an OD subset.
add_trip_counts <- function(od) {
  od$AM <- randomized_round(hour * (od$AMLGV + od$AMHGV))
  od$PM <- randomized_round(hour * (od$PMLGV + od$PMHGV))
  od
}

# Expand an OD table into one row per individual trip for the given period
# column ("AM" or "PM"), adding the uniform draws and empty node columns
# used by the node-assignment step below.  The column layout
# (origin, desti, count, rd_o, rd_d, node_o, node_d) matches the original.
expand_od <- function(od, count_col) {
  per_trip <- od[rep(seq_len(nrow(od)), od[[count_col]]),
                 c("origin", "desti", count_col)]
  per_trip$rd_o <- runif(nrow(per_trip), min = 0, max = 1)
  per_trip$rd_d <- runif(nrow(per_trip), min = 0, max = 1)
  per_trip$node_o <- NA
  per_trip$node_d <- NA
  per_trip
}

# Internal trips (both ends in Changi), trips leaving Changi, and trips
# entering Changi.  The original's triplicated ~100-line prep is now one
# helper applied three times.
trips <- add_trip_counts(subset(dat, dat$desti %in% changi & dat$origin %in% changi))
am_trips <- expand_od(trips, "AM")
pm_trips <- expand_od(trips, "PM")

leaving <- add_trip_counts(subset(dat, !(dat$desti %in% changi) & dat$origin %in% changi))
am_leaving <- expand_od(leaving, "AM")
pm_leaving <- expand_od(leaving, "PM")

entering <- add_trip_counts(subset(dat, dat$desti %in% changi & !(dat$origin %in% changi)))
am_entering <- expand_od(entering, "AM")
pm_entering <- expand_od(entering, "PM")
# Random trip distributor ---------------------------------------------------
# The original repeated the same O(rows x zone-nodes) sampling loop eight
# times, and its `for (i in 1:nrow(...))` pattern errors on zero-row
# inputs; both are replaced by two helpers.

# Pick one node per trip end inside Changi: for each zone, invert the
# per-zone cumulative firm-weight table in `results` at the uniform draw.
pick_zone_node <- function(zones, rd) {
  mapply(function(zone, r) {
    tab <- results[results$Zone == zone, ]
    tab$ID[which(r <= tab$Cumsum)[1]]
  }, zones, rd, USE.NAMES = FALSE)
}

# Pick one boundary crossing node per trip using the fixed cumulative
# crossing weights in `boundary` (node IDs are in its first column).
pick_boundary_node <- function(rd) {
  idx <- vapply(rd, function(r) which(r <= boundary$Cumsum)[1], integer(1))
  boundary[[1]][idx]
}

# Internal trips: both ends are Changi nodes
am_trips$node_o <- pick_zone_node(am_trips$origin, am_trips$rd_o)
am_trips$node_d <- pick_zone_node(am_trips$desti, am_trips$rd_d)
pm_trips$node_o <- pick_zone_node(pm_trips$origin, pm_trips$rd_o)
pm_trips$node_d <- pick_zone_node(pm_trips$desti, pm_trips$rd_d)

# Leaving trips: origin inside Changi, destination is a boundary node
am_leaving$node_o <- pick_zone_node(am_leaving$origin, am_leaving$rd_o)
am_leaving$node_d <- pick_boundary_node(am_leaving$rd_d)
pm_leaving$node_o <- pick_zone_node(pm_leaving$origin, pm_leaving$rd_o)
pm_leaving$node_d <- pick_boundary_node(pm_leaving$rd_d)

# Entering trips: origin is a boundary node, destination inside Changi
am_entering$node_o <- pick_boundary_node(am_entering$rd_o)
am_entering$node_d <- pick_zone_node(am_entering$desti, am_entering$rd_d)
pm_entering$node_o <- pick_boundary_node(pm_entering$rd_o)
pm_entering$node_d <- pick_zone_node(pm_entering$desti, pm_entering$rd_d)
# Final processing: stack the assigned node pairs for each demand period
# and draw a departure time uniformly within that period.
AM <- rbind(am_entering[c("node_o", "node_d")],
            am_leaving[c("node_o", "node_d")],
            am_trips[c("node_o", "node_d")])
names(AM) <- c('origin', 'desti')
AM$time <- runif(nrow(AM), min = 7, max = 9)     # 07:00-09:00
PM <- rbind(pm_entering[c("node_o", "node_d")],
            pm_leaving[c("node_o", "node_d")],
            pm_trips[c("node_o", "node_d")])
names(PM) <- c('origin', 'desti')
PM$time <- runif(nrow(PM), min = 18, max = 20)   # 18:00-20:00
|
2972c212728f506b7c1f59f675d120e7f49bf503 | b450a40cfc3a4aa9e913c1b656b3415d670388da | /_lousy_new_proj/bn/fatigue_net_assist.R | 18f047bce65ab49069e473a60f2fcfc126c3305f | [] | no_license | hurrialice/b_affy | a9058f56dc8342d54b358dba5ab8dbfc1fcb4dae | 8f4bbbc8e0489c36e93bb4d847bd80c3b3254397 | refs/heads/master | 2021-09-04T06:58:57.036127 | 2018-01-16T23:03:34 | 2018-01-16T23:03:34 | 105,929,300 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,147 | r | fatigue_net_assist.R | library(bnlearn)
library(readr)
library(dplyr)
library(caret)
library(limma)
library(ROCR)
library(future)
library(annotate)
library(hgu133plus2.db)
# get background genes
# `chip` is the Affymetrix HG-U133 Plus 2 annotation database; `bg_genes`
# is the cached probe-to-gene lookup table (see the commented-out
# derivation below for how it was built).
chip <- hgu133plus2.db
bg_genes <- read_rds("bg_genes.rds")
#bg_genes <- AnnotationDbi::select(x = chip, keys = rownames(data.matrix), keytype = 'PROBEID',
#                         columns = c('ENTREZID','GENENAME','SYMBOL')) %>% tbl_df()
# for the sake of easier visualization
# Peek at the top-left corner of a matrix (up to n x n, default 10).
# Generalized so matrices smaller than 10 x 10 no longer raise a
# subscript-out-of-bounds error; drop = FALSE keeps a matrix result.
look <- function(m, n = 10) {
  m[seq_len(min(n, nrow(m))), seq_len(min(n, ncol(m))), drop = FALSE]
}
# plan for parallel processing via future
future::plan(multicore)
## ------ data partitions ------
# fixed seed so the leave-one-out partitions and CV draws are reproducible
set.seed(1234)
# Build leave-one-out train index sets: element i holds every sample index
# except i.  Reads the global expression matrix `data.matrix` (samples in
# columns) -- NOTE(review): that variable name shadows base::data.matrix.
make_par <- function(){
  nsamples <- ncol(data.matrix)
  # lapply() replaces the original grow-a-list loop; seq_len() is safe
  # when nsamples is 0 (the old 1:nsamples would yield c(1, 0))
  lapply(seq_len(nsamples), function(i) setdiff(seq_len(nsamples), i))
}
# init list-like object for data storage
# Creates one empty list per name in `clist`, assigned into the global
# environment.  NOTE(review): writing to globalenv() from a function is a
# deliberate side effect here; downstream code depends on these globals.
init_list <- function(clist){
  for (i in clist){
    assign(x = i, value = list(), envir = globalenv())
  }
}
## ------ discretize by gene ---------
# Discretize an expression matrix row-by-row (gene-by-gene): z-score each
# row, then map z < -psigma to -1, |z| < psigma to 0, and z > psigma to 1.
# Values whose |z| equals psigma exactly are left as their z-score,
# matching the original thresholding.
make_discre <- function(m, psigma) {
  discretize_row <- function(v) {
    z <- (v - mean(v)) / sd(v)
    d <- z
    d[z < -psigma] <- -1
    d[abs(z) < psigma] <- 0
    d[z > psigma] <- 1
    d
  }
  # apply() over rows yields genes in columns; transpose back
  t(apply(m, 1, discretize_row))
}
## ------- DE -----------
# Limma differential-expression pipeline: fit a no-intercept linear model
# on the group labels, contrast AMI vs control, and keep probes with
# |logFC| > 0.4 (up to 1000) and unadjusted p < 0.05.
DE_pipe <- function(m, lab){
  # design matrix with one column per group
  design_mat <- model.matrix(~0 + lab)
  colnames(design_mat) <- c("AMI", "control")
  linear_fit <- lmFit(m, design_mat)
  ami_vs_ctrl <- makeContrasts(AMI-control, levels=design_mat)
  # apply the contrast and moderate the variances
  contrast_fit <- contrasts.fit(linear_fit, ami_vs_ctrl)
  moderated_fit <- eBayes(contrast_fit)
  top <- topTable(moderated_fit, coef=1, lfc = 0.4, number = 1000)
  top[top$P.Value < 0.05, ]
}
## ---------- entrez and symbols ---------
# Map the probe IDs (rownames of `t`, e.g. a topTable result) to Entrez
# IDs, gene names and symbols, and join them onto the table.
# BUG FIX: the original ignored its `t` argument entirely and operated on
# the *global* `tab`, so it only worked by accident after DE_pipe() had
# left a `tab` in the workspace.
anno <- function(chip = hgu133plus2.db, t ){
  gs2 <- AnnotationDbi::select(x = chip, keys = rownames(t), keytype = 'PROBEID',
                   columns = c('ENTREZID','GENENAME','SYMBOL')) %>% tbl_df()
  t$PROBEID <- rownames(t)
  left_join(t, gs2) %>% tbl_df()
}
### ----- feature tables --------
# Builds the feature table for network learning: samples in rows, the
# selected probes (columns renamed to gene symbols via `dict`) in columns,
# plus a binary `label` column (1 = AMI, 0 = control).
# NOTE(review): reads the global phenotype table `ph`; the row order of
# `ph` must match the column order of `ori_dm` -- confirm at the call site.
shrink_features <- function(ori_dm, fea_names, dict = bg_genes){
  m <- t(ori_dm)
  m_sub <- m[,fea_names]
  #browser()
  # rename probe columns to gene symbols via the lookup table
  colnames(m_sub) <- dict$SYMBOL[match(colnames(m_sub), dict$PROBEID)]
  df <- as.data.frame(m_sub)
  df$label <- ifelse(ph$disease_state == 'AMI', 1, 0)
  # NOTE(review): cell-wise apply() of as.factor() discards factor
  # attributes in a non-obvious way; the final as.data.frame() is what
  # yields the discrete columns bnlearn expects -- verify column types.
  df <- apply(df, c(1,2), as.factor)
  as.data.frame(df)
}
## ----- boot tabu -----------
# get the from-to dataframe
# Bootstrap arc strengths for a Bayesian network: start tabu search from a
# tree-augmented naive Bayes (TAN) structure rooted at `label`, running
# R = 200 replicates that each resample the full data set (m = nrow).
# Returns the strength table (invisibly, as the last line is an assignment).
boot_tabu <- function(ori_tab){
  start_tan <- tree.bayes(ori_tab, 'label', head(colnames(ori_tab), -1))
  bt <- boot.strength(data = ori_tab, R = 200, m = nrow(ori_tab), algorithm = 'tabu', debug = T,
                      algorithm.args = list(start = start_tan, tabu = 50))
}
|
ca529404d1d1b0deccdfe09f23aced982421494f | b84a28f8aa60de0a523c39578061e9026ab7ac42 | /R/average_dilutions.R | b51d6b832a16d8bf9fed67e0685090d2e4006260 | [
"MIT"
] | permissive | aef1004/bactcountr | 8fd09946157d3b080de7ea2c4a99a3b7b350e3a0 | 587090da71f2a802991cd8b06ce238a72ee3667f | refs/heads/master | 2023-08-22T16:16:21.587341 | 2021-10-20T17:39:14 | 2021-10-20T17:39:14 | 281,457,825 | 0 | 1 | NOASSERTION | 2020-09-18T20:37:36 | 2020-07-21T17:09:14 | R | UTF-8 | R | false | false | 738 | r | average_dilutions.R | #' Average CFUs for each sample
#'
#' This is the method used by Mercedes lab - they take all of the dilutions for which there are countable CFUs and find an average of the CFUs for each sample. This is an alternative to "pick_one_dilution."
#'
#' @param df dataframe that contains columns: dilution, CFU, and grouping variables
#' @param CFU_column the name of the CFU column in the dataframe
#' @param grouping_columns the columns names to group the data based on (i.e., Group, Timepoint)
#'
#' @return A data frame with one row per unique combination of
#'   `grouping_columns` and the mean of `CFU_column` (with NAs removed) in
#'   a column named after `CFU_column`.
#' @export
#'
#' @examples average_dilutions(CFU_raw_formatted, "CFUs", c("group", "organ", "replicate"))
#'
#' @importFrom dplyr %>%
#' @importFrom rlang .data
#'
average_dilutions <- function(df, CFU_column, grouping_columns) {
  # BUG FIX: the original body was empty, so the exported, documented
  # function silently returned NULL.  Implemented per the description
  # above: group by the requested columns and average the CFU counts.
  out <- stats::aggregate(df[[CFU_column]], by = df[grouping_columns],
                          FUN = mean, na.rm = TRUE)
  # aggregate() names the computed column "x"; restore the CFU column name
  names(out)[names(out) == "x"] <- CFU_column
  out
}
|
e9155530881972ae74c901c3ba64ebe918ef2d92 | 3100f09de6e070c6e2c1b296400cd6844b14555d | /man/read.GENEPOP.Rd | 625a8a4b6c500d3d2868bbaba5a915c45aa17ed9 | [] | no_license | cran/FinePop2 | 84d6cd924063d7338b29d8ef438961a614eef3f4 | 6dc575b85a84dfb674a524ff6ee7c1fd4ff5ab2f | refs/heads/master | 2021-02-06T22:31:35.842384 | 2020-02-28T09:20:02 | 2020-02-28T09:20:02 | 243,953,376 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,870 | rd | read.GENEPOP.Rd | \name{read.GENEPOP}
\alias{read.GENEPOP}
\title{Create a genotype data object of populations from a GENEPOP format file.}
\description{
This function reads a GENEPOP format file (Rousset 2008) and parse it into an R data object. This data object provides a summary of genotype/haplotype of each sample, allele frequency in each population, and marker status. This data object is used in downstream analysis of this package.
}
\usage{
read.GENEPOP(genepop, popname = NULL)
}
\arguments{
\item{genepop}{A character value specifying the name of the GENEPOP file to be analyzed.}
\item{popname}{A character value specifying the name of the plain text file containing the names of subpopulations to be analyzed. This text file must not contain other than subpopulation names. The names must be separated by spaces, tabs or line breaks. If this argument is omitted, serial numbers will be assigned as subpopulation names.}
}
\value{
\item{num_pop}{Number of subpopulations.}
\item{pop_sizes}{Number of samples in each subpopulation.}
\item{pop_names}{Names of subpopulations.}
\item{ind_names}{Names of samples in each subpopulation.}
\item{num_loci}{Number of loci.}
\item{loci_names}{Names of loci.}
\item{num_allele}{Number of alleles at each locus.}
\item{allele_list}{A list of alleles at each locus.}
\item{ind_count}{Observed count of genotyped samples in each subpopulation at each locus.}
\item{allele_count}{Observed count of genotyped alleles in each subpopulation at each locus.}
\item{allele_freq}{Observed allele frequencies in each subpopulation at each locus.}
\item{genotype}{Genotypes of each sample at each locus in haploid designation.}
\item{call_rate_loci}{Call rate of each locus (rate of genotyped samples at each locus).}
\item{call_rate_ind}{Call rate of each sample (rate of genotyped markers for each sample).}
\item{He}{Expected heterozygosity in each subpopulation.}
\item{Ho}{Observed heterozygosity in each subpopulation.}
}
\references{
Rousset F (2008) Genepop'007: a complete reimplementation of the Genepop software for Windows and Linux. \emph{Mol. Ecol. Resources}, 8, 103-106.
}
\author{Reiichiro Nakamichi}
\examples{
# Example of GENEPOP file
data(jsmackerel)
jsm.ms.genepop.file <- tempfile()
jsm.popname.file <- tempfile()
cat(jsmackerel$MS.genepop, file=jsm.ms.genepop.file, sep="\n")
cat(jsmackerel$popname, file=jsm.popname.file, sep=" ")
# Read GENEPOP file with subpopulation names.
# Prepare your GENEPOP file and population name file in the working directory.
# Replace "jsm.ms.genepop.file" and "jsm.popname.file" by your file names.
popdata <- read.GENEPOP(genepop=jsm.ms.genepop.file, popname=jsm.popname.file)
# Read GENEPOP file without subpopulation names.
popdata.noname <- read.GENEPOP(genepop=jsm.ms.genepop.file)
}
|
81b08955569213ec6f5433391f8abcfe62953c28 | dd9b083775295063b049f0a8694e501dacbf0f6a | /canopy-height.r | 88e7b8e872cf6a73af992abc2d722e62ea188bfe | [] | no_license | megawac/geog471-scripts | 46be54146196cad393918aaea54811d28e600c78 | a524f62891611fe50fb61aab0c32e7b240228855 | refs/heads/master | 2023-06-07T04:49:40.480079 | 2016-03-21T12:40:37 | 2016-03-21T12:40:37 | 54,227,653 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,971 | r | canopy-height.r | # Adapted from http://quantitativeecology.org/using-rlidar-and-fusion-to-delineate-individual-trees-through-canopy-height-model-segmentation/
# expects that we're using our 471 structure and that the group folder is configured as
# the Z:/ drive on the local machine
# places all the canopy height models in the QUBS_LAS/Derived folder along with corresponding
# assets used to derive the model.
# First things first, we need to set up some directories to keep the raw data
# separate from that produced by the analysis. I basically have an input (.las)
# and output (.las, .dtm, etc) directory in a dropbox folder.
# NOTE(review): machine-specific absolute paths -- edit before running on
# another computer.
mainDir <- file.path("F:", "Users", "Dynomite", "471", "QUBS_LAS")
inDir <- file.path(mainDir, "LAS3")      # raw .las tiles (input)
outDir<- file.path(mainDir, "Derived")   # derived products (output)
fusionPath <- file.path("C:","Fusion")   # FUSION executables
dir.create(outDir)
# Concat strings helper: p("a", "b") -> "ab"; p(c("a","b"), "x") -> "axbx".
# `sep` joins the arguments elementwise, then the same separator collapses
# the resulting vector into a single string.
p <- function(..., sep = '') {
  pieces <- paste(..., sep = sep)
  paste(pieces, collapse = sep)
}
name <- 'processed3'# strsplit(image, '[.]')[[1]][1]
res <- 1  # grid resolution in meters
# fixed: "\\.las$" anchors on the file extension; the original glob-style
# "*.las" is not a valid regular expression and only matched by accident
images <- list.files(inDir, pattern = "\\.las$")
# fixed: file.path() is vectorized, so the sapply() wrapper was unnecessary
imagePaths <- file.path(inDir, images)
allImageString <- paste(imagePaths, collapse=' ')
# NOTE(review): the paths below are interpolated into the shell command
# unquoted -- they will break if any directory name contains spaces.
# Each stage is skipped when its output file already exists.
# Read in the .las file and use FUSION to produce a .las file of points that
# approximate the ground's surface (bare-earth points).
# http://forsys.cfr.washington.edu/fusion/FUSION_manual.pdf#page=94&zoom=auto,70,720
if (!file.exists(file.path(outDir, p(name, "-ground-points.las")))) {
  system(paste(file.path(fusionPath, "groundfilter.exe"),
               "/gparam:0 /wparam:1 /tolerance:1 /iterations:10",
               file.path(outDir, p(name, "-ground-points.las")),
               res, #grid size in meters
               allImageString,
               sep=" "))
} else {
  print("skipping groundfilter")
}
# Next we use gridSurfaceCreate to compute the elevation of each grid cell using the
# average elevation of all points within the cell. Check the manual for arguments and usage
# http://forsys.cfr.washington.edu/fusion/FUSION_manual.pdf#page=88&zoom=auto,70,720
if (!file.exists(file.path(outDir, p(name, "-surface", ".dtm")))) {
  system(paste(file.path(fusionPath, "gridsurfacecreate.exe"),
               file.path(outDir, p(name, "-surface", ".dtm")),
               p(res, " M M 1 12 2 2"),
               file.path(outDir, p(name, "-ground-points.las")),
               sep=" "))
} else {
  print("skipping groundsurface")
}
# Next we use CanopyModel to create a canopy surface model using a LIDAR point cloud.
# By default, the algorithm used by CanopyModel assigns the elevation of the highest return within
# each grid cell to the grid cell center.
#http://forsys.cfr.washington.edu/fusion/FUSION_manual.pdf#page=32&zoom=auto,70,720
if (!file.exists(file.path(outDir, p(name, "-final", ".dtm")))) {
  system(paste(file.path(fusionPath, "canopymodel.exe"),
               p("/ground:", file.path(outDir, p(name, "-surface", ".dtm"))),
               file.path(outDir, p(name, "-final", ".dtm")),
               p(res, " M M 1 12 2 2"),
               allImageString,
               sep=" "))
} else {
  print("skipping canopymodel")
}
# Lastly, we use DTM2ASCII to convert the data stored in the PLANS DTM format into an ASCII
# raster file. Such files can be imported into GIS software such as ArcGIS or QGIS.
# http://forsys.cfr.washington.edu/fusion/FUSION_manual.pdf#page=88&zoom=auto,70,720
if (!file.exists(file.path(outDir, p(name, ".asc")))) {
  system(paste(file.path(fusionPath, "dtm2ascii.exe"),
               file.path(outDir, p(name, "-final", ".dtm")),
               file.path(outDir, p(name, ".asc")),
               sep=" "))
} else {
  print("skipping ascii")
}
# Second, we process the resulting CHM in rLiDAR
#install.packages("rLiDAR", type="source")
#install.packages("raster", dependencies = TRUE)
library(rLiDAR)
library(raster)
library(rgeos)
#
# # Import the LiDAR-derived CHM file that we just made in the above section and plot it
# chm<-raster(file.path(outDir, p(name, ".asc")))
# #
# png(filename=file.path(mainDir, outDir, p(name, "-chm.png")))
# plot(chm)
# dev.off()
|
d2499ddf864a49fce1473eae659d440b89a32975 | b67cbead02c2804cc672f425a3831db87839ac56 | /R/ODE.R | 06580dd55680494ecd2fb0882c470cb34a60ff10 | [] | no_license | stanleesocca/scientific-computing-r | b7babdb03d5ae0c938748d40fc63e36ef3dee9d5 | e45f8cc4372e73fe5e5ef799bab3f40763c6a3f4 | refs/heads/master | 2022-02-13T16:41:57.442722 | 2017-06-09T21:16:23 | 2017-06-09T21:16:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 369 | r | ODE.R | #########
# ODE.R
#
# S4 base class for objects advanced by an ODE solver: a numeric state
# vector together with its rate-of-change vector.
# NOTE(review): the generics getState/getRate must be declared elsewhere
# in the package; setMethod() fails if they are not.
setClass("ODE", slots = c(
  state = "numeric",
  rate = "numeric"
))
# Accessor: returns the current state vector.
setMethod("getState", "ODE", function(object, ...) {
  # Gets the state variables.
  return(object@state)
})
# Rate computation: the base class does nothing (returns the object
# invisibly); concrete subclasses override this with the actual ODE.
setMethod("getRate", "ODE", function(object, state, rate, ...) {
  # Gets the rate of change using the argument's state variables.
  invisible(object)
})
ac14cabc0b906e60dd7e43a3e8d8706692b0de27 | c48de5696547bfc707c883859929adadedfee808 | /Accidentalidad/app.R | 59cf23ef1e507bed7c14e32615039f6860bbaa10 | [] | no_license | deescobarc/trabajo_tae | 04d7c9b57ec139754caff952c9cc6a8c2b3e0071 | 20e9520bd090e792c7c8fe02cb3473cc481a6b1a | refs/heads/master | 2023-01-22T13:55:14.822097 | 2020-12-02T00:25:04 | 2020-12-02T00:25:04 | 317,703,862 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 29,762 | r | app.R | #Cargando librerias necesarias
library(shiny)
library(shinydashboardPlus)
library(shinydashboard)
library(DT)       # interactive tables (renderDT / dataTableOutput)
library(leaflet)  # interactive maps
library(tidyverse)
library(dplyr)    # already attached by tidyverse; kept for explicitness
# Load the pre-built data sets ("Se cargan los datos"); both .RData files
# must sit in the app's working directory.
load('datos.RData')
load('prediccion_diaria.RData')
#Definiendo menu de inicio
ui <- dashboardPagePlus(skin="green",
dashboardHeaderPlus(title="Menu principal",titleWidth = 250, fixed = TRUE),
dashboardSidebar(width = 250,
sidebarMenu(style="position:fixed; overflow:visible",
menuItem(("Inicio"), tabName = "inicio", icon = icon("home")),
menuItem("Visualización", tabName = "visualización", icon = icon("th")),
menuItem("Predicción", tabName = "prediccion", icon = icon("angle-double-right")),
menuItem("Agrupamiento", tabName = "agrupamiento", icon = icon("map-marked")),
menuItem("Creditos", tabName = "creditos", icon = icon("hands-helping"))
)
),
dashboardBody(
tabItems(
#Boton inicio
tabItem(tabName = "inicio",
br(),
h1("Accidentalidad en Medellín",align="center",style="color:#1C762F"),
br(),
br(),
p("Esta aplicación web tiene como finalidad poder visualizar y analizar con mas facilidad los datos de accidentalidad
de la ciudad de Medellín ocurridos entre 2014 y 2018, por lo cual se utilizó la información de ", a(href="https://geomedellin-m-medellin.opendata.arcgis.com/search?tags=movilidad", "Datos abiertos de movilidad"),
"que pública la alcaldía de Medellín.",style = "font-family: 'times'; font-size:18pt; margin-left: 3em; margin-right: 3em;text-align: justify"),
br(),
p("En esta aplicacioón usted encontrará tres modulos funcionales donde podrá:",style = "font-family: 'times'; font-size:18pt; margin-left: 3em; margin-right: 3em;text-align: justify"),
br(),
tags$ul(
tags$li(p("Visualizar los datos historicos de accidentalidad en la ciudad de Medellín entre los años 2014 y 2018 por
tipo de accidente (atropello, caida de ocupante, choque, incendio, volcamiento, otro), donde usted podrá elegir el periodo de su interes.",style = "font-family: 'times'; font-size:18pt;text-align: justify;margin-right: 1.5em"),style = "margin-left: 3em;margin-right: 3em;text-align: justify"),
tags$li(p("Visualizar una gráfica de prediccion para el tipo de accidente Choque, donde usted podrá definir una ventana temporal y visualizar los datos predichos para dicho periodo.",style = "font-family: 'times'; font-size:18pt;text-align: justify;margin-right: 1.5em"),style = "margin-left: 3em;margin-right: 3em;text-align: justify"),
tags$li(p("Usted podrá visualizar un mapa de la ciudad de Medellín, con los accidentes presentados por cada barrio, los datos aqui presentados se encientran agrupados según criterios definidos en dicho módulo.",style = "font-family: 'times'; font-size:18pt;text-align: justify;margin-right: 1.5em"),style = "margin-left: 3em;margin-right: 3em;text-align: justify")
)
),
#Boton visualizacion
tabItem(tabName = "visualización",
br(),
h1("Visualización de datos históricos",align="center",style="color:#1C762F"),
br(),
div(align="center",
# Select type of trend to plot
selectInput(inputId = "type", label = strong("Seleccione el tipo de accidente desea visualizar"),
choices = unique(datos$CLASE),
selected = "Choque"),
selectInput(inputId = "comuna", label = strong("Seleccione la comuna que desea visualizar"),
choices = unique(datos$COMUNA),
selected = "--"),
# Select date range to be plotted
dateRangeInput("date", strong("Seleccione la ventana de tiempo"), start = "2014-01-01", end = "2014-12-31",
min = "2014-01-01", max = "2018-12-31"),
h2("Mapa histórico",align="center",style="color:#1C762F"),
leafletOutput("result",height = 400, width = 750)
),
br(),
fluidRow(
box(title="Total solo daños",width=4,background = "green",
h3(textOutput("total")),align="center"),
box(title="Total heridos",width=4,background = "orange",
h3(textOutput("heridos")),align="center"),
box(title="Total muertes",width=4,background = "black",
h3(textOutput("muertes")),align="center")
),
br(),
h2(actionButton("ver","VISUALIZAR DETALLE BARRIOS",icon("table"), style="background-color:#2A68CF; color:#FFFFFF"), align="center"),
dataTableOutput('barrios')
),
#Boton prediccion
tabItem(tabName = "prediccion",
br(),
h1("Predicción de la accidentalidad en Medellín para el año 2020",align="center",style="color:#1C762F"),
br(),
sidebarLayout(
sidebarPanel(
dateRangeInput(inputId = "rango", label = "Seleccione fecha", format = "yyyy-mm-dd", start = "2020-01-01", end = "2020-01-31", min = "2020-01-01", max = "2020-12-31"),
selectInput("select_comuna", label = "Seleccione una comuna",
choices = list("Aranjuez" = "Aranjuez",
"Belén" = "Belén",
"Buenos Aires" = "Buenos Aires",
"Castilla" = "Castilla",
"Corregimiento de Altavista" = "Corregimiento de Altavista",
"Corregimiento de San Antonio de Prado" = "Corregimiento de San Antonio de Prado",
"Corregimiento de San Cristóbal" = "Corregimiento de San Cristóbal",
"Corregimiento de Santa Elena" = "Corregimiento de Santa Elena",
"Doce de Octubre" = "Doce de Octubre",
"El Poblado" = "El Poblado",
"Guayabal" = "Guayabal",
"La América" = "La América",
"La Candelaria" = "La Candelaria",
"Laureles Estadio" = "Laureles Estadio",
"Manrique" = "Manrique",
"Popular" = "Popular",
"Robledo" = "Robledo",
"San Javier" = "San Javier",
"Santa Cruz" = "Santa Cruz",
"Villa Hermosa" = "Villa Hermosa"
), selected = "Robledo")
),
# Show a plot of the generated distribution
mainPanel(
h3("Modelo día"),
p('A continuación se muestra la predicción diaria de accidentes de clase choque. A la izquierda puede seleccionar el rango de fechas y la comuna que desee predecir y de forma dinámica se actualizará el gráfico de accidentalidad para el año 2020.'),
plotOutput("diario")
)
),
sidebarLayout(
sidebarPanel(
sliderInput("slider_sem", label = "Seleccionar rango de semanas", min = 0,
max = 52, value = c(20, 30)),
selectInput("select_comuna_sem", label = "Seleccione una comuna",
choices = list("Aranjuez" = "Aranjuez",
"Belén" = "Belén",
"Buenos Aires" = "Buenos Aires",
"Castilla" = "Castilla",
"Corregimiento de Altavista" = "Corregimiento de Altavista",
"Corregimiento de San Antonio de Prado" = "Corregimiento de San Antonio de Prado",
"Corregimiento de San Cristóbal" = "Corregimiento de San Cristóbal",
"Corregimiento de Santa Elena" = "Corregimiento de Santa Elena",
"Doce de Octubre" = "Doce de Octubre",
"El Poblado" = "El Poblado",
"Guayabal" = "Guayabal",
"La América" = "La América",
"La Candelaria" = "La Candelaria",
"Laureles Estadio" = "Laureles Estadio",
"Manrique" = "Manrique",
"Popular" = "Popular",
"Robledo" = "Robledo",
"San Javier" = "San Javier",
"Santa Cruz" = "Santa Cruz",
"Villa Hermosa" = "Villa Hermosa"
), selected = "Robledo")
),
# Show a plot of the generated distribution
mainPanel(
h3("Modelo semana"),
p('A continuación se muestra la predicción semanal de accidentes de clase choque. A la izquierda puede seleccionar el rango de semanas y la comuna que desee predecir y de forma dinámica se actualizará el gráfico de accidentalidad para el año 2020.'),
plotOutput("semanal")
)
),
sidebarLayout(
sidebarPanel(
sliderInput("slider_mes", label = "Seleccionar rango de meses", min = 1,
max = 12, value = c(1, 12)),
selectInput("select_comuna_mes", label = "Seleccione una comuna",
choices = list("Aranjuez" = "Aranjuez",
"Belén" = "Belén",
"Buenos Aires" = "Buenos Aires",
"Castilla" = "Castilla",
"Corregimiento de Altavista" = "Corregimiento de Altavista",
"Corregimiento de San Antonio de Prado" = "Corregimiento de San Antonio de Prado",
"Corregimiento de San Cristóbal" = "Corregimiento de San Cristóbal",
"Corregimiento de Santa Elena" = "Corregimiento de Santa Elena",
"Doce de Octubre" = "Doce de Octubre",
"El Poblado" = "El Poblado",
"Guayabal" = "Guayabal",
"La América" = "La América",
"La Candelaria" = "La Candelaria",
"Laureles Estadio" = "Laureles Estadio",
"Manrique" = "Manrique",
"Popular" = "Popular",
"Robledo" = "Robledo",
"San Javier" = "San Javier",
"Santa Cruz" = "Santa Cruz",
"Villa Hermosa" = "Villa Hermosa"
), selected = "Robledo")
),
# Show a plot of the generated distribution
mainPanel(
h3("Modelo mensual"),
p('A continuación se muestra la predicción mensual de accidentes de clase choque. A la izquierda puede seleccionar el rango de meses y la comuna que desee predecir y de forma dinámica se actualizará el gráfico de accidentalidad para el año 2020.'),
plotOutput("mensual")
)
)
),
tabItem(tabName = "agrupamiento",
br(),
h1("Predicción de la accidentalidad en Medellín para el año 2020",align="center",style="color:#1C762F"),
fluidRow(
box(
h3("Descripción del agrupamiento",align="center",style="color:#1C762F"),
br(),
fluidRow(
box(h4("GRUPO 1",style="color:#1C762F"),
p("Barrios con alta accidentalidad y alta probabilidad de lesiónes (heridas, muerte).",style = "font-family: 'times'; font-size:14pt")),
box(h4("GRUPO 2",style="color:#1C762F"),
p("Barrios con alta accidentalidad y baja probabilidad de lesiónes (heridas, muerte).",style = "font-family: 'times'; font-size:14pt"))),
br(),
fluidRow(
box(h4("GRUPO 3",style="color:#1C762F"),
p("Barrios con baja accidentalidad y alta probabilidad de lesiónes (heridas, muerte).",style = "font-family: 'times'; font-size:14pt")),
box(h4("GRUPO 4",style="color:#1C762F"),
p("Barrios con baja accidentalidad y baja probabilidad de lesiónes (heridas, muerte).",style = "font-family: 'times'; font-size:14pt")))
),
box(
leafletOutput("grupos",height = 500, width = 500)
)
)
),
#Boton creditos
tabItem(tabName = "creditos",
br(),
h1("Creditos",align="center",style="color:#1C762F"),
br(),
h3("Proyecto elaborado por:",align="center",style="color:#000000"),
br(),
fluidRow(
box(p("Wilder Camilo Castro Ramos",style = "font-family: 'times'; font-size:18pt;text-align: center"),
p("Ingeniería de sistemas",style = "font-family: 'times'; font-size:18pt;text-align: center"),
p("wccastror@unal.edu.co",style = "font-family: 'times'; font-size:18pt;text-align: center")),
box(p("David Esteban Escobar Castro",style = "font-family: 'times'; font-size:18pt;text-align: center"),
p("Ingeniería de sistemas",style = "font-family: 'times'; font-size:18pt;text-align: center"),
p("deescobarc@unal.edu.co",style = "font-family: 'times'; font-size:18pt;text-align: center"))
),
br(),
h3("Técnicas de aprendizaje estadístico",align="center",style="color:#000000"),
h3("Universidad Nacional de Colombia",align="center",style="color:#000000"),
h3("Sede Medellín",align="center",style="color:#000000"),
h3("2020",align="center",style="color:#000000")
)
)
)
)
#Definiendo logica server
server <- function(input, output) {
v <- reactiveValues(datos1=NULL)
#Definiedo el mapa de visualizacion
output$result <- renderLeaflet({
datos1 <- subset(datos, CLASE==input$type & FECHA <= input$date & COMUNA==input$comuna)
#Funcion de color para mapa de visualizacion
getColor <- function(datos1){
sapply(datos1$GRAVEDAD, function(GRAVEDAD){
if(GRAVEDAD=='MUERTO'){
"black"
} else if(GRAVEDAD=='HERIDO'){
"orange"
} else{
"green"
}
}
)
}
#Graficando mapa
leaflet() %>%
addTiles() %>%
addCircleMarkers(lng=datos1$X,lat =datos1$Y, label=datos1$BARRIO, color=getColor(datos1),radius = 5,opacity = 1, fillOpacity = 0.1)
})
output$total <- renderText({
datos1 <- subset(datos, CLASE==input$type & FECHA <= input$date & COMUNA==input$comuna)
sum(datos1$GRAVEDAD=='SOLO DAÑOS')
})
#Definiendo datos para la tabla de barrios
output$heridos <- renderText({
datos1 <- subset(datos, CLASE==input$type & FECHA <= input$date & COMUNA==input$comuna)
sum(datos1$GRAVEDAD=='HERIDO')
})
output$muertes <- renderText({
datos1 <- subset(datos, CLASE==input$type & FECHA <= input$date & COMUNA==input$comuna)
sum(datos1$GRAVEDAD=='MUERTO')
})
observeEvent(input$ver, {
v$datos1 <- datos %>%
filter(datos$CLASE==input$type & datos$FECHA <= input$date & datos$COMUNA==input$comuna)%>%
count(BARRIO,GRAVEDAD, name = "CANTIDAD")
})
output$barrios <- renderDT(v$datos1)
#Mapa de agrupamiento
output$grupos <- renderLeaflet({
leaflet(datosBase) %>%
addTiles() %>%
addCircleMarkers(lng = datosBase$X,lat = datosBase$Y, label=datosBase$BARRIO, labelOptions = labelOptions(noHide = F), color = "black", fillColor= ~pal(datosBase$GRUPO),radius = 8,opacity = 2, fillOpacity = 1,
popup =paste0("<strong>Barrio: </strong>",
datosBase$BARRIO,
"<br>",
"<strong>Comuna: </strong>",
datosBase$COMUNA,
"<br>",
"<strong>Grupo: </strong>",
datosBase$GRUPO,
"<br>",
"<strong>Promedio accidentes por mes: </strong>",
round((datosBase$TOTAL_ACCIDENTES1)/60,1),
"<br>",
"<strong>Promedio heridos por mes: </strong>",
round(datosBase$PROMEDIO_HERIDOS_X_MES,1),
"<br>",
"<strong>Promedio muertes por mes: </strong>",
round(datosBase$PROMEDIO_MUERTES_X_MES,1),
"<br>",
"<strong>% Lesionado: </strong>",
round((datosBase$TOTAL_ACCIDENTES/datosBase$TOTAL_ACCIDENTES1)*100,1),
"<b> %</b>"
))%>%
addLegend("bottomright", pal=pal, values=datosBase$GRUPO,
title="Grupos")
})
output$diario <- renderPlot({
d <- data_frame_pred %>%
filter(fechas >= input$rango[1], fechas <= input$rango[2], Comuna_nombre == input$select_comuna) %>%
group_by(fechas) %>% summarise(sum = sum(predicciones_nuevas))
ggplot(data = d,aes(x = fechas, y = sum)) + geom_bar(stat="identity", position="dodge") + xlab("Fechas seleccionadas") + ylab ("Cantidad de Accidentes") + ggtitle(paste("Accidentalidad predecida para la comuna",input$select_comuna,"por día" )) + theme (plot.title = element_text(size=rel(1), #Tamaño relativo de la letra del título
vjust=2, #Justificación vertical, para separarlo del gráfico
face="bold", #Letra negrilla. Otras posibilidades "plain", "italic", "bold" y "bold.italic"
lineheight=1.5)) + scale_x_date(date_minor_breaks = "1 day")
})
output$semanal <- renderPlot({
d <- data_frame_pred_sem %>%
filter(Semana >= input$slider_sem[1], Semana <= input$slider_sem[2], Comuna_nombre == input$select_comuna_sem) %>%
group_by(Semana) %>% summarise(sum = sum(predicciones_nuevas))
ggplot(data = d,aes(x = Semana, y = sum)) + geom_bar(stat="identity", position="dodge") + xlab("Semanas seleccionadas") + ylab ("Cantidad de Accidentes") + ggtitle(paste("Accidentalidad predecida para la comuna",input$select_comuna_sem,"por semana" )) + theme (plot.title = element_text(size=rel(1), #Tamaño relativo de la letra del título
vjust=2, #Justificación vertical, para separarlo del gráfico
face="bold", #Letra negrilla. Otras posibilidades "plain", "italic", "bold" y "bold.italic"
lineheight=1.5))
})
output$mensual <- renderPlot({
d <- data_frame_pred_mes %>%
filter(Mes >= input$slider_mes[1], Mes <= input$slider_mes[2], Comuna_nombre == input$select_comuna_mes) %>%
group_by(Mes) %>% summarise(sum = sum(predicciones_nuevas))
ggplot(data = d,aes(x = Mes, y = sum)) + geom_bar(stat="identity", position="dodge") + xlab("Meses seleccionados") + ylab ("Cantidad de Accidentes") + ggtitle(paste("Accidentalidad predecida para la comuna",input$select_comuna_sem,"por mes" )) + theme (plot.title = element_text(size=rel(1), #Tamaño relativo de la letra del título
vjust=2, #Justificación vertical, para separarlo del gráfico
face="bold", #Letra negrilla. Otras posibilidades "plain", "italic", "bold" y "bold.italic"
lineheight=1.5)) + scale_x_continuous(breaks=c(1,2,3,4,5,6,7,8,9,10,11,12))
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
748c40cf4c002fcc7aabdb77a5b6a6a280087b4f | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/dbparser/examples/close_db.Rd.R | e79195fc56fcf1dbcaa92aefc6c3561d2d73407b | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 167 | r | close_db.Rd.R | library(dbparser)
### Name: close_db
### Title: Close open drug bank sql database
### Aliases: close_db
### ** Examples
## No test:
close_db()
## End(No test)
|
3b54f5f9dc2589d219bb071bc77887aef4232050 | 476171664236ce5a9173ec11dd06a66187d22ee0 | /tests/testthat/test_shorthand_gl.R | 35086090b82490321e332472b8a515a90342c646 | [] | no_license | michaelwitting/lipidomicsUtils | d03ad4a12f58b7a3728baa4d13e3a25422690dd8 | b818a01bc7ecfc9901cb8ebec381c3c3da444835 | refs/heads/master | 2021-06-13T06:17:53.352351 | 2019-11-06T15:26:43 | 2019-11-06T15:26:43 | 187,207,427 | 2 | 1 | null | 2019-11-06T15:26:44 | 2019-05-17T11:49:28 | R | UTF-8 | R | false | false | 1,514 | r | test_shorthand_gl.R | library(lipidomicsUtils)
context("Generation of Shorthand")
## test for correct shorthand notation of GL
test_that("correct shorthand notation for GLs", {
# glycerolipids (MGs)
expect_equal(get_gl_shorthand("MG(16:0(15Me)/0:0/0:0)", level = "structural"), "MG(17:0/0:0/0:0)")
expect_equal(get_gl_shorthand("MG(16:0(15Me)/0:0/0:0)", level = "molecular"), "MG(0:0_0:0_17:0)")
expect_equal(get_gl_shorthand("MG(16:0(15Me)/0:0/0:0)", level = "species"), "MG(17:0)")
# glycerolipids (MGs)
expect_equal(get_gl_shorthand("MG(0:0/16:0(15Me)/0:0)", level = "structural"), "MG(0:0/17:0/0:0)")
expect_equal(get_gl_shorthand("MG(0:0/16:0(15Me)/0:0)", level = "molecular"), "MG(0:0_0:0_17:0)")
expect_equal(get_gl_shorthand("MG(0:0/16:0(15Me)/0:0)", level = "species"), "MG(17:0)")
# glycerolipids (DGs)
expect_equal(get_gl_shorthand("DG(16:0(15Me)/18:1(9Z)/0:0)", level = "structural"), "DG(17:0/18:1/0:0)")
expect_equal(get_gl_shorthand("DG(16:0(15Me)/18:1(9Z)/0:0)", level = "molecular"), "DG(0:0_17:0_18:1)")
expect_equal(get_gl_shorthand("DG(16:0(15Me)/18:1(9Z)/0:0)", level = "species"), "DG(35:1)")
# glycerolipids (TGs)
expect_equal(get_gl_shorthand("TG(16:0(15Me)/18:1(9Z)/20:4(5Z,8Z,11Z,14Z))", level = "structural"), "TG(17:0/18:1/20:4)")
expect_equal(get_gl_shorthand("TG(16:0(15Me)/18:1(9Z)/20:4(5Z,8Z,11Z,14Z))", level = "molecular"), "TG(17:0_18:1_20:4)")
expect_equal(get_gl_shorthand("TG(16:0(15Me)/18:1(9Z)/20:4(5Z,8Z,11Z,14Z))", level = "species"), "TG(55:5)")
}) |
66ec4add5a8d8ef36d6aa51f89baf4901713138b | e21115a92d1028520cf8171267d38045b41e9b0a | /funcs/meta_plots.R | 9f7bcff1660132c16ce73c4ad1b70e1bf7c979eb | [] | no_license | MRCIEU/opengwas-reports | fb1e437ac0bd63087aae6cafaf3117169e5ec29f | 74213fc57b76fa0a22f81100135ccf96c6b92ae9 | refs/heads/master | 2023-04-25T11:18:51.146300 | 2021-03-26T18:36:43 | 2021-03-26T18:36:43 | 159,704,285 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 849 | r | meta_plots.R | plot_scatter <- function(df, x, y, hide_x_ticks = TRUE) {
  # Capture the bare column names so they can be spliced into aes() below.
  x <- enquo(x)
  y <- enquo(y)
  p <- df %>% {
    ggplot(.) +
      aes(x = !!x, y = !!y) +
      geom_point(color = "#4f8bb7", alpha = 0.5) +
      theme_classic()
  }
  # Optionally strip the x-axis tick marks and labels (the default).
  if (hide_x_ticks) {
    p <- p + theme(
      axis.text.x = element_blank(),
      axis.ticks.x = element_blank()
    )
  }
  p
}
# Histogram of a single column, passed as a bare column name: non-finite
# rows are dropped and the column is coerced to numeric before plotting.
plot_hist <- function(df, x) {
  x <- enquo(x)
  clean <- df %>%
    filter(is.finite(!!x)) %>%
    mutate_at(vars(!!x), as.numeric)
  ggplot(clean) +
    aes(x = !!x) +
    geom_histogram(fill = "#4f8bb7", alpha = 0.5) +
    theme_classic()
}
# Kernel-density plot of a single column, passed as a bare column name;
# non-finite rows are dropped before plotting.
plot_density <- function(df, x) {
  x <- enquo(x)
  clean <- filter(df, is.finite(!!x))
  ggplot(clean) +
    aes(x = !!x) +
    geom_density(fill = "#4f8bb7", alpha = 0.3) +
    theme_classic()
}
|
6fb41a458c33b8b246a03d29c811bdf98cba3975 | 6dd1977fbbebf0925b72bda6bb959b40caccce07 | /LoadFactors2.R | 37ee22a4e37f818b51af7735f5fa8e3df1ef1b45 | [] | no_license | NomoreNoless/Strategy-Comparison | 530620e61d65a9b0ccd777f990bad77d17f27fb7 | e1ba7aa5518d77fc286dd029097c3443d27c4338 | refs/heads/master | 2021-09-04T11:08:52.841765 | 2018-01-18T04:56:59 | 2018-01-18T04:56:59 | 117,786,233 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,075 | r | LoadFactors2.R | LoadFactors2 <- function(Factors=c(),PerformanceMetric="",matrix,benchmark=Inf){
  # Build the list of columns to pull from `matrix`: with a finite
  # benchmark the "id" column is dropped, otherwise it is kept.
  if(benchmark<Inf){
    select_col <- c(Factors,PerformanceMetric)
  }
  else{
    select_col <- c(Factors,PerformanceMetric,"id")
  }
  # PILOT_SECURITY brings its companion columns G, C and None along.
  if("PILOT_SECURITY" %in% Factors){
    select_col <- c(select_col,c("G","C","None"))
  }
  # notional is always carried along, whether or not it is a factor.
  if(!("notional" %in% Factors)){
    select_col <- c(select_col,"notional")
  }
  # For rows flagged id==1 the slippage is replaced by the negated ratio.
  # NOTE(review): `id` and `ratio_bps`/`ratio_spread` are referenced inside
  # mutate() but are not in select_col (id only when benchmark==Inf, the
  # ratios never) -- this appears to rely on them surviving the subset;
  # verify against callers.
  if(PerformanceMetric=="VWAP_Slip_bps"){
    select_col <- c(select_col,"VWAP_Slip_bps")
    selected <- matrix[,select_col]
    selected <- data.frame(selected)
    selected <- selected %>% mutate(VWAP_Slip_bps = ifelse(id==1,-ratio_bps,VWAP_Slip_bps))
  }
  if(PerformanceMetric=="VWAP_Slip_spr"){
    select_col <- c(select_col,"VWAP_Slip_spr")
    selected <- matrix[,select_col]
    selected <- data.frame(selected)
    selected <- selected %>% mutate(VWAP_Slip_spr = ifelse(id==1,-ratio_spread,VWAP_Slip_spr))
  }
  # NOTE(review): if PerformanceMetric is neither VWAP metric, `selected` is
  # never assigned and this return() errors -- consider an explicit stop().
  return(list(selected=selected,Factors=Factors,PerformanceMetric=PerformanceMetric))
}
|
203d1e7e8d7aca7843b416f8aca30c1b0df29d33 | 297d27ef4583958cdce9ad5ae9567f6bd638e781 | /07_using color_for_multiple_lines.R | 15e03c58800dba8cf44aae13b6f872f805e0aa6b | [] | no_license | axc836/data_visualisation | 321887a4f8539c004ca7e4886784c4742c4305df | a505019b39fcdd9e895e0e4597a9284ca6461551 | refs/heads/main | 2023-07-31T10:20:00.129721 | 2021-10-05T09:49:59 | 2021-10-05T09:49:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,579 | r | 07_using color_for_multiple_lines.R | # Multiple line plots with many colors
ggplot(data_multiple_time_series, aes(x = year, y = value)) +
geom_line(aes(color = label)) +
scale_x_continuous(breaks = c(2010, 2012, 2014, 2016, 2018, 2020)) +
labs(title = "Our brand vs competitors",
subtitle = "'000 USD net sales") +
theme(
plot.title = element_text(size = 18, margin = margin(10, 0, 0, 0)),
plot.subtitle = element_text(size = 12, margin = margin(10, 0, 30, 0), color = "gray"),
panel.background = element_rect(fill = NA),
panel.grid.major = element_blank(),
axis.line = element_line(color = "gray35"),
axis.ticks = element_blank(),
axis.title = element_blank(),
axis.text = element_text(size = 10, color = "gray35"),
axis.text.x = element_text(margin = margin(5, 0, 0, 0)),
axis.text.y = element_text(margin = margin(0, 5, 0, 0)),
legend.key = element_rect(fill = NA)
)
# Line plot using colors
ggplot(data_multiple_time_series, aes(x = year, y = value)) +
geom_line(aes(color = category, size = category)) +
scale_colour_manual(values = c(our_brand = "royalblue4",
competitor_1 = "gray85",
competitor_2 = "gray85",
competitor_3 = "gray85",
competitor_4 = "gray85",
competitor_5 = "gray85")) +
scale_size_manual(values = c(our_brand = 1.5,
competitor_1 = 0.5,
competitor_2 = 0.5,
competitor_3 = 0.5,
competitor_4 = 0.5,
competitor_5 = 0.5)) +
scale_x_continuous(breaks = c(2010, 2012, 2014, 2016, 2018, 2020), expand = c(0, 0)) +
scale_y_continuous(limits = c(0, 100), expand = c(0, 0)) +
labs(title = "Our brand vs competitors",
subtitle = "'000 USD net sales") +
theme(
plot.title = element_text(size = 18, margin = margin(10, 0, 0, 0)),
plot.subtitle = element_text(size = 12, margin = margin(10, 0, 30, 0), color = "royalblue4"),
plot.margin = margin(0, 20, 10, 10),
panel.background = element_rect(fill = NA),
panel.grid.major = element_blank(),
axis.line = element_line(color = "gray35"),
axis.ticks = element_blank(),
axis.title = element_blank(),
axis.text = element_text(size = 10, color = "gray35"),
axis.text.x = element_text(margin = margin(5, 0, 0, 0)),
axis.text.y = element_text(margin = margin(0, 5, 0, 0)),
legend.position = "none"
)
|
eb3585871f1ed74eb3c207685fe252cf66037c29 | 0de6ee2d9d04dbe86ac4ab184e3c3b74924ae3f4 | /oblique.R | 4a3e982d9513d0f4a2fea0a48162915fb544f49a | [] | no_license | Kunahn/Master-Thesis | ae15ca7ac9d058884b736f0398dce9ee571e137c | 7a2be53bc6fb90bde4dfe609173abfe902fec7d5 | refs/heads/main | 2023-07-03T07:33:35.924618 | 2021-08-08T10:16:55 | 2021-08-08T10:16:55 | 393,918,788 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 775 | r | oblique.R | require(obliqueRF)
data(iris)
## data
# extract feature matrix
x<-as.matrix(iris[,1:4])
# convert to 0/1 class labels
y<-as.numeric(iris[,5]=="setosa")
## train
smp<-sample(1:nrow(iris), nrow(iris)/5)
obj <- obliqueRF(x[-smp,], y[-smp])
## test
pred <- predict(obj, x[smp,], type="prob")
plot(pred[,2],col=y[smp]+1,ylab="setosa probability")
table(pred[,2]>.5,y[smp])
## example: importance
imp<-rep(0,ncol(x))
names(imp)<-colnames(x)
numIterations<-10 #increase the number of iterations for better results, e.g., numIterations=100
for(i in 1:numIterations){
obj<-obliqueRF(x,y,
training_method="log", bImportance=TRUE,
mtry=2, ntree=500)
imp<-imp+obj$imp
plot(imp,t='l', main=paste("steps:", i*100), ylab="obliqueRF importance")
}
obj$trees
#plot(obj$trees) |
7842169e7a6fd0dedb0ff532bb59ee0073996e8a | 5d50ae77c3ba893223804e034ecccb40cc91b698 | /tests/testthat/test-20-dict.R | 696efc2ecb6588249ab7cd37c0516b4b294fd2d6 | [] | no_license | freyreeste/ore | 95bdfbcf98826b9ac74e759c0c771054b656d851 | 94f551fd6a52a14314f62d37be0658d8100c7656 | refs/heads/master | 2022-03-12T22:49:37.261330 | 2019-11-01T09:29:28 | 2019-11-01T09:29:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 402 | r | test-20-dict.R | context("Pattern dictionary functionality")
test_that("the pattern dictionary can be used", {
ore.dict(protocol="\\w+://")
expect_equal(ore.dict("protocol"), "protocol")
expect_equal(ore.dict(paste0("proto","col")), "protocol")
expect_equal(ore.dict(protocol), structure("\\w+://",names="protocol"))
expect_equal(as.character(ore(protocol, "?www\\.")), "(\\w+://)?www\\.")
})
|
a8e3637fb79dbfefdb9550fc99d8a120b112a7b3 | 0f64ac5e3d3cf43124dcb4917a4154829e7bb535 | /cal/se2.calAddVolume.R | dcf5717c203841b8e269fae8d9340e9c63b135e4 | [] | no_license | wactbprot/r4vl | 8e1d6b920dfd91d22a01c8e270d8810f02cea27c | a34b1fa9951926796186189202750c71e7883f8d | refs/heads/master | 2016-09-11T02:22:39.828280 | 2014-10-07T14:37:55 | 2014-10-07T14:37:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 863 | r | se2.calAddVolume.R | se2.calAddVolume <- function(ccc){
    ## Accumulates every additional volume entry (converted to cm^3) and
    ## writes the total, plus an audit-trail message, back into the
    ## calibration document under Analysis$Values$Volume.
    msg <- "calculated by se2.calAddVolume()"
    volUnit <- "cm^3"
    Vz <- 0
    a <- abbrevList(ccc)
    ## -------------------- sum up the additional volumes
    ## NOTE(review): 1:length(...) iterates over c(1, 0) when the Volume
    ## list is empty -- seq_along() would be the safe form.
    for(i in 1:length(a$cma$Volume)){
        fromUnit <- a$cma$Volume[[i]]$Unit
        conv <- getConvFactor(ccc,volUnit, fromUnit)
        msg <- paste(msg, "used", conv, "to convert from",fromUnit, "to", volUnit)
        nV <- getConstVal(NA,NA,a$cma$Volume[[i]])
        lnV <- length(nV) # only the most recent entry counts
        Vz <- Vz + nV[lnV] * conv
    }
    ccc$Calibration$Analysis$Values$Volume <-
        setCcl(ccc$Calibration$Analysis$Values$Volume,
               "additional",
               volUnit,
               Vz,
               msg)
    return(ccc)
}
|
8f51b9e4c5acbaa404f0e5e60bb2fe864feb6ded | e744f5702149b551e802a2e7719794c35155998f | /R/RcppExports.R | d719466a889f2f0a8da9922f33c6518eabb7fe90 | [] | no_license | vh-d/SFAsim | 76289d8b3ba059e583b415c88327e496d5f75ba5 | 7d99677837e5241a08f9e73bdb0b5cb8930291a8 | refs/heads/master | 2021-09-26T22:22:15.626239 | 2018-11-03T15:31:07 | 2018-11-03T15:31:07 | 110,331,531 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 261 | r | RcppExports.R | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
rtnorm_cpp <- function(n, mean, sd, lower, upper) {
.Call('_SFAsim_rtnorm_cpp', PACKAGE = 'SFAsim', n, mean, sd, lower, upper)
}
|
f92a186552ce93b3e9ce78b37e24f22fa784129e | e4be7f8bf65953c73b3b699a81a7d8db749e3e60 | /inst/extdata/R/test_met_prep.R | 82bdb297be17c73f84ce5a91600a5e54894d0b2e | [
"MIT"
] | permissive | FLARE-forecast/FLAREr | 0833470f9b8e744b4a0782c1a1b50a6a403534d1 | 0c1215688ea80eb7886f3ffe2d847b766b3fb6d0 | refs/heads/master | 2023-08-04T23:43:47.057220 | 2023-08-04T13:34:43 | 2023-08-04T13:34:43 | 292,376,842 | 4 | 8 | MIT | 2023-04-25T13:32:56 | 2020-09-02T19:32:21 | R | UTF-8 | R | false | false | 2,407 | r | test_met_prep.R |
# Test-setup script for FLAREr: copies the packaged example data set into a
# temporary directory and assembles the configuration object for a forecast
# run (paths, YAML configs, parameter/observation/state tables).
template_folder <- system.file("example", package= "FLAREr")
temp_dir <- tempdir()
# dir.create("example")
file.copy(from = template_folder, to = temp_dir, recursive = TRUE)
test_directory <- file.path(temp_dir, "example")
# print(list.files(test_directory))
# print(readLines(file.path(test_directory, "test_met_prep.R")))
# Directory layout used throughout the test run.
lake_directory <- test_directory
configuration_directory <- file.path(lake_directory, "configuration","default")
execute_directory <- file.path(test_directory, "flare_tempdir")
qaqc_data_directory <- file.path(test_directory, "data_processed")
forecast_input_directory <- file.path(test_directory, "forecasted_drivers")
##### Read configuration files
config <- yaml::read_yaml(file.path(configuration_directory,"configure_flare.yml"))
run_config <- yaml::read_yaml(file.path(configuration_directory, "configure_run.yml"))
config$run_config <- run_config
# Wire the run-specific driver/output paths into the configuration object.
config$file_path$noaa_directory <- file.path(forecast_input_directory, config$met$forecast_met_model)
config$file_path$inflow_directory <- file.path(forecast_input_directory, config$inflow$forecast_inflow_model)
config$file_path$configuration_directory<- configuration_directory
config$file_path$execute_directory <- file.path(lake_directory, "flare_tempdir")
config$file_path$forecast_output_directory <- file.path(test_directory, "forecast_output")
config$file_path$qaqc_data_directory <- file.path(test_directory, "data_processed")
if(!dir.exists(config$file_path$execute_directory)){
  dir.create(config$file_path$execute_directory)
}
# The GLM model definition must sit next to the executable working files.
file.copy(file.path(configuration_directory, "glm3.nml"), execute_directory)
config$qaqc_data_directory <- qaqc_data_directory
# Parameter, observation and state configuration tables.
pars_config <- readr::read_csv(file.path(configuration_directory, config$model_settings$par_config_file), col_types = readr::cols())
obs_config <- readr::read_csv(file.path(configuration_directory, config$model_settings$obs_config_file), col_types = readr::cols())
states_config <- readr::read_csv(file.path(configuration_directory, config$model_settings$states_config_file), col_types = readr::cols())
#Download and process observations (already done)
cleaned_observations_file_long <- file.path(config$file_path$qaqc_data_directory,"fcre-targets-insitu.csv")
cleaned_inflow_file <- file.path(config$file_path$qaqc_data_directory, "fcre-targets-inflow.csv")
observed_met_file <- file.path(config$file_path$qaqc_data_directory,"observed-met_fcre.nc")
ceb3ca8cce14b4a0f70c0216e3582f4e361d2efd | 04c2def2074e54eae4c260eaa526d969e92c9344 | /Guillaume/Intro II/Exercice info chap 3.R | a204b85f22a9b3481614a2035c0c12acef491d84 | [] | no_license | OlivierTurcotte/r-projects | 0f18347d3b265f5709e152ecfccc2e7eae4bf12f | 87edef011aec31702f2519dd6f5e924295163c79 | refs/heads/master | 2021-05-04T00:37:07.810455 | 2018-10-25T20:53:33 | 2018-10-25T20:53:33 | 120,346,969 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,143 | r | Exercice info chap 3.R | #######EXERCICE INFO CHAPITRE 3 #####
### ----- Exercise 1 -----
rm(list=ls())
# (a) done on paper
# (b) done on paper
# (c)
# Compound model: M(k) is the negative-binomial claim-count mass function,
# distribution() the CDF of the aggregate loss (gamma severities).
r <- 5
q <- 1/5
M <- function(x)
{
  dnbinom(x,r,q)
}
sum(M(0:10000))
distribution <- function(x,n=100000)
{
  z <- numeric(n)
  for(i in 1:n)
  {
    z[i] <- pgamma(x,2*i,1/5000)*M(i)
  }
  (M(0) + sum(z))
}
distribution(500000,1e6)
# (d) 99% Value-at-Risk by numerically inverting the CDF, then the
# corresponding Tail Value-at-Risk.
VaR <- optimize(function(x) abs(distribution(x,10000)-0.99),c(0,600000))[[1]]
TVaR <- function(x,n)
{
  z <- numeric(n)
  for(i in 1:n)
  {
    z[i] <- (2*i)*5000 * (1 - pgamma(x,2*i + 1,1/5000))*M(i)
  }
  sum(z)/(1-0.99)
}
TVaR(VaR,1e6)
### ----- Exercise 2 -----
rm(list=ls())
# Mixture CDF: mass 0.8 at zero plus 0.2 times an exponential tail.
distribution <- function(x)
{
  0.8 + 0.2*pexp(x,1/2500)
}
# NOTE(review): VaR is used here before it is defined below, and any earlier
# VaR was removed by rm(list=ls()) above, so this call fails -- it probably
# belongs after the optimize() line.  Confirm and reorder.
distribution(VaR)
VaR <- optimize(function(x) abs(0.2 * pexp(x,1/2500) - 0.19),c(0,10000))[[1]]
qnorm(0.95)
### ----- Exercise 3 -----
rm(list=ls())
# (a) Simulate a Poisson mixture by inversion: theta ~ lognormal(-0.5, 1),
# M | theta ~ Poisson(2*theta), using one uniform per stage.
set.seed(20160419)
m <- 1000000
v <- matrix(runif(2*m),ncol=2,byrow=T)
v[m,]
theta <-qlnorm(v[,1],-0.5,1)
theta[1]
theta[m]
M <- qpois(v[,2],2*theta)
M[5]
M[m]
# (b) Empirical mass-function check (should sum to ~1), then mean and
# (biased) variance of the simulated counts.
sum(sapply(0:120,function(x) sum(M==x)/m))
mean(M)
mean(M^2)-mean(M)^2
b2795301fe37d9cab523ce8403036183db136412 | d107c6ae4eeba32be15b2ce34c88661ec1b78b6d | /imageRestorationSPEEDY.R | e1d6857bfbd6455b4da8fc8dda38e5b67fba0776 | [] | no_license | bunnella/image-restoration | bd8d658dea8727e052933892c7b2901a10bca294 | 9af0b923f716d6596b2bf6936499b3a42d8275bb | refs/heads/master | 2021-01-10T12:56:59.039030 | 2016-02-24T02:32:46 | 2016-02-24T02:32:46 | 45,868,943 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,432 | r | imageRestorationSPEEDY.R | # image
# Gibbs Sample/ Simulated Annealing
# RAP
# Output
library(bmp)
library(compiler)
enableJIT(3)
# SET YOUR WORKING DIRECTORY TO THE REPO!!!
Rprof.out <- "Rprof.out" # change to NULL to turn off profiling
display <- function(img, caption = "") {
image(img/2+.5, col=gray(V/2+.5), zlim=0:1, frame=F, asp=C/R, xaxt="n", yaxt="n", main=caption)
}
# euclidian distance
d <- function(xs, ys) {
(xs-ys)^2
}
f <- function(xs, xt) {
((((xs-xt)^2)^-alpha) + gamma^-alpha)^(-1/alpha)
#min((xs-xt)^2, gamma)
}
# energy function evaluated when x_s = v (only over neighbors of s)
H <- function(s, v) {
e <- theta*d(v, y[s])
r <- ((s-1) %% R) + 1
c <- ceiling(s / R)
if (r > 1) e <- e + f(v, x[s-1])
if (c > 1) e <- e + f(v, x[s-R])
if (r < R) e <- e + f(v, x[s+1])
if (c < C) e <- e + f(v, x[s+R])
e
}
# returns a sample from the local characteristic distribution
sampleXs <- function(s, beta = 1) {
probs <- sapply(V, function(v) exp(-beta*H(s, v)))
sample(V, 1, prob = probs)
}
##############################################################################
## Gibbs Sampler!
N <- 10 # number of sweeps
V <- seq(-1, 1, length.out = 32) # set of discrete gray levels
theta <- 4 # weight on data term
gamma <- .1 # microedge penalty
alpha <- 1.2 # robustification steepness
beta <- function(n) {
tau <- 200 # max annealing temperature
omicron <- 5 # annealing steepness
tau*(1-exp(-omicron*n/N))
}
# read in the test image
picture <- read.bmp("Anne's Stuff!/img/lena_gray_512b.bmp")
R <- ncol(picture)
C <- nrow(picture)
# transform to image()-ready orientation and degrade
ch.R <- t(picture[C:1, 1:R, 1])
ch.G <- t(picture[C:1, 1:R, 2])
ch.B <- t(picture[C:1, 1:R, 3])
values <- 0.30*ch.R + 0.59*ch.G + 0.11*ch.B
original <- values / 127.5 - 1 # scale from [0..255] -> [-1, 1]
y <- original #degrade(original, perturb_percentage=.2)
# plot original + degraded, leave room for MAP estimate
par(mfrow = c(1, 3), mar = c(2.6, 1, 2.6, 1))
display(original, "Original")
display(y, "Noisy data")
# turn on profiling (or possibly not...)
Rprof(filename = Rprof.out)
# Gibbs time ;)
x <- y # init with degraded (given) image
for (n in 1:N) {
for (s in 1:(R*C)) {
x[s] = sampleXs(s, beta(n))
}
cat(paste0("\r", round(100*n/N), "%\r"))
}
# turn off profiling
Rprof(filename = NULL)
display(x, "MAP estimate")
# profile summary
if (!is.null(Rprof.out)) summaryRprof(filename = Rprof.out)
|
f7734060d206b8e7b7ef53cce9fbaab4825855d6 | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/13688_0/rinput.R | dbdb052fec3dc9d5b276ab1c047a04928e946f39 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 137 | r | rinput.R | library(ape)
testtree <- read.tree("13688_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="13688_0_unrooted.txt") |
375b6a29c40739bc872a33ab61e9faa33126b67d | e27cd958cee85bbe670882d00d0a5f6d338c6344 | /Caret.R | 8e94c608e87d01ac37e79c324db9295ba9d45e24 | [] | no_license | MiG-Kharkov/Try-packages | f5384f456e7defde43688111c26f5894e51f81b2 | 745c5fbb4fa603a51cd8b2c8846ea8dc55cdcd15 | refs/heads/master | 2021-01-22T05:42:25.316381 | 2017-06-01T03:46:33 | 2017-06-01T03:46:33 | 92,486,725 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 623 | r | Caret.R | library(caret)
# Библиотека ниже делает разные метки и полупрозрачные
library(AppliedPredictiveModeling)
transparentTheme(trans = .4)
featurePlot(x = iris[, 1:4],
y = iris$Species,
plot = "pairs",
auto.key = list(columns = 3))
featurePlot(x = iris[, 1:4],
y = iris$Species,
plot = "box",
## Pass in options to bwplot()
scales = list(y = list(relation="free"),
x = list(rot = 90)),
layout = c(4,1 ),
auto.key = list(columns = 2))
|
3f422dcd6e1bc524fa7620b368790bab20a86913 | d06b678bb34ec639db2bb70552db8590571a827a | /R/two_part_compare.R | 6d0a78ea8c3ad6d06ecce3f412a72ae9d0283fd2 | [
"MIT"
] | permissive | haleyJ2021/microbiotaPair | 5e9689a690aaa36dda1f4f11f1436964132d15ab | 21c93e7b0e7b6faca6c49b495b62eda8d39cd1d7 | refs/heads/master | 2023-03-19T06:12:15.538871 | 2021-03-15T02:48:51 | 2021-03-15T02:48:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,859 | r | two_part_compare.R | #' two part compare
#' two part (paired test & McNemar’s Chi-Square Test) to maximum the data infomation
#' @param microbiota a data.frame, microbiome data row is sample id, col is variable
#' @param metadata a data.frame, metadata row is sample id ,col is variable
#' @param percent numberic, 0-1, the cutoff of species occrance
#' @return
#' data.frame
#' @export
#'
#' @examples
two_part_compare <- function(microbiota , metadata, percent){
# match the ID , get the baseline & treatment data
matchname <- names(table(metadata[, pairID_varname]))[table(metadata[, pairID_varname]) == 2]
outconfig <- metadata[!is.na(match(metadata[, pairID_varname], matchname)), ]
matchdat <- metadata[order(metadata[, pairID_varname]), ]
# to order the varname ,keep same as the time_name
matchdat[, time_varname] <- factor(matchdat[, time_varname], levels = time_name)
matchdat <- matchdat[order(matchdat[, time_varname]), ]
number <- length(matchname)
# to make sure the microbiota's sample ID is row
matchmicrobiota <- microbiota[rownames(matchdat), ]
# to remove low occrance feature
matchmicrobiota <- filterPer(matchmicrobiota, row = 2, percent = percent)
g1microbiota <- matchmicrobiota[1:number, ]
g2microbiota <- matchmicrobiota[(number+1):(2*number), ]
# part1 transform the matrix to 0-1
g1microbiota2 <- g1microbiota
g2microbiota2 <- g2microbiota
g1microbiota2[g1microbiota2 >0] <- 1
g1microbiota2[g1microbiota2 == 0] <- 0
g2microbiota2[g2microbiota2 >0] <- 1
g2microbiota2[g2microbiota2 == 0] <- 0
# filter all zero variable
out <- matrix(NA, nrow = ncol(matchmicrobiota), ncol = 5+2+3)
for(i in 1:ncol(matchmicrobiota)){
x <- g1microbiota2[,i]
y <- g2microbiota2[,i]
out[i, 1:5] <- c(length(x), sum(x==0), sum(y==0), median(g1microbiota[,i]),
median(g2microbiota[,i]))
x <- factor(x, levels = c(0, 1))
y <- factor(y, levels = c(0, 1))
tabletmp <- table(x, y)
mcnemarres <- mcnemar.test(tabletmp)
out[i, 6:7] <- as.numeric(c(mcnemarres$statistic, mcnemarres$p.value))
}
# part2 transform the matrix log
minvalue <- min(matchmicrobiota[matchmicrobiota!=0])
g1microbiota3 <- log(g1microbiota+minvalue)
g2microbiota3 <- log(g2microbiota+minvalue)
for(i in 1:ncol(matchmicrobiota)){
x <- g1microbiota3[,i]
y <- g2microbiota3[,i]
if(x!=0)
ttestres <- t.test(x, y ,paired = T)
out[i, 8:10] <- as.numeric(c(ttestres$statistic, ttestres$estimate, ttestres$p.value))
}
# meta-analysis
rownames(out) <- colnames(matchmicrobiota)
colnames(out) <- c("No.sample",
paste0(rep(c("No.absent.", "medianAbundance."),each=2), time_name),
"Chisq_Mcnemar", "Pvalue_Mcnemar",
"T_Ttest", "Estimate_Ttest", "Pvalue_Ttest"
)
return(out)
}
|
bce1d2038c700e5127cbcfd04a73efdecc322789 | df7b3a1eb6f65ac3e576f8b49ad385cb0303f2b0 | /R/eln4rmd.R | a7cc28bd38f34edb29905e8bec7413aa37f129e4 | [
"MIT"
] | permissive | ykunisato/senshuRmd | 0f974443ae343fc8f963e718f1d3b1102dbd3ae5 | 2f60fda7269985917f7d3e0dacb47a8666eb4590 | refs/heads/master | 2022-04-01T17:43:42.117858 | 2022-03-15T03:54:14 | 2022-03-15T03:54:14 | 217,082,796 | 7 | 2 | null | null | null | null | UTF-8 | R | false | false | 8,034 | r | eln4rmd.R | #' @title make new Japanese e-labnotebook in Markdown
#' @importFrom utils file.edit
#' @param add_name If you want to add the name after the date as the file name,
#' add this argument.
#' @param replace_date If you want to use the specified date as the file name
#' instead of today's date, add this argument.
#' @param rc If you are using Research Compendium of senshuRmd,
#' you can create a e-labnotebook file in the "labnote" directory from the current directory.
#' In that case, please set rc to TURE.
#' @export
elnjp_md <- function(add_name = FALSE, replace_date = FALSE, rc = FALSE) {
tmp_wd <- getwd()
# set file name
if(replace_date == FALSE){
date_name <- strsplit(paste0(as.POSIXlt(Sys.time(), format="%Y-%m-%d %H:%M:%S", tz="Japan")), " +")[[1]][1]
date_write <- date_name
}else{
date_name <- replace_date
date_write <- strsplit(paste0(as.POSIXlt(Sys.time(), format="%Y-%m-%d %H:%M:%S", tz="Japan")), " +")[[1]][1]
}
if(add_name == FALSE){
file_name <- paste0(date_name, ".Rmd")
}else{
file_name <- paste0(date_name, add_name, ".Rmd")
}
# set Rmd template file
path_skeleton <- system.file("rmarkdown/templates/eln_jp/skeleton/skeleton.Rmd",package = "eln4Rmd")
text_skeleton <- readLines(path_skeleton, warn = F)
if(rc == TRUE){
file_name <- paste0("labnote/",file_name)
}
tmp_rmd <- file(file_name, "w")
for (i in 1:length(text_skeleton)) {
st <- text_skeleton[i]
st <- str_replace(st, pattern = "# date_research",
replacement = paste0("date_research <- '",date_name, "'"))
st <- str_replace(st, pattern = "# date_write",
replacement = paste0("date_write <- '",date_write, "'"))
writeLines(st, tmp_rmd)
}
close(tmp_rmd)
navigateToFile(paste0(tmp_wd,"/",file_name))
}
#' @title make new Japanese e-labnotebook in PDF
#' @importFrom stringr str_detect
#' @importFrom stringr str_replace
#' @importFrom rstudioapi navigateToFile
#' @param add_name If you want to add the name after the date as the file name,
#' add this argument.
#' @param replace_date If you want to use the specified date as the file name
#' instead of today's date, add this argument.
#' @param rc If you are using Research Compendium of senshuRmd,
#' you can create an e-labnotebook file in the "labnote" directory from the current directory.
#' In that case, please set rc to TRUE.
#' @export
elnjp_pdf <- function(add_name = FALSE,replace_date = FALSE, rc = FALSE) {
  tmp_wd <- getwd()
  # Decide the date used in the file name and the date written in the note.
  if(replace_date == FALSE){
    date_name <- strsplit(paste0(as.POSIXlt(Sys.time(), format="%Y-%m-%d %H:%M:%S", tz="Japan")), " +")[[1]][1]
    date_write <- date_name
  }else{
    date_name <- replace_date
    date_write <- strsplit(paste0(as.POSIXlt(Sys.time(), format="%Y-%m-%d %H:%M:%S", tz="Japan")), " +")[[1]][1]
  }
  # File name: "<date>.Rmd" or "<date>_<add_name>.Rmd".
  if(add_name == FALSE){
    file_name <- paste0(date_name, ".Rmd")
  }else{
    file_name <- paste0(date_name, "_" ,add_name, ".Rmd")
  }
  # Keep the bare name for the YAML output field: the template must refer to
  # the file without the "labnote/" prefix added below.
  output_file_name <- file_name
  # Load the packaged skeleton template.
  path_skeleton <- system.file("rmarkdown/templates/eln_jp/skeleton/skeleton.Rmd",package = "eln4Rmd")
  text_skeleton <- readLines(path_skeleton, warn = F)
  # In a research compendium the notebook lives under labnote/.
  if(rc == TRUE){
    file_name <- paste0("labnote/",file_name)
  }
  # Write the template, swapping in the PDF render function and the dates.
  tmp_rmd <- file(file_name, "w")
  for (i in 1:length(text_skeleton)) {
    st <- text_skeleton[i]
    st <- str_replace(st, pattern = "output: md_document",
                      replacement = paste0("output: eln4Rmd::render_elnjp_pdf(Rmd_file = '",output_file_name,"')"))
    st <- str_replace(st, pattern = "# date_research",
                      replacement = paste0("date_research <- '",date_name, "'"))
    st <- str_replace(st, pattern = "# date_write",
                      replacement = paste0("date_write <- '",date_write, "'"))
    writeLines(st, tmp_rmd)
  }
  close(tmp_rmd)
  # Open the new notebook in the RStudio editor.
  navigateToFile(paste0(tmp_wd,"/", file_name))
}
#' @title render function for Japanese e-labnotebook in PDF
#' @importFrom rmarkdown render
#' @importFrom rmarkdown pdf_document
#' @importFrom stringr str_replace
#' @param Rmd_file file name of R Markdown file
#' @export
render_elnjp_pdf <- function(Rmd_file) {
  # Convert the Rmd file to PDF using the packaged XeLaTeX template
  # (XeLaTeX is required for Japanese text).
  template_tex_file <- system.file("rmarkdown/templates/eln_jp/resources/eln_jp.tex",
                                   package = "eln4Rmd")
  format_pdf <- pdf_document(
    latex_engine = "xelatex",
    template = template_tex_file,
    highlight = "tango")
  # Mark the custom format as inheriting pdf_document behaviour.
  format_pdf$inherits <- "pdf_document"
  render(Rmd_file, format_pdf)
}
#' @title upload Japanese e-labnotebook to OSF
#' @importFrom osfr osf_retrieve_node
#' @importFrom osfr osf_upload
#' @importFrom stringr str_replace
#' @param add_name If you want to add the name after the date as the file name,
#' add this argument.
#' @param replace_date If you want to use the specified date as the file name
#' instead of today's date, add this argument.
#' @param eln_osf URL of pdf directory in OSF
#' @param rc_osf OSF node of the Research Compendium (senshuRmd) to back up;
#' when set, the e-labnotebook is looked up in the "labnote" directory and the
#' whole working directory is uploaded as a backup.
#' @export
up_elnjp_osf <- function(add_name = FALSE, replace_date = FALSE, eln_osf = FALSE, rc_osf = FALSE){
  # At least one destination (labnote PDF node or compendium node) is needed.
  if(missing(eln_osf) & missing(rc_osf)){
    stop("eln_osf\u304brc_osf\u306b\u5165\u529b\u3092\u3057\u3066\u304f\u3060\u3055\u3044\u3002")
  }
  # In a research compendium the PDFs live under labnote/.
  tmp_wd <- getwd()
  if(rc_osf != FALSE){
    tmp_wd = paste0(tmp_wd, "/labnote")
  }
  # Date part of the file name: today (JST) unless overridden.
  if(replace_date == FALSE){
    date_name <- strsplit(paste0(as.POSIXlt(Sys.time(), format="%Y-%m-%d %H:%M:%S", tz="Japan")), " +")[[1]][1]
  }else{
    date_name <- replace_date
  }
  # PDF name: "<date>.pdf" or "<date>_<add_name>.pdf".
  if(add_name == FALSE){
    pdf_file_name <- paste0(date_name, ".pdf")
  }else{
    pdf_file_name <- paste0(date_name, "_" ,add_name, ".pdf")
  }
  # Upload the notebook PDF (overwriting any previous version).
  if(eln_osf != FALSE){
    labnote_pdf <- osf_retrieve_node(eln_osf)
    osf_upload(labnote_pdf, path = paste0(tmp_wd,"/",pdf_file_name), conflicts = "overwrite")
  }
  # Back up the whole working directory to the compendium node.
  if(rc_osf != FALSE){
    rc_component <- osf_retrieve_node(rc_osf)
    osf_upload(rc_component, path = paste0(getwd(), "/"), recurse = TRUE, conflicts = "overwrite")
  }
}
#' @title upload Japanese e-labnotebook to GitHub
#' @importFrom gert git_add
#' @importFrom gert git_commit_all
#' @importFrom gert git_push
#' @importFrom gert git_info
#' @importFrom gert git_status
#' @importFrom stringr str_replace
#' @param add_name If you want to add the name after the date as the file name,
#' add this argument.
#' @param replace_date If you want to use the specified date as the file name
#' instead of today's date, add this argument.
#' @param rc If you are using Research Compendium of senshuRmd,
#' you can create an e-labnotebook file in the "labnote" directory from the current directory.
#' In that case, please set rc to TRUE.
#' @export
up_elnjp_git <- function(add_name = FALSE, replace_date = FALSE, rc = FALSE) {
  # Create the pdf/ archive directory next to the notebooks if missing.
  tmp_wd <- getwd()
  if(rc == TRUE){
    tmp_wd = paste0(tmp_wd, "/labnote")
  }
  if(!dir.exists(file.path(tmp_wd, "pdf"))){
    dir.create(file.path(tmp_wd, "pdf"), showWarnings = FALSE)
  }
  # Date part of the file name: today (JST) unless overridden.
  if(replace_date == FALSE){
    date_name <- strsplit(paste0(as.POSIXlt(Sys.time(), format="%Y-%m-%d %H:%M:%S", tz="Japan")), " +")[[1]][1]
  }else{
    date_name <- replace_date
  }
  # Base name: "<date>" or "<date>_<add_name>" (matches the PDF helpers).
  if(add_name == FALSE){
    file_name <- date_name
  }else{
    file_name <- paste0(date_name, "_" ,add_name)
  }
  # Archive a copy of the rendered PDF.
  file.copy(paste0(tmp_wd,"/",file_name,".pdf"),
            paste0(tmp_wd,"/pdf/",file_name,".pdf"), overwrite = TRUE)
  # Stage everything, commit with a Japanese message, and push.
  git_add(git_status()$file)
  git_commit_all(paste0(file_name, "\u306e\u30e9\u30dc\u30ce\u30fc\u30c8\u3092\u4f5c\u6210\u3057\u307e\u3057\u305f\u3002\u95a2\u9023\u3059\u308b\u30d5\u30a1\u30a4\u30eb\u3082\u30b3\u30df\u30c3\u30c8\u3057\u307e\u3059"))
  git_push()
}
|
eaba4b5dd2dc505d508c53a1a7ba56a0757e4b77 | 97a38f09dce88a8460737ab6cf4e2b2335624125 | /man/coupled2_CASPF.Rd | 94587dc49b4a9b1c8ea55b86d33d55505f08c20b | [] | no_license | jeremyhengjm/UnbiasedScore | a7959eb3ddb892a522b413a3eae47d4573309bd3 | b036ac899721ea240a2a0a67440947d1c0698ac9 | refs/heads/master | 2023-04-20T09:54:44.835291 | 2021-05-10T13:34:29 | 2021-05-10T13:34:29 | 257,778,224 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,584 | rd | coupled2_CASPF.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coupled2_CASPF.R
\name{coupled2_CASPF}
\alias{coupled2_CASPF}
\title{2-way Coupled Conditional Ancestor Sampling Particle Filter}
\usage{
coupled2_CASPF(
model,
theta,
discretization,
observations,
nparticles,
resampling_threshold = 1,
coupled_resampling,
ref_trajectory1,
ref_trajectory2,
treestorage = FALSE
)
}
\arguments{
\item{model}{a list representing a hidden Markov model, e.g. \code{\link{hmm_ornstein_uhlenbeck}}}
\item{theta}{a vector of parameters as input to model functions}
\item{discretization}{list containing stepsize, nsteps, statelength and obstimes}
\item{observations}{a matrix of observations, of size nobservations x ydimension}
\item{nparticles}{number of particles}
\item{resampling_threshold}{ESS proportion below which resampling is triggered (always resample at observation times by default)}
\item{coupled_resampling}{a 2-way coupled resampling scheme, such as \code{\link{coupled2_maximal_independent_residuals}}}
\item{ref_trajectory1}{a matrix of first reference trajectory, of size xdimension x statelength}
\item{ref_trajectory2}{a matrix of second reference trajectory, of size xdimension x statelength}
\item{treestorage}{logical specifying tree storage of Jacob, Murray and Rubenthaler (2013);
if missing, this function store all states and ancestors}
}
\value{
a pair of new trajectories stored as matrices of size xdimension x statelength.
}
\description{
Runs two coupled conditional particle filters (at each discretization level).
}
|
618ebc0a09fb29eea6ec6f4a44fe4516ba94efe6 | 0d3573551bfdd83d3e7e7570e18ab3669ef5e968 | /R/helper_functions.R | d41be90ee183e76f844eb0e0bb8a060745f8da08 | [] | no_license | amulmgr/TreeTools | ce313d3082c62e904e18d9a97664ae940b932d89 | 69bc4f0aa6d1d813023fb757e87b937a66d0051b | refs/heads/master | 2023-07-30T09:25:41.595757 | 2021-09-17T07:11:58 | 2021-09-17T07:11:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,288 | r | helper_functions.R | #' Quickly sample
#'
#' `SampleOne()` is a fast alternative to [`sample()`] that avoids some checks.
#'
#' @param x A vector to sample.
#' @param len (Optional) Integer specifying length of `x`.
#'
#' @return `SampleOne()` returns a length one vector, randomly sampled from `x`.
#'
#' @examples
#' SampleOne(9:10)
#' SampleOne(letters[1:4])
#'
#' @template MRS
#' @keywords internal
#' @export
SampleOne <- function (x, len = length(x)) {
x[sample.int(len, 1L, FALSE, NULL, FALSE)]
}
#' Add tree to start of list
#'
#' `UnshiftTree()` adds a phylogenetic tree to the start of a list of trees.
#' This is useful where the class of a list of trees is unknown, or where
#' names of trees should be retained.
#'
#' Caution: adding a tree to a `multiPhylo` object whose own attributes apply
#' to all trees, for example trees read from a Nexus file, causes data to be
#' lost.
#'
#' @param add Tree to add to the list, of class [`phylo`][ape::read.tree].
#' @param treeList A list of trees, of class `list`,
#' [`multiPhylo`][ape::multiphylo], or, if a single tree,
#' [`phylo`][ape::read.tree].
#'
#' @return `UnshiftTree()` returns a list of class `list` or `multiPhylo`
#' (following the original class of `treeList`), whose first element is the
#' tree specified as `add.
#'
#' @seealso
#' [`c()`] joins a tree or series of trees to a `multiPhylo` object, but loses
#' names and does not handle lists of trees.
#'
#' @examples
#' forest <- as.phylo(0:5, 6)
#' tree <- BalancedTree(6)
#'
#' UnshiftTree(tree, forest)
#' UnshiftTree(tree, tree)
#' @template MRS
#'
#' @export
UnshiftTree <- function (add, treeList) {
if (inherits(treeList, 'multiPhylo')) {
structure(c(list(add), lapply(treeList, I)), class = 'multiPhylo')
} else if (inherits(treeList, 'phylo')) {
structure(list(add, treeList), class = 'multiPhylo')
} else { # including: if (is.list(trees)) {
c(list(add), treeList)
}
}
#' Apply a function that returns 64-bit integers over a list or vector
#'
#' Wrappers for members of the [`lapply()`] family intended for use when a
#' function `FUN` returns a vector of `integer64` objects.
#' `vapply()`, `sapply()` or `replicate()` drop the `integer64` class,
#' resulting in a vector of numerics that require conversion back to
#' 64-bit integers. These functions restore the missing `class` attribute.
#'
#' @inheritParams base::lapply
#' @param FUN.LEN Integer specifying the length of the output of `FUN`.
#' @details For details of the underlying functions, see [`base::lapply()`].
#' @examples
#' sapply64(as.phylo(1:6, 6), as.TreeNumber)
#' vapply64(as.phylo(1:6, 6), as.TreeNumber, 1)
#' set.seed(0)
#' replicate64(6, as.TreeNumber(RandomTree(6)))
#' @template MRS
#' @seealso [`bit64::integer64()`][bit64-package]
#' @export
sapply64 <- function (X, FUN, ..., simplify = TRUE, USE.NAMES = TRUE) {
structure(sapply(X, FUN, ..., simplify, USE.NAMES), class = 'integer64')
}
#' @rdname sapply64
#' @export
vapply64 <- function (X, FUN, FUN.LEN = 1, ...) {
  # Template describing the numeric result expected from each FUN call.
  template <- numeric(FUN.LEN)
  raw <- vapply(X, FUN, FUN.VALUE = template, ...)
  # Restore the integer64 class that vapply() strips.
  class(raw) <- 'integer64'
  raw
}
#' @rdname sapply64
#' @export
replicate64 <- function (n, expr, simplify = "array") {
  # Re-create replicate()'s non-standard evaluation: capture `expr`
  # unevaluated, wrap it in a function built in the caller's frame so its
  # free variables resolve there, then delegate to sapply64() so the
  # integer64 class is restored on the result.
  sapply64(integer(n), eval.parent(substitute(function (...) expr)),
           simplify = simplify)
}
|
e1b36fb96be743e371ba0108875e395ed9bd7db7 | 34da6a49cf008e4fdef793a4e35bdd79f99b86b8 | /R/get.movie.info.R | d1def8401f465483e38bbdc1b1cba6d10a57dacd | [] | no_license | ljtyduyu/Rdouban | 76ed05b6cf89018378d8f72d0f790db4d1c3ed79 | 60bc136447ab4b8466172ca61d26ed85909f28fc | refs/heads/master | 2020-03-20T21:17:51.039274 | 2015-01-06T15:47:24 | 2015-01-06T15:47:24 | 137,733,735 | 0 | 2 | null | 2018-06-18T09:35:31 | 2018-06-18T09:35:31 | null | UTF-8 | R | false | false | 996 | r | get.movie.info.R | get.movie.info<-function(movieid){
u=paste0("https://api.douban.com/v2/movie/",movieid)
#p=getURL(u,ssl.verifypeer = FALSE)
p<-.refreshURL(u,ssl.verifypeer = FALSE)
reslist <- fromJSON(p)
title<-reslist[["title"]]
author<-unlist(reslist[["author"]]);names(author)<-NULL
rating<-unlist(reslist[["rating"]])
tags<-reslist[["tags"]]
summary<-reslist[["summary"]]
tags<-data.frame(tag_label=sapply(tags,function(x)x[["name"]]),
tag_freq=sapply(tags,function(x)x[["count"]]),
stringsAsFactors=F)
image<-reslist[['image']]
href<-reslist[["alt"]]
reslist$title<-NULL;reslist$author<-NULL;
reslist$rating<-NULL;reslist$tags<-NULL;
reslist$summary<-NULL;reslist$alt<-NULL
reslist$image<-NULL
attribute=reslist$attrs
list(title=title,
author=author,
rating=as.double(rating),
summary=summary,
tags=tags,
href=href,
image=image,
attribute= attribute)
} |
87e08857ef52cd289f493dae3de54bc3c04051ed | eb673959eac2da642a758d0e49ba9a28a0568e2f | /0611_files.R | ffc9f51076dd95680837ad16a086ba5b11a7819a | [] | no_license | ryu-jihye/Digital-ConvergenceProgram2 | e021ee4938e79c4b75b7480ac8cf5ccd1c506633 | a4aa8ac597f7210a2cc9b507e9f9ed281be93dbf | refs/heads/main | 2023-06-18T19:06:17.045811 | 2021-07-15T14:52:24 | 2021-07-15T14:52:24 | 364,277,829 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,495 | r | 0611_files.R | cafe <- list(espresso = c(4, 5, 3, 6, 5, 4, 7),
americano = c(63, 68, 64, 68, 72, 89, 94),
latte = c(61, 70, 59, 71, 71, 92, 88),
price = c(2.0, 2.5, 3.0),
menu = c('espresso', 'americano', 'latte'))
cafe$menu <- factor(cafe$menu)
names(cafe$price) <- cafe$menu
sale.espresso <- cafe$price['espresso'] * cafe$espresso
sale.americano <- cafe$price['americano'] * cafe$americano
sale.latte <- cafe$price['latte'] * cafe$latte
sale.day <- sale.espresso + sale.americano + sale.latte
names(sale.day) <- c('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
sum(sale.day)
sale.mean <- mean(sale.day)
names(sale.day[sale.day] >= sale.mean)
-------------------------------------------------------
accident <- c(31, 26, 42, 47, 50, 54, 70, 66, 43, 32, 32, 22)
names(accident) <- c('M1', 'M2', 'M3', 'M4', 'M5', 'M6', 'M7', 'M8',
'M9', 'M10', 'M11', 'M12')
accident
sum(accident)
max(accident)
min(accident)
accident*0.9
accident[accident>=50]
month.50 <- accident[accident >= 50]
names(month.50)
names(accident[accident >= 50])
length(accident[accident<50])
M6.acc <- accident[6]
accident[accident > M6.acc]
accident[accident > accident[6]]
-------------------------------------------------------
class(trees)
str(trees)
girth.mean <- mean(trees$Girth)
candidate <- subset(tress, Girth >= girth.mean & Height > 80 & Volume > 50)
candidate
nrow(candidate)
|
1342a300cd24ad6c4d0834f8d3105a25bd61efda | 86cc7cca25ad3cec2b355a7f29dcdbb12e288f0a | /tests/testthat.R | df6d1d780d563a90cc556791853ef7d2193bd384 | [] | no_license | t-carroll/schex | 64ddb9f6f471536e821edcb734b51ee7176cf42c | 12b078cb85295671e068f119b648ba9bd94f4fb5 | refs/heads/master | 2022-11-05T06:33:40.353876 | 2020-06-15T19:39:09 | 2020-06-15T19:39:09 | 272,527,867 | 0 | 0 | null | 2020-06-15T19:37:34 | 2020-06-15T19:37:33 | null | UTF-8 | R | false | false | 54 | r | testthat.R | library(testthat)
library(schex)
test_check("schex")
|
4ade3ed2b08529bdd8e2557e618a6fbc4cb1190e | dbdca1892808ac0284875f692297d5cb8d1abd10 | /buyside/main.R | 16f9279b28f1b6b27ad0c2ed50f00ae2db5186bf | [] | no_license | mfzhang/lehedge | 0cec9a68fbf35fef23f40e099576761f0a52f7d0 | 7c4cc6c8a8fbaeb03cc607f09f121e6285d0fe2a | refs/heads/master | 2016-09-09T13:18:01.540110 | 2014-10-13T23:06:10 | 2014-10-13T23:06:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 133 | r | main.R | source('buyside/best_windows.R')
source('buyside/build_fat_training.R')
#source('buyside/write_images.R')
source('buyside/labels.R')
|
1dd95064df5adaa96cb29366a166159591439bdc | b351240b69be9faf918e17814580a47eb7e8475d | /workout03/binomial/man/bin_variable.Rd | 44ef136d6b54a0dd00d827856996cd5fb52d811a | [] | no_license | stat133-sp19/hw-stat133-bveeramani | 49b1a7c685ba9e8b35899442d41d7becdcd5da67 | 681ac3dbc6551521553fba33993f9380454fa3ca | refs/heads/master | 2020-04-28T14:06:44.632989 | 2019-04-30T06:08:50 | 2019-04-30T06:08:50 | 175,327,896 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 599 | rd | bin_variable.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binomial.R
\name{bin_variable}
\alias{bin_variable}
\title{Binomial random variable constructor}
\usage{
bin_variable(trials, prob)
}
\arguments{
\item{trials}{number of I.I.D. Bernoulli trials}
\item{prob}{the shape parameter of each I.I.D. Bernoulli trial}
}
\value{
an object representing a binomial random variable
}
\description{
Creates a binomial random variable with the specified shape
parameters.
}
\examples{
binvar = bin_variable(trials = 10, prob = 0.5)
cdf = bin_cumulative(binvar$trials, binvar$prob)
}
|
ba7e214921ada727b0244c67f342aeaf5f02bb26 | dbb290d87a730cd5b9154025f71ba65ef149bd6c | /R/FW1_GAMLSS.R | 04020919a8c605ad572a8b88adc1f350df11fce7 | [] | no_license | fabiopviera/Flexible-Weibull | 377188fd34f5d950c8b4ee13d124330ae57db480 | 1a69766d5d0c50bc6f0e098e09a5c8a6a24dbbc9 | refs/heads/main | 2023-07-03T03:57:53.350366 | 2021-08-05T01:39:20 | 2021-08-05T01:39:20 | 378,260,458 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,802 | r | FW1_GAMLSS.R | library(gamlss)
library(gamlss.dist)
FW1 <- function (mu.link="log", sigma.link="log")
{
mstats <- checklink("mu.link", "flexible Weibull 1", substitute(mu.link),c("1/mu^2", "log ", "identity"))
dstats <- checklink("sigma.link", "flexible Weibull 1", substitute(sigma.link),c("inverse", "log", "identity"))
structure(list(family = c("FW1 ", "flexible Weibull 1"),
parameters = list(mu=TRUE, sigma=TRUE),
nopar = 2,
type = "Continuous",
mu.link = as.character(substitute(mu.link)),
sigma.link = as.character(substitute(sigma.link)),
mu.linkfun = mstats$linkfun,
sigma.linkfun = dstats$linkfun,
mu.linkinv = mstats$linkinv,
sigma.linkinv = dstats$linkinv,
mu.dr = mstats$mu.eta,
sigma.dr = dstats$mu.eta,
dldm = function(y,mu,sigma){ #----------------------------------------------------- ok
nd1 = gamlss:::numeric.deriv(dFW1(y, mu, sigma,log = T),"mu", delta = 1e-04)
dldm = as.vector(attr(nd1, "gradient"))
dldm
},
d2ldm2 = function(y,mu,sigma){#----------------------------------------------------- ok
nd1 = gamlss:::numeric.deriv(dFW1(y, mu, sigma,log = T),"mu", delta = 1e-04)
dldm = as.vector(attr(nd1, "gradient"))
d2ldm2 = -dldm * dldm
},
dldd = function(y,mu,sigma){#----------------------------------------------------- ok
nd1 = gamlss:::numeric.deriv(dFW1(y, mu, sigma,log =T),"sigma", delta = 1e-04)
dldd = as.vector(attr(nd1, "gradient"))
dldd
} ,
d2ldd2 = function(y,mu,sigma){#----------------------------------------------------- ok
nd1 = gamlss:::numeric.deriv(dFW1(y, mu, sigma,log =T),"sigma", delta = 1e-04)
dldd = as.vector(attr(nd1, "gradient"))
d2ldd2 = -dldd*dldd
d2ldd2
},
d2ldmdd = function(y,mu,sigma){#----------------------------------------------------- ok
nd1 = gamlss:::numeric.deriv(dFW1(y, mu, sigma,log= TRUE), "mu", delta = 1e-04)
dldm = as.vector(attr(nd1, "gradient"))
nd1 = gamlss:::numeric.deriv(dFW1(y, mu, sigma,log=TRUE), "sigma", delta = 1e-04)
dldd = as.vector(attr(nd1, "gradient"))
d2ldmdd = -dldm * dldd
d2ldmdd
},
#----------------------------------------------------- ok
G.dev.incr = function(y,mu,sigma,...) -2*dFW1(y,mu=mu,sigma=sigma,log=TRUE),
rqres = expression(rqres(pfun="pFW1", type="Continuous", y=y, mu=mu, sigma=sigma)),
mu.initial = expression( mu <- rep(median(y),length(y))),
sigma.initial = expression( sigma <- rep(min(y)/(10*max(y)),length(y))),
mu.valid = function(mu) all(mu > 0),
sigma.valid = function(sigma) all(sigma > 0),
y.valid = function(y) all(y>0)
),
class = c("gamlss.family","family"))
}
#--------------------------------------------------------------
dFW1 <- function(x, mu = 1, sigma = 0.5, log = FALSE)
{
  # Density of the median-parameterized flexible Weibull distribution.
  # b is chosen so that sigma*mu - b/mu = log(log(2)), i.e. pFW1(mu) = 0.5
  # and mu is the median of the distribution.
  #
  # x     : vector of quantiles (x >= 0)
  # mu    : median parameter, mu > 0
  # sigma : shape parameter, sigma > 0
  # log   : if TRUE, return the log-density
  if (any(mu <= 0)) stop(paste("mu must be positive", "\n", ""))
  if (any(sigma <= 0)) stop(paste("sigma must be positive", "\n", ""))
  if (any(x < 0)) stop(paste("x must be positive", "\n", ""))
  ll <- log(log(2))
  b <- (((2*sigma*mu - ll)^2) - ll^2)/(4*sigma)
  u <- sigma*x - b/x
  # Work on the log scale: log f(x) = u - e^u + log(sigma + b/x^2).
  # The previous implementation computed f first and then took log(f),
  # which underflows to log(0) = -Inf in the tails even where the
  # log-density is perfectly representable.
  logfx <- u - exp(u) + log(sigma + b/x^2)
  if (log) logfx else exp(logfx)
}
#--------------------------------------------------------------
pFW1 <- function(q, mu = 1, sigma = 0.5, lower.tail = TRUE, log.p = FALSE)
{
  # CDF of the median-parameterized flexible Weibull distribution:
  #   F(q) = 1 - exp(-exp(sigma*q - b/q)),
  # with b set so that F(mu) = 0.5 (mu is the median).
  #
  # q          : vector of quantiles (q >= 0)
  # mu, sigma  : positive parameters (median, shape)
  # lower.tail : if FALSE, return the survival function P(X > q)
  # log.p      : if TRUE, return probabilities on the log scale
  if (any(mu <= 0)) stop(paste("mu must be positive", "\n", ""))
  if (any(sigma <= 0)) stop(paste("sigma must be positive", "\n", ""))
  if (any(q < 0)) stop(paste("q must be positive", "\n", ""))
  ll <- log(log(2))
  b <- (((2*sigma*mu - ll)^2) - ll^2)/(4*sigma)
  s <- exp(sigma*q - b/q)
  # -expm1(-s) computes 1 - exp(-s) without cancellation for small s, and
  # the survival function is computed directly as exp(-s) rather than as
  # 1 - (1 - exp(-s)), which loses all precision in the upper tail.
  cdf <- if (lower.tail) -expm1(-s) else exp(-s)
  if (log.p) log(cdf) else cdf
}
#--------------------------------------------------------------
hFW1 <- function(x, mu = 1, sigma = 0.5, lower.tail = TRUE, log.p = FALSE)
{
  # Hazard function h(x) = f(x)/(1 - F(x)) of the FW1 distribution.
  # With u = sigma*x - b/x, f(x) = exp(u - e^u)*(sigma + b/x^2) and
  # 1 - F(x) = exp(-e^u), so the ratio simplifies exactly to
  #   h(x) = exp(sigma*x - b/x) * (sigma + b/x^2).
  # Evaluating this closed form avoids the 0/0 = NaN that the previous
  # dFW1(x)/(1 - pFW1(x)) form returns once both density and survival
  # underflow to zero in the far upper tail.
  # lower.tail and log.p are accepted for interface consistency with the
  # other *FW1 functions but (as before) are unused.
  if (any(mu <= 0)) stop(paste("mu must be positive", "\n", ""))
  if (any(sigma <= 0)) stop(paste("sigma must be positive", "\n", ""))
  if (any(x < 0)) stop(paste("x must be positive", "\n", ""))
  ll <- log(log(2))
  b <- (((2*sigma*mu - ll)^2) - ll^2)/(4*sigma)
  exp(sigma*x - b/x)*(sigma + b/x^2)
}
#--------------------------------------------------------------
qFW1 <- function(p, mu = 1, sigma = 0.5, lower.tail = TRUE, log.p = FALSE )
{
  # Quantile function: inverts F(y) = 1 - exp(-exp(sigma*y - b/y)).
  # Writing u = log(-log(1 - p)), the quantile solves the quadratic
  # sigma*y^2 - u*y - b = 0; the positive root is returned.
  if (any(mu <= 0)) stop(paste("mu must be positive", "\n", ""))
  if (any(sigma <= 0)) stop(paste("sigma must be positive", "\n", ""))
  if (log.p) p <- exp(p)
  if (!lower.tail) p <- 1 - p
  if (any(p < 0)|any(p > 1)) stop(paste("p must be between 0 and 1", "\n", ""))
  ll <- log(log(2))
  b <- (((2*sigma*mu - ll)^2) - ll^2)/(4*sigma)
  u <- log(-log(1 - p))
  (u + sqrt(u^2 + 4*sigma*b))/(2*sigma)
}
#--------------------------------------------------------------
rFW1 <- function(n, mu = 1, sigma = 0.5, nu = 1)
{
  # Random generation by inversion sampling: draw u ~ Uniform(0, 1) and
  # push it through the quantile function qFW1.
  # nu is accepted but unused (kept for signature compatibility).
  if (any(mu <= 0)) stop(paste("mu must be positive", "\n", ""))
  if (any(sigma <= 0)) stop(paste("sigma must be positive", "\n", ""))
  qFW1(runif(ceiling(n)), mu = mu, sigma = sigma)
}
#--------------------------------------------------------------
|
36827432e39726f9edad8d9fbc3fe0613565fd0e | dbfe5ce272e204a8e1663ced35c9d48ef4870496 | /man/pd.var.Rd | a6dff3b043df1d8cf5e3a29f562656ec5afce0aa | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hmito/hmRLib | fac91a4e2ddfcd899283ec0b63c87c31965fb17f | f2cfd54ea491ee79d64f7dd976a94086092b8ef5 | refs/heads/master | 2023-08-31T07:21:31.825394 | 2023-08-28T10:02:07 | 2023-08-28T10:02:07 | 41,907,654 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 412 | rd | pd.var.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/statistic.R
\name{pd.var}
\alias{pd.var}
\title{Return variance of the probability distribution}
\usage{
pd.var(x, pd)
}
\arguments{
\item{x}{axis value}
\item{pd}{probability distribution or histogram data}
}
\value{
variance of the given pd
}
\description{
Calculate the variance of the probability distribution or histogram data.
}
|
b2d7018dac4aa89ea3944529904bb7e4d78d09ca | 0a906cf8b1b7da2aea87de958e3662870df49727 | /distr6/inst/testfiles/C_EmpiricalMVPdf/libFuzzer_C_EmpiricalMVPdf/C_EmpiricalMVPdf_valgrind_files/1610036522-test.R | 1d8af5b5519c9ce3a5892e627d7318306b4e3ac3 | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 544 | r | 1610036522-test.R | testlist <- list(data = structure(c(2.05226840064922e-289, 1.03766507280536e-311, 3.25515595954408e-260, 1.82935631644629e-260, 1.42602581597035e-105, 1.42602581596996e-105, 1.42602581598051e-105), .Dim = c(7L, 1L )), x = structure(c(NaN, 2.11755751135623e+214, 2.29691280661499e+161, NA, 9.86217156996503e+248, 3.79212874880736e+146, 1.42602581597039e-105, NA, 6.92379100084546e-317, 1.46200557661842e-105, 1.42602567899554e-105, 1.53797105130516e-304), .Dim = c(6L, 2L)))
# Auto-generated fuzzer regression replay: feed the stored argument list
# (testlist, defined above) to the compiled C_EmpiricalMVPdf routine and
# print the structure of the result (run under valgrind for memory checks).
result <- do.call(distr6:::C_EmpiricalMVPdf,testlist)
str(result)
96641eac76a6910e3f2a8557234d15db11005401 | 1a63c15398a830a9447d86828e76cc2790e6af1e | /R/tag.R | c7747aef34e22aa927aa2db78c23281d609216bf | [
"MIT"
] | permissive | sckott/discgolf | 327fff393389b1f955f85ce50b88265263a49c95 | 7fd0de8878ddc2a014b358def8ba1580165be5e6 | refs/heads/master | 2021-07-12T09:01:07.138933 | 2021-02-26T21:02:48 | 2021-03-01T16:35:14 | 28,190,714 | 7 | 3 | NOASSERTION | 2021-03-01T16:35:14 | 2014-12-18T16:11:01 | R | UTF-8 | R | false | false | 195 | r | tag.R | #' Tag data
#'
#' @param tag A tag name
#' @template args
#' @examples \dontrun{
#' tag('r')
#' tag('geospatial')
#' }
tag <- function(tag, ...) {
disc_GET(sprintf("tags/%s.json", tag), ...)
}
|
4da957295e81ef6e955414a4a52bfa030ba7797e | 8c81fde8ed23fa9e88e8a5caf34f4d12c17a42d4 | /R/arthrisis_school.R | faaeb78b786b67100a2d726e27f9924640c32b53 | [] | no_license | swehip/shprsumfun | 614a7ced82640c0005bc04a35b22bb75a3450cbb | e5b2df4caaea49fcba528a22857b00ca67f17465 | refs/heads/master | 2022-11-09T05:44:31.455275 | 2022-10-19T13:29:48 | 2022-10-19T13:29:48 | 146,577,073 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,263 | r | arthrisis_school.R | #' Arthrisis School attendance
#'
#' Number of arthrisis patients that attended arthrisis school.
#' @param period Year/years of primary operation.
#' @return Data frame with units, period, number of THR (only arthrisis diagnosis, M16.0-M16.9),
#' number of arthrisis question answered and number of participants.
#' @examples
#' # Get number of arthrisis patients that attended arthrisis school for 2018
#' arthrisis_school()
#' # Get number of arthrisis patients that attended arthrisis school for 2016-2017
#' arthrisis_school(2016:2017)
#' # Get number of arthrisis patients that attended arthrisis school for 1900-2020
#' # (all existing years)
#' arthrisis_school(1900:2020)
#' @export
arthrisis_school <- function(period = lubridate::year(Sys.Date())){
require(dplyr)
if(!"dataOperations" %in% ls(envir = .GlobalEnv)){
load("//rc-r/r$/Datalayers/SHPR/.RData", envir = .GlobalEnv)
# rm(list = c("dataComponents", "dataOperations_headers", "dataOperations_vlab",
# "dataProfiles_factors", "dataProfiles_headers", "dataProfiles_vlab",
# "dataPROMAfter_factors", "dataPROMAfter_headers", "dataPROMAfter_vlab",
# "dataPROMBefore_factors", "dataPROMBefore_headers", "dataPROMBefore_vlab",
# "Descriptors", "Excerpt", "Get", "Get.ValueLabels", "Get.VariableHeaders",
# "KV", "Map", "Merge", "R.MakeFactors"), envir = .GlobalEnv)
}
period_string <- ifelse(length(period) == 1, period, paste(period[1], period[length(period)], sep = "-"))
dataOperations$P_ProstType[is.na(dataOperations$P_ProstType)] <- 1
dataPROMBefore <- dplyr::select(dataPROMBefore, -DateCorrectness, -DateOfDeath, -SubjectID)
hosp_dat <- dplyr::select(dataOperations, SubjectKey, P_SurgDate, P_Side, P_Unit, P_ProstType, P_Diagnosis) %>%
dplyr::mutate(P_SurgDate = as.Date(P_SurgDate, tz = "CET"), Operation_year = lubridate::year(P_SurgDate)) %>%
dplyr::filter(Operation_year %in% period,
P_ProstType == 1,
grepl("^M16", P_Diagnosis)) %>%
dplyr::arrange(SubjectKey, P_Side, P_SurgDate) %>%
dplyr::distinct(SubjectKey, P_Side, .keep_all = TRUE) %>%
dplyr::mutate(Unit = attr_to_factor(P_Unit))
prims <- dplyr::distinct(hosp_dat, SubjectKey, P_SurgDate)
# Preop
suppressWarnings(dataPROMBefore2 <- left_join(dataPROMBefore, prims, by = "SubjectKey"))
dataPROMBefore2 <- dplyr::mutate(dataPROMBefore2,
reg_time = as.numeric(difftime(P_SurgDate, PREP_Date, units = "days"))) %>%
dplyr::filter(between(reg_time, 0, 180)) %>%
dplyr::arrange(SubjectKey, P_SurgDate, reg_time) %>%
dplyr::distinct(SubjectKey, P_SurgDate, .keep_all = TRUE)
suppressWarnings(hosp_dat <- left_join(hosp_dat, dataPROMBefore2, by = c("SubjectKey", "P_SurgDate")))
hosp_dat %>%
dplyr::group_by(Unit) %>%
dplyr::summarise(Operation_year = period_string,
OA_THR = n(),
ArthrisisSchool_Answered = sum(!is.na(PREP_ArthrisisSchool)),
Went_ArthrisisSchool = sum(PREP_ArthrisisSchool == 1, na.rm = TRUE)) %>%
dplyr::ungroup() %>%
dplyr::arrange(as.character(Unit)) %>%
as.data.frame() -> prom_dat
return(prom_dat)
}
|
6039365b4704a246393b5915fee0bec4dce5e9ce | 2b5c9578a769743cd993e449e19039486c3416b7 | /final/03-appendix.R | c232903bdc9adb67bb61076b41c3d21205dbf2b3 | [] | no_license | pwkraft/knowledge | 3eebd4d8205058c7b9aed92405f0ee6caeb4d7cc | 71d6dd6f82c017d32aca93dd2bd2106cbf46ee64 | refs/heads/master | 2023-07-07T10:30:41.230585 | 2023-07-04T08:18:39 | 2023-07-04T08:18:39 | 55,650,963 | 7 | 0 | null | null | null | null | UTF-8 | R | false | false | 30,411 | r | 03-appendix.R | # ========================================================================= #
# Project: Women Also Know Stuff (APSR)
# - Script: Additional robustness checks reported in the Appendix
# - Author: Patrick Kraft (patrickwilli.kraft@uc3m.es)
# ========================================================================= #
# Load raw data, packages, and custom functions ---------------------------
source(here::here("00-func.R"))
load(here("data/processed.Rdata"))
# Figure B.1: Total word count across all open-ended responses ------------
## Compute average word count (zero-word non-responses are excluded, so the
## dashed reference lines reflect actual respondents only)
wc_mean <- tibble(
  study = factor(1:6, labels = c("2020 ANES", "2016 ANES", "2012 ANES",
                                 "2018 CES", "2015 YouGov", "Swiss Survey")),
  wc = c(mean(anes2020$wc[anes2020$wc != 0]), mean(anes2016$wc[anes2016$wc != 0]),
         mean(anes2012$wc[anes2012$wc != 0]), mean(ces2018$wc[ces2018$wc != 0]),
         mean(yg2015$wc[yg2015$wc != 0]), mean(swiss2012$wc[swiss2012$wc != 0]))
)

## Create figure: histograms of word counts by study; the three Swiss
## language subsamples are pooled into a single "Swiss Survey" facet
bind_rows(
  transmute(anes2020, wc = wc, study = 1),
  transmute(anes2016, wc = wc, study = 2),
  transmute(anes2012, wc = wc, study = 3),
  transmute(ces2018, wc = wc, study = 4),
  transmute(yg2015, wc = wc, study = 5),
  transmute(swiss2012_fr, wc = wc, study = 6),
  transmute(swiss2012_de, wc = wc, study = 6),
  transmute(swiss2012_it, wc = wc, study = 6)
) %>%
  filter(wc != 0) %>%
  mutate(study = factor(study, labels = c("2020 ANES", "2016 ANES", "2012 ANES",
                                          "2018 CES", "2015 YouGov", "Swiss Survey"))) %>%
  ggplot(aes(wc)) + geom_histogram(fill = "grey") + theme_classic(base_size = 8) +
  theme(panel.border = element_rect(fill=NA)) +
  facet_wrap(.~study, ncol = 3, scales = "free") +
  geom_vline(aes(xintercept = wc), colour="red", linetype = "longdash",data = wc_mean) +
  ylab("Number of Respondents") + xlab("Word Count")
ggsave(here("out/appB1-wc.pdf"), width = 6, height = 2.5)

# Figure B.2: Proportion of non-response comparing male and female --------
## Create figure: share of empty responses (wc == 0) by gender with
## normal-approximation 95% confidence intervals
bind_rows(
  transmute(anes2020, noresp = wc == 0, female = female, study = 1),
  transmute(anes2016, noresp = wc == 0, female = female, study = 2),
  transmute(anes2012, noresp = wc == 0, female = female, study = 3),
  transmute(ces2018, noresp = wc == 0, female = female, study = 4),
  transmute(yg2015, noresp = wc == 0, female = female, study = 5),
  transmute(swiss2012, noresp = wc == 0, female = female, study = 6)
) %>% mutate(
  study = factor(study,
                 labels = c("2020 ANES", "2016 ANES", "2012 ANES",
                            "2018 CES", "2015 YouGov", "Swiss Survey")),
  Gender = factor(female, labels = c("Male","Female"))) %>%
  na.omit() %>%
  group_by(study, Gender) %>%
  summarize(avg = mean(noresp),
            sd = sd(noresp),
            n = n(),
            cilo = avg - 1.96*sd/sqrt(n),
            cihi = avg + 1.96*sd/sqrt(n)) %>%
  ggplot(aes(y=avg, x=Gender, ymin=cilo, ymax=cihi)) + plot_default +
  geom_bar(stat="identity", fill="grey") + geom_errorbar(width=.1) +
  facet_wrap(~study, ncol = 3) + ylab("Average Values") + xlab(NULL) +
  guides(fill="none") + scale_fill_discrete(type = c("#5e3c99", "#fdb863"))
ggsave(here("out/appB2-noresponse.pdf"), width = 6, height = 2.5)

# Figure B.3: Estimated topic proportions based on the structural --------
## Top-5 FREX terms per topic for each fitted STM; one base-graphics panel
## per survey, written directly to PDF as a 2 x 4 grid
pdf(here("out/appB3-stm_prop.pdf"), width=12, height=10)
par(mfrow = c(2,4), mar=c(4.2,0.5,2.5,0.5))
plot(ces2018disc$stm, n = 5, labeltype = "frex", text.cex = 1,
     main = paste0("2018 CES (k = ", ces2018disc$stm$settings$dim$K,")", collapse = ""))
plot(anes2020disc$stm, n = 5, labeltype = "frex", text.cex = 1,
     main = paste0("2020 ANES (k = ", anes2020disc$stm$settings$dim$K,")", collapse = ""))
plot(anes2016disc$stm, n = 5, labeltype = "frex", text.cex = 1,
     main = paste0("2016 ANES (k = ", anes2016disc$stm$settings$dim$K,")", collapse = ""))
plot(anes2012disc$stm, n = 5, labeltype = "frex", text.cex = 1,
     main = paste0("2012 ANES (k = ", anes2012disc$stm$settings$dim$K,")", collapse = ""))
plot(yg2015disc$stm, n = 5, labeltype = "frex", text.cex = 1,
     main = paste0("2015 YouGov (k = ", yg2015disc$stm$settings$dim$K,")", collapse = ""))
plot(swiss2012disc_fr$stm, n = 5, labeltype = "frex", text.cex = 1,
     main = paste0("Swiss Survey - French (k = ", swiss2012disc_fr$stm$settings$dim$K,")", collapse = ""))
plot(swiss2012disc_de$stm, n = 5, labeltype = "frex", text.cex = 1,
     main = paste0("Swiss Survey - German (k = ", swiss2012disc_de$stm$settings$dim$K,")", collapse = ""))
plot(swiss2012disc_it$stm, n = 5, labeltype = "frex", text.cex = 1,
     main = paste0("Swiss Survey - Italian (k = ", swiss2012disc_it$stm$settings$dim$K,")", collapse = ""))
dev.off()
# Figure B.4: Correlation matrix of individual components of discu --------
## Helper: pairwise correlation matrix of the three sophistication
## components (size, range, constraint) for one survey, saved as a PDF.
## The eight panels below previously used identical copy-pasted pipelines;
## this factors the shared plotting code into a single place.
plot_components <- function(df, outfile) {
  p <- df %>%
    transmute(v1 = size, v2 = range, v3 = constraint) %>%
    ggpairs(lower = list(continuous = wrap("smooth", alpha = .05, size = .2)),
            axisLabels = "none",
            columnLabels = c("Size", "Range", "Constraint")) +
    plot_default
  # Pass the plot explicitly: inside a function there is no auto-printing,
  # so relying on last_plot() would silently save the wrong figure.
  ggsave(here(outfile), plot = p, width = 2.6, height = 2.6)
}

## 2018 CES
plot_components(ces2018, "out/appB4a-ces2018_components.pdf")
## 2015 YouGov
plot_components(yg2015, "out/appB4b-yg2015_components.pdf")
## 2020 ANES
plot_components(anes2020, "out/appB4c-anes2020_components.pdf")
## 2016 ANES
plot_components(anes2016, "out/appB4d-anes2016_components.pdf")
## 2012 ANES
plot_components(anes2012, "out/appB4e-anes2012_components.pdf")
## Swiss - French
plot_components(swiss2012_fr, "out/appB4f-french_components.pdf")
## Swiss - German
plot_components(swiss2012_de, "out/appB4g-german_components.pdf")
## Swiss - Italian
plot_components(swiss2012_it, "out/appB4h-italian_components.pdf")
# Figure C.1: PreText analysis of preprocessing decisions of open- --------
## Select raw documents; seed fixed for reproducibility of the document
## sample. NOTE: preText runs in parallel on 12 cores — adjust as needed.
set.seed(12345)
preText_res <- list(ces2018, anes2020, anes2016, anes2012, yg2015,
                    swiss2012_fr, swiss2012_de, swiss2012_it) %>%
  map(oe_sample) %>%
  map(factorial_preprocessing, use_ngrams = FALSE, parallel = TRUE, cores = 12) %>%
  map(preText, parallel = TRUE, cores = 12)
names(preText_res) <- c("2018 CES", "2020 ANES", "2016 ANES", "2012 ANES","2015 YouGov",
                        "Swiss (French)","Swiss (German)","Swiss (Italian)")

## Create plot: preText regression coefficients (+/- 2 SE) per study
preText_res %>%
  map(regression_coefficient_plot, remove_intercept = TRUE) %>%
  map_dfr("data", .id = "study") %>%
  mutate(study = factor(study, levels = c("2018 CES", "2020 ANES", "2016 ANES", "2012 ANES","2015 YouGov",
                                          "Swiss (French)","Swiss (German)","Swiss (Italian)"))) %>%
  ggplot(aes(x = Coefficient, xmin = Coefficient-2*SE, xmax = Coefficient+2*SE, y = Variable)) +
  geom_point() + geom_errorbarh(height=0) + geom_vline(xintercept = 0) +
  facet_wrap(~study, ncol=2) + labs(y=NULL, x = "Regression Coefficient") + plot_default
ggsave(here("out/appC1-pretext.pdf"),width = 6, height = 4.5)

# Figure C.2: Robustness of discursive sophistication measure for --------
## Compute discursive sophistication with alternative specifications:
## more topics (K = 35), no stemming, and keeping stopwords
plot_df <- bind_rows(
  robust_discursive(data = ces2018, datalab = "2018 CES", K = 35),
  robust_discursive(data = anes2020, datalab = "2020 ANES", K = 35),
  robust_discursive(data = anes2016, datalab = "2016 ANES", K = 35),
  robust_discursive(data = anes2012, datalab = "2012 ANES", K = 35),
  robust_discursive(data = yg2015, datalab = "2015 YouGov", K = 35),
  robust_discursive(data = swiss2012_fr, datalab = "Swiss (French)", K = 35,
                    meta = c("age", "educ_cont", "ideo_cont", "educ_ideo", "female"),
                    language = "french", dictionary = dict_constraint$fr),
  robust_discursive(data = swiss2012_de, datalab = "Swiss (German)", K = 35,
                    meta = c("age", "educ_cont", "ideo_cont", "educ_ideo", "female"),
                    language = "german", dictionary = dict_constraint$de),
  robust_discursive(data = swiss2012_it, datalab = "Swiss (Italian)", K = 35,
                    meta = c("age", "educ_cont", "ideo_cont", "educ_ideo", "female"),
                    language = "italian", dictionary = dict_constraint$it),
  robust_discursive(data = ces2018, datalab = "2018 CES", stem = FALSE),
  robust_discursive(data = anes2020, datalab = "2020 ANES", stem = FALSE),
  robust_discursive(data = anes2016, datalab = "2016 ANES", stem = FALSE),
  robust_discursive(data = anes2012, datalab = "2012 ANES", stem = FALSE),
  robust_discursive(data = yg2015, datalab = "2015 YouGov", stem = FALSE),
  robust_discursive(data = swiss2012_fr, datalab = "Swiss (French)", stem = FALSE,
                    meta = c("age", "educ_cont", "ideo_cont", "educ_ideo", "female"),
                    language = "french", dictionary = dict_constraint$fr),
  robust_discursive(data = swiss2012_de, datalab = "Swiss (German)", stem = FALSE,
                    meta = c("age", "educ_cont", "ideo_cont", "educ_ideo", "female"),
                    language = "german", dictionary = dict_constraint$de),
  robust_discursive(data = swiss2012_it, datalab = "Swiss (Italian)", stem = FALSE,
                    meta = c("age", "educ_cont", "ideo_cont", "educ_ideo", "female"),
                    language = "italian", dictionary = dict_constraint$it),
  robust_discursive(data = ces2018, datalab = "2018 CES", removestopwords = FALSE),
  robust_discursive(data = anes2020, datalab = "2020 ANES", removestopwords = FALSE),
  robust_discursive(data = anes2016, datalab = "2016 ANES", removestopwords = FALSE),
  robust_discursive(data = anes2012, datalab = "2012 ANES", removestopwords = FALSE),
  robust_discursive(data = yg2015, datalab = "2015 YouGov", removestopwords = FALSE),
  robust_discursive(data = swiss2012_fr, datalab = "Swiss (French)", removestopwords = FALSE,
                    meta = c("age", "educ_cont", "ideo_cont", "educ_ideo", "female"),
                    language = "french", dictionary = dict_constraint$fr),
  robust_discursive(data = swiss2012_de, datalab = "Swiss (German)", removestopwords = FALSE,
                    meta = c("age", "educ_cont", "ideo_cont", "educ_ideo", "female"),
                    language = "german", dictionary = dict_constraint$de),
  robust_discursive(data = swiss2012_it, datalab = "Swiss (Italian)", removestopwords = FALSE,
                    meta = c("age", "educ_cont", "ideo_cont", "educ_ideo", "female"),
                    language = "italian", dictionary = dict_constraint$it)
) %>%
  mutate(
    datalab = factor(datalab,
                     levels = c("2018 CES", "2020 ANES", "2016 ANES", "2012 ANES", "2015 YouGov",
                                "Swiss (French)", "Swiss (German)", "Swiss (Italian)")),
    # Encode each specification as a 4-digit code (100*K + 10*stem +
    # removestopwords): "3511" = K=35 with default preprocessing, "2501" =
    # no stemming, "2510" = stopwords kept (default K presumably 25 — see
    # robust_discursive in 00-func.R).
    condition = factor(100*K + 10*stem + 1*removestopwords,
                       levels = c("3511","2501","2510"),
                       labels = c("More topics (k = 35)", "No stemming", "Keep stopwords"))
  )

## Compute correlations for subgroups; the fixed original/replication
## values place the correlation labels at a constant panel position
plot_cor <- plot_df %>%
  group_by(datalab, condition) %>%
  summarize(cor = paste0("r = ",round(cor(original, replication), 3))) %>%
  mutate(original = 4, replication = -2.5)

## Create figure
ggplot(plot_df, aes(y = original, x = replication)) +
  geom_point(alpha=.05) + geom_smooth(method="lm") +
  facet_grid(datalab~condition) +
  geom_text(data=plot_cor, aes(label=cor), size=2) +
  labs(y = "Discursive Sophistication (Preferred Specification)",
       x = "Discursive Sophistication (Alternative Specifications)") +
  plot_default
ggsave(here("out/appC2-pretext_robustness.png"), width=5, height=8.5, dpi = 600)
# Figure C.3: Controlling for personality and verbal skills ---------------
## Select dependent and independent variables; ivs_rob holds the extra
## controls added one at a time (personality battery, wordsum, word count,
## survey mode)
dvs <- c("vote", "polint", "effic_int", "effic_ext")
ivs <- c("discursive", "polknow",
         "female", "age", "black", "educ", "faminc", "relig")
ivs_rob <- c("extraversion + newexperience + reserved", "wordsum", "wc", "mode")

## Estimate models: 4 DVs x 4 robustness controls x 2 surveys = 32 models
## (logit for turnout, OLS otherwise), ordered ANES 2016 then ANES 2012
m1cont <- c(
  map(ivs_rob,
      ~glm(reformulate(c(ivs, .), response = "vote"), data = anes2016, family=binomial("logit"))),
  map(ivs_rob,
      ~lm(reformulate(c(ivs, .), response = "polint"), data = anes2016)),
  map(ivs_rob,
      ~lm(reformulate(c(ivs, .), response = "effic_int"), data = anes2016)),
  map(ivs_rob,
      ~lm(reformulate(c(ivs, .), response = "effic_ext"), data = anes2016)),
  map(ivs_rob,
      ~glm(reformulate(c(ivs, .), response = "vote"), data = anes2012, family=binomial("logit"))),
  map(ivs_rob,
      ~lm(reformulate(c(ivs, .), response = "polint"), data = anes2012)),
  map(ivs_rob,
      ~lm(reformulate(c(ivs, .), response = "effic_int"), data = anes2012)),
  map(ivs_rob,
      ~lm(reformulate(c(ivs, .), response = "effic_ext"), data = anes2012))
)

## Create figure: marginal effect of a 2-SD change (-1 to +1) in each
## sophistication measure; the rep() calls below must match the model
## ordering constructed above
m1cont %>%
  map_dfr(~tidy(comparisons(
    ., variables = list(discursive = c(-1,1),
                        polknow = c(-1,1)))),
    .id = "model") %>%
  as_tibble() %>%
  mutate(
    study = factor(rep(c("2016 ANES", "2012 ANES"), each = 32),
                   levels = c("2016 ANES", "2012 ANES")),
    dv = recode_factor(rep(rep(dvs, each = 8), 2),
                       `vote` = "Turnout",
                       `polint` = "Political Interest",
                       `effic_int` = "Internal Efficacy",
                       `effic_ext` = "External Efficacy"),
    Control = factor(rep(rep(1:4, each = 2), 8),
                     labels = c("Personality",
                                "Wordsum score",
                                "Response length",
                                "Survey mode")),
    term = recode_factor(term,
                         `polknow` = "Factual\nKnowledge",
                         `discursive` = "Discursive\nSophistication")) %>%
  ggplot(aes(y=term, x=estimate, xmin=conf.low, xmax=conf.high,
             col = Control, shape = Control)) +
  geom_vline(xintercept = 0, color="grey") +
  geom_point(position = position_dodge(width = -.5)) +
  geom_errorbarh(height=0, position = position_dodge(width = -.5)) +
  facet_grid(study~dv) +
  labs(x = "Estimated Effect of Discursive Sophistication and Factual Knowledge\n(for an increase from 1 SD below mean to 1 SD above mean)", y = "Independent Variable") +
  plot_default + theme(legend.position = "bottom")
ggsave(here("out/appC3-knoweff_robust.pdf"), width=6.5, height=3.5)
# Figure C.4: Effects of sophistication on the probability of resp --------
## Select variables: ideological placement items (also reused for Fig. C.5)
ideo <- c("ideo_dem", "ideo_rep", "ideo_sc", "ideo_trump", "ideo_warren", "ideo_ryan",
          "ideo_mcconnel", "ideo_schumer", "ideo_pelosi", "ideo_murkowski", "ideo_collins",
          "ideo_feinstein", "ideo_booker", "ideo_haley")

## Create figure: arrows from "don't know" rates at low (25th pct) to high
## (75th pct) sophistication/knowledge; respondents with empty open-ended
## answers (wc == 0) are excluded
bind_rows(
  map_dfr(ideo, ~ideo_compare(., "discursive", filter(ces2018, wc != 0))),
  map_dfr(ideo, ~ideo_compare(., "polknow", filter(ces2018, wc != 0)))
) %>%
  mutate(
    iv = recode_factor(iv,
                       `discursive` = "Discursive\nSophistication",
                       `polknow` = "Factual\nKnowledge"),
    na_mean = (na_lo + na_hi)/2,
    dv = recode_factor(dv,
                       `ideo_murkowski` = "Lisa Murkowski",
                       `ideo_collins` = "Susan Collins",
                       `ideo_booker` = "Cory Booker",
                       `ideo_haley` = "Nikki Haley",
                       `ideo_schumer` = "Chuck Schumer",
                       `ideo_mcconnel` = "Mitch McConnell",
                       `ideo_feinstein` = "Dianne Feinstein",
                       `ideo_warren` = "Elizabeth Warren",
                       `ideo_ryan` = "Paul Ryan",
                       `ideo_pelosi` = "Nancy Pelosi",
                       `ideo_sc` = "Supreme Court",
                       `ideo_trump` = "Donald Trump",
                       `ideo_rep` = "Republican Party",
                       `ideo_dem` = "Democratic Party")
  ) %>%
  ggplot(aes(y = dv)) +
  geom_vline(xintercept = 0, color="gray") +
  # Shorten arrow ends slightly (.01) so arrowheads do not overlap points
  geom_segment(aes(x = na_lo, yend = dv,
                   xend = ifelse(na_hi < na_lo, na_hi + .01, na_hi - .01)),
               lineend = "round", linejoin = "mitre",
               arrow = arrow(length = unit(0.1, "inches"))) +
  geom_point(aes(x = na_lo, shape = "Low (25th Percentile)",
                 col = iv), size = 2) +
  geom_point(aes(x = na_hi, shape = "High (75th Percentile)",
                 col = iv), size = 2) +
  geom_text(aes(x = na_mean, label = na_stars), nudge_y = .25, size = 2.5) +
  facet_wrap(~iv) +
  scale_color_discrete(type = c("#5e3c99", "#fdb863"), guide = "none") +
  scale_x_continuous(labels = scales::percent) +
  labs(y = NULL,
       x = "'Don't Know' in Ideological Placements (in Percent)",
       col = "Sophistication/Knowledge",
       shape = "Sophistication/Knowledge") +
  plot_default +
  theme(legend.position = "bottom")
ggsave(here("out/appC4-placements_dk.pdf"), width = 6.5, height = 4)
# Figure C.5: Effects of sophistication on the uncertainty around --------
## Create figure: same layout as Figure C.4 but for the standard deviation
## (uncertainty) of placements rather than "don't know" rates; reuses the
## ideo vector defined in the Figure C.4 section
bind_rows(
  map_dfr(ideo, ~ideo_compare(., "discursive", filter(ces2018, wc != 0))),
  map_dfr(ideo, ~ideo_compare(., "polknow", filter(ces2018, wc != 0)))
) %>%
  mutate(
    iv = recode_factor(iv,
                       `discursive` = "Discursive\nSophistication",
                       `polknow` = "Factual\nKnowledge"),
    sd_mean = (sd_lo + sd_hi)/2,
    dv = recode_factor(dv,
                       `ideo_sc` = "Supreme Court",
                       `ideo_collins` = "Susan Collins",
                       `ideo_feinstein` = "Dianne Feinstein",
                       `ideo_haley` = "Nikki Haley",
                       `ideo_murkowski` = "Lisa Murkowski",
                       `ideo_booker` = "Cory Booker",
                       `ideo_dem` = "Democratic Party",
                       `ideo_mcconnel` = "Mitch McConnell",
                       `ideo_ryan` = "Paul Ryan",
                       `ideo_schumer` = "Chuck Schumer",
                       `ideo_rep` = "Republican Party",
                       `ideo_warren` = "Elizabeth Warren",
                       `ideo_pelosi` = "Nancy Pelosi",
                       `ideo_trump` = "Donald Trump")
  ) %>%
  ggplot(aes(y = dv)) +
  geom_vline(xintercept = 0, color="gray") +
  geom_segment(aes(x = sd_lo, yend = dv,
                   xend = ifelse(sd_hi < sd_lo, sd_hi + .02, sd_hi - .02)),
               lineend = "round", linejoin = "mitre",
               arrow = arrow(length = unit(0.1, "inches"))) +
  geom_point(aes(x = sd_lo, shape = "Low (25th Percentile)",
                 col = iv), size = 2) +
  geom_point(aes(x = sd_hi, shape = "High (75th Percentile)",
                 col = iv), size = 2) +
  geom_text(aes(x = sd_mean, label = sd_stars), nudge_y = .25, size = 2.5) +
  facet_wrap(~iv) +
  scale_color_discrete(type = c("#5e3c99", "#fdb863"), guide = "none") +
  labs(y = NULL,
       x = "Uncertainty in Ideological Placements (in Standard Deviations)",
       col = "Sophistication/Knowledge",
       shape = "Sophistication/Knowledge") +
  plot_default +
  theme(legend.position = "bottom")
ggsave(here("out/appC5-placements.pdf"), width = 6.5, height = 4)
# Figure C.6: Expected probability to vote for the senatorial cand --------
## Compute average candidate positions (perceived ideology averaged across
## all respondents rating each named candidate).
## T/F replaced with TRUE/FALSE: T and F are ordinary variables that can be
## reassigned, so relying on them is unsafe.
SenCand1Avg <- ces2018 %>%
  group_by(SenCand1Name) %>%
  summarize(SenCand1Avg = mean(ideo_cand1, na.rm = TRUE)) %>%
  na.omit()
SenCand2Avg <- ces2018 %>%
  group_by(SenCand2Name) %>%
  summarize(SenCand2Avg = mean(ideo_cand2, na.rm = TRUE)) %>%
  na.omit()

## Compare vote choice to relative candidate proximity: correct_vote = 1 if
## the respondent voted for the ideologically closer candidate
ces2018 <- ces2018 %>%
  left_join(SenCand1Avg) %>%
  left_join(SenCand2Avg) %>%
  mutate(correct_vote = as.numeric(abs(ideo_ego - SenCand1Avg) > abs(ideo_ego - SenCand2Avg)) + 1,
         correct_vote = as.numeric(correct_vote == senate_vote))

## Estimate model (m1cv is also reported in Table C.1 below)
m1cv <- glm(correct_vote ~ discursive + polknow +
              female + age + black + educ + faminc + relig,
            data = ces2018, family=binomial("logit"))

## Create figure: predicted probability of a proximity vote across the
## range of each sophistication measure
bind_rows(
  plot_cap(m1cv, condition = c("discursive"), draw = FALSE) %>%
    transmute(iv = "discursive", ivval = condition1,
              mean = predicted, cilo = conf.low, cihi = conf.high),
  plot_cap(m1cv, condition = c("polknow"), draw = FALSE) %>%
    transmute(iv = "polknow", ivval = condition1,
              mean = predicted, cilo = conf.low, cihi = conf.high)) %>%
  as_tibble() %>%
  mutate(Variable = recode_factor(iv,
                                  `discursive` = "Discursive Sophistication",
                                  `polknow` = "Factual Knowledge")) %>%
  ggplot(aes(x=ivval, y=mean, ymin=cilo,ymax=cihi, lty=Variable, fill=Variable)) + plot_default +
  geom_ribbon(alpha=0.6, lwd=.1) + geom_line() +
  ylab("Pr(Ideological Proximity Vote)") + xlab("Value of Independent Variable") +
  scale_fill_discrete(type = c("#5e3c99", "#fdb863"))
ggsave(here("out/appC6-correct_vote.pdf"), width=4, height=2)
# Figure C.7: Average trust in different news outlets as a functio --------
## Create figure. The second pivot_longer stacks the two sophistication
## measures; its values_to = "polknow" reuses that column name for both
## measures, which is why the x aesthetic below is named polknow.
mturk2019 %>%
  select(polknow, discursive,
         tv_trust_fox:tv_trust_cbs, print_trust_nyt:print_trust_nyp) %>%
  pivot_longer(-c(polknow, discursive),
               names_to = "src", values_to = "trust") %>%
  pivot_longer(-c(src, trust),
               names_to = "iv", values_to = "polknow") %>%
  mutate(Variable = recode_factor(iv,
                                  `discursive` = "Discursive Sophistication",
                                  `polknow` = "Factual Knowledge"),
         Source = recode_factor(src,
                                `print_trust_nyt` = "New York Times",
                                `print_trust_wapo` = "Washington Post",
                                `print_trust_wsj` = "Wall Street Journal",
                                `print_trust_ust` = "USA Today",
                                `print_trust_nyp` = "New York Post",
                                `tv_trust_cnn` = "CNN",
                                `tv_trust_nbc` = "NBC",
                                `tv_trust_cbs` = "CBS",
                                `tv_trust_msnbc` = "MSNBC",
                                `tv_trust_fox` = "FOX News"),
         Type = ifelse(grepl("print", src), "Print", "TV"),
         Type = factor(Type, levels = c("TV","Print"))) %>%
  ggplot(aes(y = trust, x = polknow, lty = Variable, fill = Variable)) +
  plot_default +
  geom_smooth(method = "lm", col = "black", alpha=0.6, lwd=.1) +
  labs(y = "Trust that Reporting is Accurate",
       x = "Value of Independent Variable") +
  theme(legend.position = "bottom") +
  facet_wrap(~Type+Source, ncol = 5, dir = "h") +
  scale_fill_discrete(type = c("#5e3c99", "#fdb863"))
ggsave(here("out/appC7-media_trust.pdf"), width=6.5, height=4)

# Table C.1: Logistic regression predicting ideological proximity- --------
## Create table for m1cv (estimated in the Figure C.6 section above)
stargazer(m1cv, type="text", align = TRUE, column.sep.width = "-25pt", no.space = TRUE, digits = 3,
          model.names=FALSE, dep.var.labels.include = FALSE, star.cutoffs = c(.05,.01,.001),
          title="Logistic regression predicting ideological proximity-based voting for
          US Senators in the 2018 CES. Standard errors in parentheses.
          Estimates are used for Figure \\ref{fig:correct_vote}.",
          column.labels = "Ideological Proximity Vote",
          covariate.labels = c("Discursive Soph.","Factual Knowledge",
                               "Female", "Age", "Black", "College Degree",
                               "Household Income","Church Attendance","Constant"),
          keep.stat = c("n", "rsq", "aic"), font.size = "footnotesize",
          out = here("out/appC1-correct_vote.tex"), label = "tab:correct_vote")
# Table C.2: Personality, verbal skills, and survey mode as predic --------
## Estimate models
m3rob_disc <- list(
lm(discursive ~ extraversion + newexperience + reserved +
wordsum + mode +
female + age + black + pid_dem + pid_rep +
educ_fact + faminc + relig,
data = anes2016, subset = !is.na(polknow)),
lm(discursive ~ extraversion + newexperience + reserved +
wordsum + mode + polknow +
female + age + black + pid_dem + pid_rep +
educ_fact + faminc + relig,
data = anes2016, subset = !is.na(polknow)),
lm(discursive ~ extraversion + newexperience + reserved +
wordsum + mode +
female + age + black + pid_dem + pid_rep +
educ_fact + faminc + relig,
data = anes2012, subset = !is.na(polknow)),
lm(discursive ~ extraversion + newexperience + reserved +
wordsum + mode + polknow +
female + age + black + pid_dem + pid_rep +
educ_fact + faminc + relig,
data = anes2012, subset = !is.na(polknow))
)
## Create table
## Four-column table for m3rob_disc (models 1-2: 2016 ANES, 3-4: 2012 ANES).
## `out` ends in .tex, so stargazer writes LaTeX there while type = "text"
## only controls the console preview.
## NOTE(review): column.labels/column.separate supply four group labels of
## width 2 for only four models; just the first two pairs are consumed --
## confirm the rep(..., 2)/rep(2,4) repetition is intentional.
## covariate.labels must track the models' term order (educ_fact expands
## into four dummy indicators).
stargazer(m3rob_disc, type="text", align = TRUE, column.sep.width = "-5pt", no.space = TRUE, digits = 3,
          model.names=FALSE, star.cutoffs = c(.05,.01,.001),
          dep.var.labels = "Discursive Sophistication",
          title="Personality, verbal skills, and survey mode as predictors
          of discursive sophistication in the 2016 and 2012 ANES.",
          column.labels = rep(c("2016 ANES", "2012 ANES"), 2),
          column.separate = rep(2,4),
          covariate.labels = c("Personality: Extraversion",
                               "Personality: Openness to Experience",
                               "Personality: Reserved",
                               "Verbal Skills (Wordsum score)",
                               "Survey Mode (Online)",
                               "Factual Knowledge",
                               "Female","Age", "Black",
                               "PID: Democrat", "PID: Republican",
                               "Education: High School", "Education: Some College",
                               "Education: Bachelor's Degree", "Education: Graduate Degree",
                               "Household Income", "Church Attendance", "Constant"),
          keep.stat = c("n", "rsq"), font.size = "footnotesize",
          out = here("out/appC2-determinants_rob_disc.tex"), label = "tab:determinants_rob_disc")
# Table C.3: Personality, verbal skills, and survey mode as predic --------
## Estimate models
## Mirror image of the Table C.2 specification: factual knowledge
## (polknow) is the outcome and discursive sophistication the optional
## control; rows with missing discursive scores are dropped in every model.
fit_fact_model <- function(dat, add_discursive) {
  # Same term order as the printed table; the optional discursive control
  # follows the survey-mode indicator.
  rhs_terms <- c("extraversion", "newexperience", "reserved",
                 "wordsum", "mode",
                 if (add_discursive) "discursive",
                 "female", "age", "black", "pid_dem", "pid_rep",
                 "educ_fact", "faminc", "relig")
  lm(reformulate(rhs_terms, response = "polknow"),
     data = dat, subset = !is.na(discursive))
}
m3rob_fact <- list(
  fit_fact_model(anes2016, add_discursive = FALSE),
  fit_fact_model(anes2016, add_discursive = TRUE),
  fit_fact_model(anes2012, add_discursive = FALSE),
  fit_fact_model(anes2012, add_discursive = TRUE)
)
## Create table
## Companion to Table C.2 with factual knowledge as the outcome; same
## four-model layout (2016 models 1-2, 2012 models 3-4).
## `out` ends in .tex, so the file receives LaTeX regardless of type.
## NOTE(review): as in Table C.2, only the first two column.labels /
## column.separate pairs are used for the four models -- confirm intended.
stargazer(m3rob_fact, type="text", align = TRUE, column.sep.width = "-5pt", no.space = TRUE, digits = 3,
          model.names=FALSE, star.cutoffs = c(.05,.01,.001),
          dep.var.labels = "Factual Knowledge",
          title="Personality, verbal skills, and survey mode as predictors
          of factual knowledge in the 2016 and 2012 ANES.",
          column.labels = rep(c("2016 ANES", "2012 ANES"), 2),
          column.separate = rep(2,4),
          covariate.labels = c("Personality: Extraversion",
                               "Personality: Openness to Experience",
                               "Personality: Reserved",
                               "Verbal Skills (Wordsum score)",
                               "Survey Mode (Online)",
                               "Discursive Soph.",
                               "Female","Age", "Black",
                               "PID: Democrat", "PID: Republican",
                               "Education: High School", "Education: Some College",
                               "Education: Bachelor's Degree", "Education: Graduate Degree",
                               "Household Income", "Church Attendance", "Constant"),
          keep.stat = c("n", "rsq"), font.size = "footnotesize",
          out = here("out/appC3-determinants_rob_fact.tex"), label = "tab:determinants_rob_fact")
|
fd5a253d55935709f74cf42a2222b00e368f6532 | 975ecf7fb915e2fa05f7ba1157f2e977f1b83576 | /datasets/classification/Audiology/Audiology.r | 0ecec7fd598716961a42d411af7b696cf911a95f | [] | no_license | marticardoso/Learning-with-a-neural-network-based-on-similarity-measures | 24b470172d62e90e2d543670b7ed4c546a3bbbda | a4aae9934c0a02bc2da4a9f9e2769fb2b4a1b36e | refs/heads/master | 2022-09-08T14:41:28.962026 | 2020-05-25T09:44:56 | 2020-05-25T09:44:56 | 186,421,829 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,333 | r | Audiology.r | ###########################################################
# AUDIOLOGY DATA READING AND PREPARATION
###########################################################
# Read the UCI audiology (standardized) train/test files and pool them;
# "?" marks missing values in the raw data.
audiology.tr <- read.table ("audiology.standardized.data", sep=",", na.strings = "?")
audiology.te <- read.table ("audiology.standardized.test", sep=",", na.strings = "?")
audiology <- rbind (audiology.tr, audiology.te)
# Sanity check: report pooled dimensions
dim(audiology)
# Remove identifier (column 70)
audiology <- audiology[-70]
# Remove non-informative variables (positions refer to the frame AFTER
# dropping the identifier above)
audiology <- audiology[-c(8,9,12,13,16,20,21,22,23,24,28,29,30,31,32,33,34,35,36,41,42,43,44,45,46,47,48,49,50,51,52,55,56,61,63,67,68,69)]
# Remove variables (columns) whose values are all missing -- Filter()
# iterates over the columns of a data frame, not its rows
audiology <- Filter(function(x)!all(is.na(x)), audiology)
# Remove instances (rows) that do not have a defined class label (V71)
audiology <- audiology[!is.na(audiology[,"V71"]),]
Target.pos <- 32
colnames(audiology)[Target.pos] <- "Target"
t.oldvalues <- c(
"cochlear_age",
"cochlear_age_and_noise" ,
"cochlear_age_plus_poss_menieres" ,
"cochlear_noise_and_heredity" ,
"cochlear_poss_noise" ,
"cochlear_unknown" ,
"mixed_cochlear_age_fixation" ,
"mixed_cochlear_age_otitis_media" ,
"mixed_cochlear_age_s_om" ,
"mixed_cochlear_unk_discontinuity" ,
"mixed_cochlear_unk_fixation" ,
"mixed_cochlear_unk_ser_om" ,
"mixed_poss_central_om" ,
"mixed_poss_noise_om" ,
"normal_ear" ,
"otitis_media" ,
"poss_central" ,
"possible_brainstem_disorder" ,
"possible_menieres" ,
"retrocochlear_unknown" ,
"acoustic_neuroma" ,
"bells_palsy" ,
"conductive_discontinuity" ,
"conductive_fixation"
)
t.newvalues <- factor (c(rep("Cochlear",6), rep("Mixed",8), "Normal", rep("Other",9))) # Make this a factor
audiology$Target <- t.newvalues[ match(audiology$Target, t.oldvalues) ]
if (preproc == "sim" || preproc == "std")
{
  # Recode predictors appropriately
  # Helper for the recode repeated across V4, V5, V59 and V60: map
  # absent/normal/elevated onto an ordered factor whose labels sort as
  # 1Absent < 2Normal < 3Elevated; any other value (incl. NA) becomes NA.
  recode.ane <- function(v) {
    factor(v, levels = c("absent","normal","elevated"),
           labels = c("1Absent","2Normal","3Elevated"), ordered = TRUE)
  }
  # V2: air() -- values are already ordinal, just flag them as ordered
  audiology$V2 <- ordered(audiology$V2)
  # V4: ar_c()
  audiology$V4 <- recode.ane(audiology$V4)
  # V5: ar_u()
  audiology$V5 <- recode.ane(audiology$V5)
  # V6: bone()
  # WARNING: there is a value 'unmeasured', which we also code as NA
  audiology$V6[audiology$V6=="unmeasured"] <- NA
  audiology$V6 <- ordered(audiology$V6)
  # V59: o_ar_c()
  audiology$V59 <- recode.ane(audiology$V59)
  # V60: o_ar_u()
  audiology$V60 <- recode.ane(audiology$V60)
  # V64: speech()
  # WARNING: same as V6 -- 'unmeasured' is recoded to NA first; numeric
  # label prefixes keep the five speech levels in their natural order
  audiology$V64[audiology$V64=="unmeasured"] <- NA
  audiology$V64 <- factor(audiology$V64,
                          levels = c("very_poor","poor","normal","good","very_good"),
                          labels = c("1very_poor","2poor","3normal","4good","5very_good"),
                          ordered = TRUE)
}
if (preproc == "std")
{
  # Imputing the missing values by iterated least squares regressions
  # (mice with a single imputed dataset, m = 1)
  audiology.mice <- mice(audiology, m=1)
  audiology <- complete(audiology.mice)
}
if (preproc == "raw")
{
  # Imputing the missing values by a 0
  # factorsNumeric is a project helper -- presumably coerces factor
  # columns to numeric codes; confirm against its definition
  audiology[,-Target.pos] <- factorsNumeric (audiology[,-Target.pos])
  audiology[is.na(audiology)] <- 0
}
# Proceed ...
N <- nrow(audiology)
# Optionally shuffle the rows; the flags `shuffle`, `preproc` and `scale`
# are expected to be set by the calling script
if (shuffle) { audiology <- audiology[sample.int(N),] }
# Standardize predictors. Note: `scale` in the condition is a logical flag
# set elsewhere; the right-hand call still resolves to base::scale()
if (preproc == "raw" || scale) { audiology[,-Target.pos] <- scale(audiology[,-Target.pos]) }
# For Gower: no per-column similarity overrides
simil.types <- list()
# Split target and predictors
Targets <- audiology$Target
Predictors <- audiology[,-Target.pos]
# Fixed-size split: the first Nlearn rows train, the remainder test
Nlearn <- 200
dataset <- audiology
learn <- seq_len(Nlearn)   # robust to Nlearn == 0, unlike 1:Nlearn
Ntest <- N - Nlearn
|
9a374c5e96113bdb16971293431932f065faaa6b | c194c5236006a758b29bd4d530ad563dc9ecab7e | /inst/apps/distribution_gamma_functions/server.R | 7068db46f82609acaa90ec486232f1dcc6979e80 | [] | no_license | Auburngrads/teachingApps | 1087c20a21992433a2f8451db7b1eaa7d1d2cb89 | b79c192e5f74c5e8376674d4fb9e0b95a426fe03 | refs/heads/master | 2021-03-16T07:49:56.579527 | 2020-06-14T12:10:12 | 2020-06-14T12:10:12 | 51,677,745 | 15 | 7 | null | 2018-03-01T03:44:58 | 2016-02-14T03:22:47 | R | UTF-8 | R | false | false | 2,487 | r | server.R | server = function(input, output, session) {
gam.t = reactive({ signif(seq(min(input$range.gamma), max(input$range.gamma), length = 500), digits = 4)})
gam.p <- signif(seq(0, 1, length = 500), digits = 4)
gam.C <- reactive({ pgamma(gam.t(), input$kappa, input$theta)})
gam.P <- reactive({ dgamma(gam.t(), input$kappa, input$theta)})
gam.R <- reactive({ 1-gam.C()})
gam.h <- reactive({ exp(log(gam.P())-log(gam.R()))})
gam.H <- reactive({ -1*log(1-pgamma(gam.t(), input$kappa, input$theta))})
gam.Q <- reactive({ qgamma(gam.p, input$kappa, input$theta)})
gam.df <- reactive({data.frame(Time = gam.t(),
PROB = gam.p,
CDF = gam.C(),
PDF = gam.P(),
REL = gam.R(),
haz = gam.h(),
HAZ = gam.H(),
QUANT = gam.Q())})
# Area charts for each gamma-distribution function; all read the shared
# reactive data frame gam.df(). The CSS rule enlarges hover-tooltip text.
# CDF F(t); top = 0 (first chart only) trims the default top margin
output$gammaC <- renderMetricsgraphics({
    mjs_plot(gam.df(), x = Time, y = CDF, decimals = 4, top = 0) %>%
    mjs_line(area = TRUE) %>%
    mjs_labs(x_label = 'Time', y_label = 'F(t)')%>%
    mjs_add_css_rule("{{ID}} .mg-active-datapoint { font-size: 20pt }")})
# Density f(t)
output$gammaP <- renderMetricsgraphics({
    mjs_plot(gam.df(), x = Time, y = PDF, decimals = 4) %>%
    mjs_line(area = TRUE) %>%
    mjs_labs(x_label = 'Time', y_label = 'f(t)') %>%
    mjs_add_css_rule("{{ID}} .mg-active-datapoint { font-size: 20pt }")})
# Survival / reliability S(t)
output$gammaR <- renderMetricsgraphics({
    mjs_plot(gam.df(), x = Time, y = REL, decimals = 4) %>%
    mjs_line(area = TRUE) %>%
    mjs_labs(x_label = 'Time', y_label = 'S(t)') %>%
    mjs_add_css_rule("{{ID}} .mg-active-datapoint { font-size: 20pt }")})
# Hazard h(t)
output$gammah <- renderMetricsgraphics({
    mjs_plot(gam.df(), x = Time, y = haz, decimals = 4) %>%
    mjs_line(area = TRUE) %>%
    mjs_labs(x_label = 'Time', y_label = 'h(t)') %>%
    mjs_add_css_rule("{{ID}} .mg-active-datapoint { font-size: 20pt }")})
# Cumulative hazard H(t)
output$gammaH <- renderMetricsgraphics({
    mjs_plot(gam.df(), x = Time, y = HAZ, decimals = 4) %>%
    mjs_line(area = TRUE) %>%
    mjs_labs(x_label = 'Time', y_label = 'H(t)') %>%
    mjs_add_css_rule("{{ID}} .mg-active-datapoint { font-size: 20pt }")})
# Quantile function q(p), plotted against probability
output$gammaQ <- renderMetricsgraphics({
    mjs_plot(gam.df(), x = PROB, y = QUANT, decimals = 4) %>%
    mjs_line(area = TRUE) %>%
    mjs_labs(x_label = 'Probability', y_label = 'q(t)') %>%
    mjs_add_css_rule("{{ID}} .mg-active-datapoint { font-size: 20pt }")})
} |
9c24bf170b5a398c42420f2fb2fc36cbc741a409 | 26151b679705ae098e79fab6c98ca68c093a4f09 | /Classification_Techniques/ADiscriminante_Practica_1.R | 82cad23a5dd0e0a2347e448518e58628f5fe08d8 | [] | no_license | octadelsueldo/Master_DS_CUNEF | 2808e5f5fbf564c7c3fdf209d699056ecd75a8af | 354f9d4e43cbcf9d47edd85cfcb5010b417a8b3a | refs/heads/main | 2023-08-21T22:54:16.712139 | 2021-10-06T22:33:32 | 2021-10-06T22:33:32 | 414,352,378 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,123 | r | ADiscriminante_Practica_1.R | #----------------------------------------------------------------------
#----------------------------------------------------------------------
# MDS - Classification Techniques
# Discriminant Analysis
# Practice 1
#----------------------------------------------------------------------
# The risk-class counts observed for a firm can be viewed as a draw from a
# multinomial distribution whose cell probabilities differ between the two
# firms. The observation x is assigned to the firm under which its
# log-likelihood is largest.
rm(list=ls())
# Class probabilities of each group
A <- c(0.25, 0.6, 0.15) # cell proportions, firm 1
B <- c(0.25, 0.7, 0.05) # cell proportions, firm 2
# Data: observed frequency of each class
x <- c(8, 15, 1)
# Multinomial log-likelihood of x under each firm (up to the shared
# multinomial coefficient); crossprod(a, b) is t(a) %*% b
LL1 <- crossprod(log(A), x)
LL1
LL2 <- crossprod(log(B), x)
LL2
# Classification, first form: pick firm 1 iff its log-likelihood is larger
ifelse(LL1-LL2>0,"Empresa 1","Empresa 2")
# Classification, second form (equivalent): sign of the log-likelihood ratio
z <- crossprod(log(A/B), x)
ifelse(z>0,"Empresa 1","Empresa 2")
|
7d7aa01ab99b72804211be49020c9633d16a8b05 | 4622517f2786656d17dac3e745a4a73ea5fc70fe | /Environment.R | 341e11eb654032a842913853e7741c21e2d9bcf3 | [] | no_license | ynren1020/AdvancedR | a20565c685692e2df71cba2dcf9b3d94f2a26a1b | 07b4976703d59bde5a871acffa865e65c8421679 | refs/heads/main | 2022-12-30T09:20:30.722610 | 2020-10-18T03:33:31 | 2020-10-18T03:33:31 | 305,013,055 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,172 | r | Environment.R | #Environments are data structures in R that have special properties with regard to their role
# Environments are R data structures with special roles in how code is
# executed and how memory is organized.
# Create a fresh environment and bind variables in it, with `$<-` or
# assign(); read them back with `$`, get(), and ls().
my_new_env <- new.env()
my_new_env$x <- 4
my_new_env$x
#[1] 4
assign("y", 9, envir = my_new_env)
get("y", envir = my_new_env)
#[1] 9
my_new_env$y
#[1] 9
ls(my_new_env)
#[1] "x" "y"
# Delete the binding `y` from the environment
rm(y, envir = my_new_env)
exists("y", envir = my_new_env)
#[1] TRUE
# NOTE(review): exists() defaults to inherits = TRUE, so the line above is
# TRUE only if some `y` is visible in an enclosing environment; in a fresh
# session it returns FALSE -- confirm, or pass inherits = FALSE.
exists("x", envir = my_new_env)
#[1] TRUE
my_new_env$x
#[1] 4
# `$` on an environment returns NULL for a missing binding (no error)
my_new_env$y
#NULL
# Execution Environments
# A function body is evaluated in a fresh execution environment, so a
# local `x` shadows the global one without modifying it.
# (Fixed: the original `x <- 10 (#global environment)` opened a call
# `10(...)`, which is a syntax error; the annotation is now a comment.)
x <- 10 # global environment
my_func <- function(){
  # local binding; leaves the global `x` untouched
  x <- 5
  return(x)
}
my_func() # evaluated in the function's execution environment
# [1] 5
#------------
# Free variables are resolved lexically: `x` is neither a formal nor a
# local here, so the call reads the `x` bound in the enclosing (global)
# environment at call time.
x <- 10
another_func <- function(){
  x
}
another_func()
#[1] 10
#----------
# The superassignment operator `<<-` rebinds a name in an enclosing
# environment (here: the global environment) instead of creating a local.
x <- 10
x
#[1] 10
assign1 <- function(){
  # replaces the global `x` -- deliberate side effect for demonstration;
  # `<<-` is discouraged in production code
  x <<- "Wow!"
}
assign1()
x
#[1] "Wow!"
#-----------
a_variable_name
#Error in eval(expr, envir, enclos): object 'a_variable_name' not found
exists("a_variable_name")
#[1] FALSE
assign2 <- function(){
a_variable_name <<- "Magic!"
}
assign2()
exists("a_variable_name")
#[1] TRUE
a_variable_name
#[1] "Magic!"
# Error signalling
#The stop() function will generate an error.
#The stopifnot() function takes a series of logical expressions as arguments and
#if any of them are false an error is generated specifying which expression is false.
# Returns `n` unchanged when n <= 0; otherwise stopifnot() raises an error
# that names the failed expression.
error_if_n_is_greater_than_zero <- function(n){
  stopifnot(n <= 0)
  n
}
# Deliberately triggers the error (5 > 0) to show stopifnot()'s message:
error_if_n_is_greater_than_zero(5)
#Error: n <= 0 is not TRUE
# warning() signals a recoverable condition; message() emits diagnostics.
warning("Consider yourself warned!")
#Warning: Consider yourself warned!
message("In a bottle.")
#In a bottle.
# trycatch
# Evaluate `expr`, converting any error or warning into a message, and
# always report completion via the `finally` clause.
# Returns the value of `expr`, or the (invisible NULL) result of message()
# when a condition was caught. Passing the condition object `e`/`w` to
# message() appends its formatted text to the output.
# (Fixed: "occured" -> "occurred" in the warning-handler message.)
beera <- function(expr){
  tryCatch(expr,
           error = function(e){
             message("An error occurred:\n", e)
           },
           warning = function(w){
             message("A warning occurred:\n", w)
           },
           finally = {
             # runs whether or not a condition was signalled
             message("Finally done!")
           })
}
# Return TRUE/FALSE for even/odd `n`; any error raised while evaluating
# `n %% 2` (e.g. non-numeric input) is caught and reported as FALSE.
is_even_error <- function(n){
  tryCatch(n %% 2 == 0,
           error = function(e){
             FALSE
           })
}
is_even_error(714)
# [1] TRUE   (fixed: the transcript output lines below were missing `#`
# and were syntax errors)
is_even_error("eight")
# [1] FALSE
gHdvLEHvncWFAjNG
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.