blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
187622b9e47585e360ce77b71d3a870aadbf8234
|
e8aab8c1784eff9bb21f7e27f669b272a76fdda2
|
/run_analysis.R
|
ffdada4183f8cda8a0b3ba48e189dcde4e8b198e
|
[] |
no_license
|
mkim7/3-getting-cleaning-data
|
cd98317c2467389e483d45433c9172a719fb03e8
|
d4c5f9680ea74bc22b44eca609d4e02d8981c4d6
|
refs/heads/master
| 2020-05-02T02:26:53.199533
| 2014-06-22T04:23:17
| 2014-06-22T04:23:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,219
|
r
|
run_analysis.R
|
# Getting & Cleaning Data course project: download the UCI HAR smartphone
# data set, merge train/test, keep mean/std measurements, label them, and
# write a tidy per-activity/per-subject average table.
library(data.table)  # BUGFIX: data.table() is used below but was never loaded

# download from url and unzip
# NOTE(review): hard-coded working directory -- adjust for your machine.
setwd("C:/Users/Owner/Documents/R")
zipurl <- "http://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
if (!file.exists("./data")) { dir.create("./data") }
download.file(zipurl, destfile = "./data/project.zip")
unzip("./data/project.zip", exdir = "./data")

# read the raw files (the raw data has no header row)
subject_train <- read.table("./data/UCI HAR Dataset/train/subject_train.txt", header = FALSE)
X_train <- read.table("./data/UCI HAR Dataset/train/X_train.txt", header = FALSE)
Y_train <- read.table("./data/UCI HAR Dataset/train/Y_train.txt", header = FALSE)
subject_test <- read.table("./data/UCI HAR Dataset/test/subject_test.txt", header = FALSE)
X_test <- read.table("./data/UCI HAR Dataset/test/X_test.txt", header = FALSE)
Y_test <- read.table("./data/UCI HAR Dataset/test/Y_test.txt", header = FALSE)
activity <- read.table("./data/UCI HAR Dataset/activity_labels.txt", header = FALSE)
activity
features <- read.table("./data/UCI HAR Dataset/features.txt", header = FALSE)

# assign column names: feature names for the X matrices, fixed names for IDs
colnames(subject_train) <- "subjectID"
colnames(X_train) <- features[, 2]
colnames(Y_train) <- "activityID"
colnames(subject_test) <- "subjectID"
colnames(X_test) <- features[, 2]
colnames(Y_test) <- "activityID"
colnames(activity) <- c("activityID", "activityType")

# 1. Merges the training and the test sets to create one data set.
# Column layout of alldata: activityID (1), subjectID (2), 561 features (3..563).
train <- cbind(Y_train, subject_train, X_train)
test <- cbind(Y_test, subject_test, X_test)
alldata <- rbind(train, test)

# 2. Extracts only the measurements on the mean and standard deviation.
# "good" holds positions within the 561 feature columns.
good <- grep("-mean\\(\\)|-std\\(\\)", features[, 2])
# BUGFIX: alldata has activityID and subjectID prepended, so the feature
# positions must be shifted by 2, and the two ID columns must be kept
# (the original alldata[, good] selected the wrong columns and dropped
# activityID, which breaks the merge in step 3).
alldata2 <- alldata[, c(1, 2, good + 2)]

# 3. Uses descriptive activity names to name the activities in the data set
alldata3 <- merge(alldata2, activity, by = "activityID", all.x = TRUE)

# 4. Appropriately labels the data set with descriptive variable names.
# Expand abbreviations and strip punctuation from every column name.
newcolumns <- colnames(alldata3)
for (i in seq_along(newcolumns)) {
  newcolumns[i] <- gsub("\\()", "", newcolumns[i])
  newcolumns[i] <- gsub("Mag", "Magnitude", newcolumns[i])
  newcolumns[i] <- gsub("^(t)", "time", newcolumns[i])
  newcolumns[i] <- gsub("^(f)", "frequency", newcolumns[i])
  newcolumns[i] <- gsub("BodyBody", "Body", newcolumns[i])
  newcolumns[i] <- gsub(",", "", newcolumns[i])
}
colnames(alldata3) <- newcolumns
# completed cleaning
finaldata <- alldata3

# 5. Creates a second, independent tidy data set with the average of each
# variable for each activity and each subject.
## https://class.coursera.org/getdata-004/forum/thread?thread_id=262 regarding how tidy data should look
final.table <- data.table(finaldata)
tidy.table <- final.table[, lapply(.SD, mean), by = list(activityID, subjectID)]
# save tidy data
write.table(tidy.table, file = "uci-tidydata.txt", sep = "\t")
|
cd0d6209a92d4084cff398e377c15ac77b9b8c85
|
b66c3d2fa20a2d0b0cfea23e88d7b030128ce24f
|
/R_Code/randomforest_bagging_ensemble.R
|
b5f8f2a36ad724414aefc06f65d5c20d348ae45e
|
[] |
no_license
|
AGARNER18/Russian-Market
|
059af69320c10cc67cb994901196ed2734a9d9d4
|
d70e3b85b833086fdb3f3116389e415f2183fea0
|
refs/heads/master
| 2020-06-18T16:09:47.091566
| 2017-12-12T23:04:07
| 2017-12-12T23:04:07
| 94,169,463
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,259
|
r
|
randomforest_bagging_ensemble.R
|
# Sberbank Russian Housing Market (Kaggle): random forest + bagging ensemble.
# Manual preprocessing done outside R, documented by the original author:
# Column price_doc was added to test data set in excel to match columns for rbind
# NA were replaced with blank spaces in excel for both the training and test data sets
# format of time stamp was changed for easier manipulation to MM/DD/YY format
# NOTE(review): install.packages() runs on every execution; consider guarding
# with requireNamespace() so the script does not hit the network each run.
install.packages("Hmisc")
install.packages("Metrics")
install.packages("randomForest")
install.packages("dplyr")
library(Hmisc)         # impute()
library(randomForest)  # randomForest(), grow(), na.roughfix()
library(Metrics)       # msle()
library(dplyr)
#Set Working Directory
# NOTE(review): hard-coded path -- adjust for your machine.
setwd("C:/Users/Amber/Desktop/kaggle")
# Training rows occupy ids 1-30473 in the combined frame.
#1-30473
train<-read.csv("train_without_noise_date.csv")
# Test rows occupy ids 30474-38135.
#30474-38135
test<-read.csv("russia_test_date.csv")
# NOTE(review): "c" shadows base::c(); c(...) calls still work because R
# looks up functions separately, but renaming (e.g. "combined") would be safer.
c <- rbind(train, test)
macro<-read.csv("macro_date.csv")
# keep only complete macro rows before joining onto the listings
macro<-macro[complete.cases(macro),]
c <- merge(c, macro, by=("timestamp"),all.x = TRUE)
summary(c)
# Drop variables judged to be of no use for modelling.  Assigning NULL
# removes a column from the data frame; driving the removals from a single
# name vector keeps the list readable.  (Many further candidates --
# additional cafe/church/green-part counts etc. -- were considered by the
# original author and deliberately kept.)
drop_cols <- c(
  "sub_area", "ID_railroad_station_avto",
  # raw population breakdown columns
  "X0_13_all", "X0_13_female", "X0_13_male",
  "X0_17_all", "X0_17_female", "X0_17_male",
  "X0_6_female", "X0_6_male", "X0_6_all",
  "X0_16_29_female", "X0_16_29_all", "X0_16_29_male",
  "X0_7_14_female", "X0_7_14_male", "X0_7_14_all",
  "overdue_wages_per_cap",
  # building material counts
  "build_count_block", "build_count_brick", "build_count_foam",
  "build_count_frame", "build_count_mix", "build_count_monolith",
  "build_count_panel", "build_count_slag", "build_count_wood",
  # cafe average prices
  "cafe_avg_price_1000", "cafe_avg_price_1500", "cafe_avg_price_2000",
  "cafe_avg_price_3000", "cafe_avg_price_500", "cafe_avg_price_5000",
  # cafe counts by radius and price band
  "cafe_count_1000", "cafe_count_1000_price_1500",
  "cafe_count_1000_price_2500", "cafe_count_1000_price_4000",
  "cafe_count_1000_price_500", "cafe_count_1000_price_high",
  "cafe_count_1500__price", "cafe_count_1500__price_1000",
  "cafe_count_1500__price_1500", "cafe_count_1500__price_2500",
  "cafe_count_1500__price_4000", "cafe_count_1500__price_500",
  "cafe_count_1500__price_high",
  "cafe_count_2000__price", "cafe_count_2000__price_1000",
  "cafe_count_2000__price_1500", "cafe_count_2000__price_2500",
  "cafe_count_2000__price_4000", "cafe_count_2000__price_500",
  "cafe_count_2000__price_high",
  "cafe_count_3000__price", "cafe_count_3000__price_1000",
  "cafe_count_3000__price_1500", "cafe_count_3000__price_2500",
  "cafe_count_3000__price_4000", "cafe_count_3000__price_500",
  "cafe_count_3000__price_high",
  "cafe_count_500_price", "cafe_count_500_price_1000",
  "cafe_count_500_price_1500", "cafe_count_500_price_2500",
  "cafe_count_500_price_4000", "cafe_count_500_price_500",
  "cafe_count_500_price_high",
  "cafe_count_5000_price", "cafe_count_5000_price_1000",
  "cafe_count_5000_price_1500", "cafe_count_5000_price_2500",
  "cafe_count_5000_price_4000", "cafe_count_5000_price_500",
  "cafe_count_5000_price_high",
  # cafe min/max price averages
  "cafe_sum_1000_max_price_avg", "cafe_sum_1000_min_price_avg",
  "cafe_sum_1500_max_price_avg", "cafe_sum_1500_min_price_avg",
  "cafe_sum_2000_max_price_avg", "cafe_sum_2000_min_price_avg",
  "cafe_sum_3000_max_price_avg", "cafe_sum_3000_min_price_avg",
  "cafe_sum_5000_max_price_avg", "cafe_sum_5000_min_price_avg",
  "cafe_sum_500_max_price_avg", "cafe_sum_500_min_price_avg",
  # miscellaneous raion / macro columns
  "female_f", "full_all",
  "green_zone_km", "green_zone_part",
  "healthcare_centers_raion", "hospice_morgue_km",
  "incineration_raion", "timestamp",
  "hospital_bed_occupancy_per_year", "hospital_beds_available_per_cap",
  "provision_retail_space_sqm", "turnover_catering_per_cap",
  "provision_retail_space_modern_sqm", "housing_fund_sqm"
)
for (nm in drop_cols) {
  c[[nm]] <- NULL
}
# Mean-impute the two build-count variables that stay in the model.
c$build_count_after_1995 <- with(data = c, impute(build_count_after_1995, mean))
c$build_count_before_1920 <- with(data = c, impute(build_count_before_1920, mean))
# Inspect factor variables and their levels (printed for review).
is.fact <- sapply(c, is.factor)
factors.df <- c[, is.fact]
lapply(factors.df, levels)

# Convert three numeric-looking factor columns to numeric.
# BUGFIX: as.numeric(factor) returns the internal level CODES, not the
# underlying values (R FAQ 7.10); convert via as.character() first.
# Non-numeric entries (e.g. "#!") become NA with a warning, as intended.
c$child_on_acc_pre_school <- as.numeric(as.character(c$child_on_acc_pre_school))
c$modern_education_share <- as.numeric(as.character(c$modern_education_share))
c$old_education_build_share <- as.numeric(as.character(c$old_education_build_share))

# Count missing values in each variable (printed for inspection).
apply(is.na(c), 2, sum)

# Mean-impute the remaining incomplete numeric columns (Hmisc::impute).
mean_impute_cols <- c(
  "hospital_beds_raion", "build_year", "ID_railroad_station_walk",
  "floor", "kitch_sq", "life_sq", "material", "max_floor",
  "metro_km_walk", "metro_min_walk", "num_room", "preschool_quota",
  "prom_part_5000", "railroad_station_walk_km", "railroad_station_walk_min",
  "raion_build_count_with_builddate_info",
  "raion_build_count_with_material_info",
  "school_quota", "state",
  "build_count_1921.1945", "build_count_1946.1970", "build_count_1971.1995",
  "green_part_2000", "preschool_education_centers_raion"
)
for (nm in mean_impute_cols) {
  c[[nm]] <- impute(c[[nm]], mean)
}
# NOTE(review): "state" was already mean-imputed above, so this second call
# is a no-op; also base::mode() is R's STORAGE mode, not a statistical mode
# function -- confirm intent before relying on this line.
c$state <- with(c, impute(c$state, mode))
summary(c)

# Candidate log transformations considered by the original author (kept
# disabled): public_healthcare_km, full_sq, leisure_count_1000,
# cafe_count_1000__price, office_count_500, cafe_count_1000_price_1000,
# big_church_count_3000, power_transmission_line_km, lodging_sqm_per_cap,
# provision_doctors, construction_value, average_life_exp,
# unprofitable_enterpr_share, mosque_count_500,
# public_transport_station_min_walk.

# Fill any remaining NAs with column medians / most frequent levels
# (randomForest::na.roughfix), then persist the combined frame.
c <- na.roughfix(c)
write.csv(c, "combined.csv")
# Separate the combined frame back into training and test sets.
# Training rows are ids 1-30473; everything above is the Kaggle test set.
a <- c[(c$id <= 30473), ]
b <- c[(c$id > 30473), ]
# Drop the id column (first column) from the training set for model building,
# then fill any residual NAs with rough medians.
a <- a[, -1]
a <- na.roughfix(a)
# Outlier treatment on the training rows: outliers::rm.outlier() replaces
# the single most extreme value of a column with the column median
# (fill=TRUE, median=TRUE).  The columns are processed in the original
# author's order; columns listed twice get the treatment applied twice
# (removing the two most extreme values in sequence).
library(outliers)
outlier_cols <- c(
  "raion_popul", "school_education_centers_raion",
  "school_education_centers_top_20_raion", "university_top_20_raion",
  "sport_objects_raion", "culture_objects_top_25_raion",
  "shopping_centers_raion", "office_raion", "male_f",
  "young_all", "young_male", "young_female",
  "work_all", "work_male", "work_female",
  "X7_14_all", "X7_14_male", "X7_14_female",
  "X16_29_all", "X16_29_male", "X16_29_female",
  "office_sqm_500", "trc_count_500", "trc_sqm_500",
  "mosque_count_500", "leisure_count_500",
  # BUGFIX: the next two previously copy-pasted trc_count_500 as the
  # SOURCE column, overwriting sport/market counts with trc data.
  "sport_count_500", "market_count_500",
  "office_count_1000", "office_sqm_1000", "trc_count_1000",
  "trc_sqm_1000", "mosque_count_1000", "sport_count_1000",
  "market_count_1000",
  "office_count_1500", "office_sqm_1500", "trc_count_1500",
  "mosque_count_1500", "leisure_count_1500", "sport_count_1500",
  "market_count_1500",
  "office_count_2000", "office_sqm_2000", "trc_count_2000",
  "trc_sqm_2000", "mosque_count_2000", "leisure_count_2000",
  "sport_count_2000", "market_count_2000",
  "office_count_3000", "office_sqm_3000", "trc_count_3000",
  "trc_sqm_3000", "mosque_count_3000", "leisure_count_3000",
  "sport_count_3000", "market_count_3000",
  "office_count_5000", "office_sqm_5000", "trc_count_5000",
  "trc_sqm_5000", "mosque_count_5000", "leisure_count_5000",
  "sport_count_5000", "market_count_5000",
  "deposits_value", "mortgage_value",
  # NOTE(review): the next six columns were deleted from the frame earlier
  # in the script, so rm.outlier() on them will fail on a NULL column --
  # confirm whether they should be removed from this list.
  "overdue_wages_per_cap", "housing_fund_sqm",
  "hospital_beds_available_per_cap", "hospital_bed_occupancy_per_year",
  "provision_retail_space_sqm", "provision_retail_space_modern_sqm",
  "turnover_catering_per_cap",
  "full_sq", "life_sq", "floor", "max_floor", "material",
  "build_year", "kitch_sq", "raion_popul", "indust_part",
  "preschool_quota", "preschool_education_centers_raion", "school_quota",
  "school_education_centers_raion", "school_education_centers_top_20_raion",
  "university_top_20_raion", "sport_objects_raion",
  "culture_objects_top_25_raion", "shopping_centers_raion", "office_raion",
  "male_f", "young_male", "young_all",
  "raion_build_count_with_material_info",
  "raion_build_count_with_builddate_info",
  "metro_min_avto", "metro_km_avto", "metro_min_walk", "metro_km_walk",
  "kindergarten_km", "school_km", "park_km", "water_treatment_km",
  "railroad_station_walk_km", "railroad_station_walk_min",
  "railroad_station_avto_km", "railroad_station_avto_min",
  "public_transport_station_km", "public_transport_station_min_walk",
  "water_km", "mkad_km", "ttk_km", "sadovoe_km", "kremlin_km",
  "railroad_km", "zd_vokzaly_avto_km", "oil_chemistry_km",
  "nuclear_reactor_km"
)
for (nm in outlier_cols) {
  a[[nm]] <- rm.outlier(a[[nm]], fill = TRUE, median = TRUE)
}
# Candidates considered but left untreated by the original author:
# theaters_viewers_per_1000_cap, museum_visitis_per_100_cap,
# bandwidth_sports, apartment_build.
# remove price_doc column for test data
# NOTE(review): column 225 is assumed to be price_doc after the earlier
# drops; b$price_doc <- NULL would be robust to column reordering.
b <- b[, (-225)]
# count remaining missing values per column (printed for inspection)
apply(is.na(a), 2, sum)

# Random forests grown incrementally: fit 25 trees, then grow() adds more.
set.seed(1235)
model_no_outliers<-randomForest(price_doc~.,data=a, ntree=25, corr.bias=TRUE)
model_no_outliers_grown<-grow(model_no_outliers, 475)
set.seed(1234)
model <- randomForest(price_doc~.,data=a,ntree=25, corr.bias=TRUE)
model2<-grow(model, 1075)
model3<-grow(model, 1000)

# Root mean squared log error of the grown forest on the training data.
error<-sqrt(msle(a$price_doc, (predict(model2, a))))
error

# find multicollinearity
# NOTE(review): vif() is not provided by any package loaded above (it lives
# in car/rms); library(car) is required for this line to run.
# BUGFIX: removed the stray ntree=800 argument, which is not an lm()
# argument and caused an "unused argument" error.
vif(lm(price_doc~full_sq+zd_vokzaly_avto_km+public_healthcare_km+num_room+big_church_count_3000+leisure_count_1000+cafe_count_1000__price+prom_part_5000+sport_count_3000+power_transmission_line_km+mosque_count_3000+nuclear_reactor_km+mosque_count_3000+detention_facility_km+trc_count_5000+floor+sport_count_1500+indust_part+ecology,data=a))

# random forest with 1500 trees on the reduced feature set
model_1500 <- randomForest(price_doc~full_sq+zd_vokzaly_avto_km+public_healthcare_km+num_room+big_church_count_3000+leisure_count_1000+cafe_count_1000__price+prom_part_5000+sport_count_3000+power_transmission_line_km+mosque_count_3000+nuclear_reactor_km+mosque_count_3000+detention_facility_km+trc_count_5000+floor+sport_count_1500+indust_part+ecology,data=a,ntree=1500)
# NOTE(review): this reuses model2's predictions, not model_1500's --
# probably intended predict(model_1500, a); left as written.
error_1500<-msle(a$price_doc, (predict(model2, a)))
error

# random forest with mtry 10 and corr.bias on the reduced feature set
model2 <- randomForest(price_doc~full_sq+zd_vokzaly_avto_km+public_healthcare_km+num_room+big_church_count_3000+leisure_count_1000+cafe_count_1000__price+prom_part_5000+sport_count_3000+power_transmission_line_km+mosque_count_3000+nuclear_reactor_km+mosque_count_3000+detention_facility_km+trc_count_5000+floor+sport_count_1500+indust_part+ecology,data=a,ntree=1000, mtry=10, corr.bias=TRUE)
model3<-randomForest(price_doc~full_sq+zd_vokzaly_avto_km+public_healthcare_km+num_room+big_church_count_3000+leisure_count_1000+cafe_count_1000__price+prom_part_5000+sport_count_3000+power_transmission_line_km+mosque_count_3000+nuclear_reactor_km+mosque_count_3000+detention_facility_km+trc_count_5000+floor+sport_count_1500+indust_part+ecology,data=a,ntree=50, corr.bias=TRUE)

# Bagged regression trees.
# BUGFIX: moved before the error computation -- model5 was previously
# referenced several lines before it was defined.
install.packages("ipred")
library(ipred)
model5<-bagging(price_doc~., data=a, nbagg=100)
#model4<-nnet(price_doc~.,data=a, size=100, MaxNWts=150000, decay = 0.0001,maxit = 500)

# Mean squared log error for the tuned (bagged) and untuned forests.
# BUGFIX: error_tuned was used below but never assigned in the original.
error_tuned<-msle(a$price_doc, (predict(model5, a)))
error_untuned<-msle(a$price_doc, (predict(model3, a)))
#compare error
table(sqrt(error_tuned), sqrt(error_untuned))

# Ensemble: average the random forest and bagging predictions (training fit).
pred<-predict(model2, a)
pred3<-predict(model5, a)
a$rforest<-pred
a$bagg<-pred3
a$total<-a$rforest+a$bagg
a$average<-a$total/2
error<-msle(a$price_doc, a$average)
error

#Predict for test data
pred <- predict(model2, b)
pred3<-predict(model5, b)
b$rforest<-pred
b$bagg<-pred3
b$total<-b$rforest+b$bagg
b$average<-b$total/2
predictions <- data.frame(id=b$id, price_doc=b$average)
write.csv(predictions, "bagging_rforest_3.csv")

# Ideas
#add year of timestamp or month or season
#try lasso with glmnet function
#try linear svr
#try ridge regression
# lasso
lm.fit <- glm(price_doc~., data=c)
# normalize formula
maxs <- apply(c, 2, max)
mins <- apply(c, 2, min)
c<- as.data.frame(scale(c, center = mins, scale = maxs - mins))
# Reference (BUGFIX: this was a bare URL, which is a parse error in R and
# prevented the whole file from being sourced):
# https://www.r-bloggers.com/fitting-a-neural-network-in-r-neuralnet-package/
|
4bc724b924f6e73903ae18516699736a6d8c9aec
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/hdnom/examples/hdnom.calibrate.Rd.R
|
fdda5b08eb978bb72cb8eeab30cd43afb90297e3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 986
|
r
|
hdnom.calibrate.Rd.R
|
# Runnable example extracted from the hdnom.calibrate() help page.
### Name: hdnom.calibrate
### Title: Calibrate High-Dimensional Cox Models
### Aliases: hdnom.calibrate
### ** Examples
library(hdnom)
library("survival")

# Load the imputed SMART data and build the survival outcome.
data("smart")
x <- as.matrix(smart[, -c(1, 2)])
time <- smart$TEVENT
event <- smart$EVENT
y <- Surv(time, event)

# Fit a lasso-penalized Cox model.
lasso_fit <- hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11)

# Calibrate by refitting the original data directly.
cal.fitting <- hdnom.calibrate(
  x, time, event, model.type = "lasso",
  alpha = 1, lambda = lasso_fit$lasso_best_lambda,
  method = "fitting",
  pred.at = 365 * 9, ngroup = 5,
  seed = 1010)

# Calibrate by 5-fold cross-validation.
cal.cv <- hdnom.calibrate(
  x, time, event, model.type = "lasso",
  alpha = 1, lambda = lasso_fit$lasso_best_lambda,
  method = "cv", nfolds = 5,
  pred.at = 365 * 9, ngroup = 5,
  seed = 1010)

# Inspect both calibrations.
print(cal.fitting)
summary(cal.fitting)
plot(cal.fitting)
print(cal.cv)
summary(cal.cv)
plot(cal.cv)
|
f9e0be9d3be87011167b75ecca3004a186e26122
|
7b102f9c8f2e3f9240090d1d67af50333a2ba98d
|
/gbd_2019/nonfatal_code/nonfatal_injuries/crosswalking/mr_brt_adjustment/07_save_xwalk_version.R
|
7ec7c87668cdb9acc9a68fe691ba6ae1ed469ac9
|
[] |
no_license
|
Nermin-Ghith/ihme-modeling
|
9c8ec56b249cb0c417361102724fef1e6e0bcebd
|
746ea5fb76a9c049c37a8c15aa089c041a90a6d5
|
refs/heads/main
| 2023-04-13T00:26:55.363986
| 2020-10-28T19:51:51
| 2020-10-28T19:51:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 471
|
r
|
07_save_xwalk_version.R
|
# Save a crosswalk version for an injuries bundle.
# Expects positional command-line arguments: [6] bundle id,
# [7] bundle version id, [8] name fragment of the adjusted-data file.
source('FILEPATH/save_crosswalk_version.R')

cli_args <- commandArgs()
bundle <- cli_args[6]
version <- cli_args[7]
name <- cli_args[8]
print('starting script')

bundle_version_id <- version
# Path to the MR-BRT-adjusted data: FILEPATH/<bundle>_<name>_adjusted.xlsx
data_filepath <- paste0('FILEPATH', as.character(bundle), '_', name, '_adjusted.xlsx')
description <- 'step 4 crosswalks'

# Upload the crosswalk version for this bundle version.
result <- save_crosswalk_version(
  bundle_version_id = bundle_version_id,
  data_filepath = data_filepath,
  description = description
)
|
9d896b71d918ec20db132d68c2a124fe495cce6d
|
56efee66de74609ccff457427236520ac661ea5c
|
/man/tdmEnvTUpdate.Rd
|
16f6a63f1f809720460c565e4c79038b07569054
|
[] |
no_license
|
cran/TDMR
|
cc3e8b256624cd53eb350ce34e32ef34c92fe1ab
|
0554aaaa8c3f828a3e2e9c898deddf7cc9f4d609
|
refs/heads/master
| 2021-01-18T23:27:21.160115
| 2020-03-02T16:20:02
| 2020-03-02T16:20:02
| 17,693,854
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 405
|
rd
|
tdmEnvTUpdate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tdmEnvTMakeNew.r
\name{tdmEnvTUpdate}
\alias{tdmEnvTUpdate}
\title{Update \code{envT$tdm}}
\usage{
tdmEnvTUpdate(envT, tdm)
}
\arguments{
\item{envT}{environment TDMR}
\item{tdm}{list for TDMR, see \code{\link{tdmDefaultsFill}}}
}
\value{
envT
}
\description{
Update \code{envT$tdm} with the non-NULL elements of \code{tdm}
}
|
9af4359a94debc53c962d6344307ca314a1a30f5
|
98a1ac6724e2a3d093e7aabd3bae476c6fe9dd47
|
/man/moranplotmap.Rd
|
3bdf426e047f3dd94c842ff03e8659f346f4dceb
|
[] |
no_license
|
Abson-dev/GeoXp
|
f99ed11d16ca9044b496eab9a03a0d5b73cc8638
|
742911d53f99cdd95296dcf92b259aac710eaf5d
|
refs/heads/master
| 2020-12-31T09:49:28.270689
| 2013-08-14T00:00:00
| 2013-08-14T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,202
|
rd
|
moranplotmap.Rd
|
\name{moranplotmap}
\alias{moranplotmap}
\title{Moran scatterplot and map}
\description{
The function \code{moranplotmap()} draws a moran plot, used to detect spatial
autocorrelation in the variable var. On the x-axis, is represented \eqn{x-\bar{x}}{x-bar(x)}
and on the y-axis \eqn{W(x-\bar{x})}{W(x-bar(x))}, where W is the spatial weight matrix.
It also calculates Moran's I statistic (see \code{nonnormoran}) and
gives a p-value associated to the autocorrelation test (gaussian version and permutation version). }
\usage{
moranplotmap(sp.obj, name.var, listw.obj, flower=FALSE, locmoran=FALSE,
names.arg=c("H.-H.","L.-H.","L.-L.","H.-L."), names.attr=names(sp.obj), criteria=NULL,
carte=NULL, identify=FALSE, cex.lab=0.8, pch=16, col="lightblue3",
xlab="", ylab="", axes=FALSE, lablong="", lablat="")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{sp.obj}{object of class extending Spatial-class}
\item{name.var}{a character; attribute name or column number in attribute table}
\item{listw.obj}{object of class listw}
\item{flower}{if TRUE, link neighbouring sites }
\item{locmoran}{if TRUE, print local Moran's I statistic on the Moran plot}
\item{names.arg}{names of the quadrant of the Moran plot}
\item{names.attr}{names to use in panel (if different from the names of variable used in sp.obj)}
\item{criteria}{a vector of boolean of size the number of spatial units, which permit to represent preselected sites with a cross, using the tcltk window}
\item{carte}{matrix with 2 columns for drawing spatial polygonal contours : x and y coordinates of the vertices of the polygon}
\item{identify}{if not FALSE, identify plotted objects (currently only working for points plots). Labels for identification are the row.names of the attribute table row.names(as.data.frame(sp.obj)).}
\item{cex.lab}{character size of label}
\item{pch}{16 by default, symbol for selected points}
\item{col}{"lightblue3" by default, color of bars on the histogram}
\item{xlab}{a title for the graphic x-axis}
\item{ylab}{a title for the graphic y-axis}
\item{axes}{a boolean with TRUE for drawing axes on the map}
\item{lablong}{name of the x-axis that will be printed on the map}
\item{lablat}{name of the y-axis that will be printed on the map}
}
\details{
For the permutation test, for each drawing the values of the variable \code{var} are
randomly assigned to the sites. We then calculate the Moran's I statistic associated to each drawing and we give the frequency of drawings where the Moran's I statistic is lower than or equal to the
observed Moran's I statistic. Moreover, the function gives the opportunity to link neighbouring sites and gives the Local Moran's I statistic.
For a site i:
\deqn{I_i=(x_i-\bar{x})\sum_{j}W_{ij}(x_j-\bar{x})}{I_i=(x_i-bar(x))sum_jW_ij(x_j-bar(x))}
with j not equal to i.}
\note{
In the case where the spatial weight matrix is not normalized, the Moran's I statistic is not
equal to \eqn{\beta}{beta} used in the regression line for the model \eqn{W(X-\bar{X})=\beta(X-\bar{X})+u}{W(X-bar(X))=beta(X-bar(X))+u}. That is why
the regression line is only drawn in the case of W normalized.}
\value{
In the case where user click on \code{save results} button,
a list is created as a global variable in \code{last.select} object. \$obs, a vector of integer,
corresponds to the number of spatial units selected just before leaving the Tk window, \$MORAN,
a numeric, corresponds to the value of the Moran's I statistic.
}
\references{Thibault Laurent, Anne Ruiz-Gazen, Christine Thomas-Agnan (2012), GeoXp: An R Package for Exploratory Spatial Data Analysis. \emph{Journal of Statistical Software}, 47(2), 1-23. \cr \cr
Roger S.Bivand, Edzer J.Pebesma, Virgilio Gomez-Rubio (2009), \emph{Applied Spatial Data Analysis with R}, Springer. \cr \cr
Jim Lesage, ``Spatial Econometrics Toolbox'', \url{http://www.spatial-econometrics.com/}}
\author{Aragon Y., Thomas-Agnan C., Ruiz-Gazen A., Laurent T., Robidou L.}
\keyword{spatial}
\examples{
##
# data baltimore
data(baltimore)
# SpatialPoints
baltimore.sp <- SpatialPoints(cbind(baltimore$X,baltimore$Y))
# SpatialPointsDataFrame
baltimore.spdf<-SpatialPointsDataFrame(baltimore.sp,baltimore)
# Spatial Weight Matrix
W.nb <- knn2nb(knearneigh(baltimore.sp, k=4))
# We choose a row standardized spatial weight matrix :
W.listw <- nb2listw(W.nb,style="W")
# moranplotmap with some options
moranplotmap(baltimore.spdf, "PRICE", W.listw ,
flower=TRUE, locmoran=TRUE,criteria=(baltimore.spdf$AC==1),
identify=TRUE)
# comparison with the moran.test function
moran.test(baltimore.spdf$PRICE,W.listw)
##
# data columbus
require("maptools")
example(columbus)
# use of moranplotmap with spatial weight matrix col.gal.nb :
# 1. row-standardized
moranplotmap(columbus,"HOVAL",nb2listw(col.gal.nb,style="W"))
# 2. basic binary
moranplotmap(columbus,"HOVAL",nb2listw(col.gal.nb,style="B"))
# 3. globally standardized
moranplotmap(columbus,"HOVAL",nb2listw(col.gal.nb,style="C"))
}
\seealso{\code{\link{neighbourmap}},\code{\link{makeneighborsw}},\code{\link{normw}},\code{\link{nonormmoran}} }
|
6b535983b5c7c286280cad55618327a09a1a8ecb
|
0202519b1c23a1b49d0809bf94b29c04bf3e1a60
|
/server.R
|
924f7b5c7d2259fcf6408ca48defb5749f92a52b
|
[] |
no_license
|
dtriepke/FallzahlPlanung
|
57ecd638089f34cc9b8d84ac835f280e2da1dc93
|
9f162745e8d35e96456895ff049653ee5deb023a
|
refs/heads/master
| 2020-03-22T03:34:22.411336
| 2018-07-02T12:32:17
| 2018-07-02T12:32:17
| 139,439,398
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,737
|
r
|
server.R
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define server logic required to draw
# Server logic: for every allocation ratio r = n2/n1 on a grid, find the
# smallest group-1 size n1 such that the detectable effect size (given the
# significance level alpha and the power p) falls to or below the requested
# effect 'eff', then plot n1, n2 and n = n1 + n2 against log(r).
shinyServer(function(input, output) {
  output$Plot <- renderPlot({
    alpha <- input$alpha   # significance level
    p <- input$power       # target power
    # Grid of allocation ratios; n1 starts as a copy of the grid and is
    # overwritten element-wise with the computed group-1 sizes below.
    n1 <- r <- seq(0.1,10,0.01)
    eff <- input$eff       # requested effect size
    for( i in 1:length(r)){
      eff2dummy <- 2       # sentinel > eff^2 so the while loop runs at least once
      n1[i] <- 2
      while ( eff < sqrt(eff2dummy) ) { # loop to determine n1
        n1[i] <- n1[i] + 1
        # Squared detectable effect for the current n1 and ratio r[i],
        # based on t-quantiles with df = n1*(1+r) - 2.
        # NOTE(review): by left-to-right precedence this evaluates as
        # ((qt + qt)^2 / n1[i]) * (1 + 1/r[i]) -- confirm that is intended.
        eff2dummy <- (qt(p = p, df = n1[i] * (1 + r[i]) - 2) +
                        qt(p = 1 - alpha, df = n1[i] * (1 + r[i]) - 2))^2 /
          n1[i] * (1 + 1 / r[i])
      }
    }
    n2 <- n1 * r           # group-2 sizes implied by each ratio
    n <- n1 + n2           # total sample size
    # Main curve: n1 against log(r); the subtitle reports the sizes at the
    # specific ratio r = 40:60 and its relative distance to the balanced design.
    plot( log(r), n1, type = "l", col = "red", lwd = 2.5, ylab = "n",
          sub = paste("r = 40:60 => {n = ", n[which(r == round(40/60,2) )],
                      ", n1 = ", n1[which( r == round(40/60,2) )],
                      ", n2 = ", n2[which(r == round(40/60,2) )],
                      " rel. distance to r = 50:50 ", round(n[which(r == round(40/60,2) )] / min(n), 2), "}" ))
    lines( log(r), ceiling(n1 * r), col = "green")        # n2 (rounded up)
    lines( log(r), n1 + ceiling(n1 *r), col ="skyblue")   # total n
    legend("top", legend = c(expression(n[1]), expression(n[2]),"n"), lwd = 3,
           lty = 1, col = c("red", "green", "skyblue"))
    # Annotate the balanced-design minimum at log(r) = 0.
    text(x = 0, y = min(n1 + n1 * r) + min(n1), labels = paste("n1 = n2 = ", min(n1 + n1 * r)/2))
  })
  #output$Text1 <- renderText(
  #  paste("50/50: n = ", min(n),", n1 = ", min(n)/2, ", n2 = ", min(n)/2 ))
})
|
ab4467de4ea494e445b3a4005056ecaad688584c
|
5f82d1bc22e4ef72a63c58852a2d035e124f1a37
|
/man/filter.Rd
|
3b8e41af0510312816360680be909761ff8bc2c5
|
[] |
no_license
|
cran/bupaR
|
75608804ef045f678821740aaff123991d5d36b5
|
ef020af22301e7aa8c82d62e4d01dd5aebaea99e
|
refs/heads/master
| 2023-04-20T17:49:49.645967
| 2023-04-02T21:00:06
| 2023-04-02T21:00:06
| 86,215,725
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 523
|
rd
|
filter.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filter.R
\docType{import}
\name{filter}
\alias{filter}
\title{Filter event log}
\arguments{
\item{.data}{\code{\link{log}}: Object of class \code{\link{eventlog}} or \code{\link{activitylog}}.}
\item{...}{Conditions to filter on}
}
\keyword{internal}
\description{
These objects are imported from other packages. Follow the links
below to see their documentation.
\describe{
\item{dplyr}{\code{\link[dplyr]{filter}}}
}}
|
49a85296aa5a4c27b1b36073cde962cd45597fea
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/EcoGenetics/R/eco.remove.R
|
75906d8b76707a4e0e85e343e70baf416ca7d49d
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,200
|
r
|
eco.remove.R
|
#' Create an updated ecogen object with results removed from the slot OUT
#'
#' @param eco Object of class "ecogen".
#' @param ... Objects to remove from eco, typed without quotations.
#'
#' @examples
#'
#' \dontrun{
#'
#' data(eco.test)
#' variog <- eco.variogram(eco[["P"]][, 1], eco[["XY"]])
#'
#' # Assignation of values can be made with the corresponding accessors,
#' # using the generic notation of EcoGenetics
#' # (<ecoslot.> + <name of the slot> + <name of the object>).
#' # See help("EcoGenetics accessors")
#'
#' ecoslot.OUT(eco) <- variog
#' we.are.numbers <- c(1:10)
#' we.are.characters <- c("John Coltrane", "Charlie Parker")
#' ecoslot.OUT(eco) <- list(we.are.numbers, we.are.characters)
#' ecoslot.OUT(eco)
#' eco <- eco.remove(eco, we.are.numbers)
#' ecoslot.OUT(eco)
#'
#' }
#'
#' @author Leandro Roser \email{leandroroser@@ege.fcen.uba.ar}
#'
#' @export

setGeneric("eco.remove",
           function(eco, ...) {
             # Names of the objects passed through ..., taken from the call
             # itself; the first two call elements (function name and eco)
             # are not removal targets and are dropped.
             removal.targets <- as.character(match.call())[-(1:2)]
             # Keep every OUT entry whose name was not named in the call.
             keep <- !(names(eco@OUT) %in% removal.targets)
             eco@OUT <- eco@OUT[keep]
             eco
           })
|
cfe8005f9930da9916c0a2dd38a76f400662f415
|
c1463667cf3ff1057859b4bbd956b7e1737bc187
|
/Word Clouds in R.R
|
6bd9c93098a0280bcc32a59af80c34d0ee295d2c
|
[] |
no_license
|
kshirasaagar/R-U-Ready
|
0c6ce8d8d0bb297a754d2229c86ff268755720d7
|
1caf81814cdd9cc779771f763f34bbfa2fc424c9
|
refs/heads/master
| 2021-08-03T19:23:51.091305
| 2021-01-28T23:06:36
| 2021-01-28T23:06:36
| 30,675,127
| 1
| 1
| null | 2021-01-28T23:08:06
| 2015-02-11T23:24:22
|
R
|
UTF-8
|
R
| false
| false
| 348
|
r
|
Word Clouds in R.R
|
#Word Clouds in R
# Build a word cloud from the 'crude' corpus bundled with the tm package:
# strip punctuation and English stopwords, form a term-document matrix,
# and plot each term sized by its total frequency across documents.
# The whole block is skipped gracefully if tm is not installed.
library(wordcloud)
if(require(tm)){
  data(crude)
  crude <- tm_map(crude, removePunctuation)
  # Remove default-language stopwords from every document.
  crude <- tm_map(crude, function(x)removeWords(x,stopwords()))
  tdm <- TermDocumentMatrix(crude)
  m <- as.matrix(tdm)
  # Total frequency per term, sorted most-frequent first.
  v <- sort(rowSums(m),decreasing=TRUE)
  d <- data.frame(word = names(v),freq=v)
  wordcloud(d$word,d$freq)
}
|
a0fe089bf5540c6393220a263f126532de4e67e5
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/AeRobiology/examples/trend_plot.Rd.R
|
9db23878f5961937b45392c37eb2189daf4e2399
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 264
|
r
|
trend_plot.Rd.R
|
library(AeRobiology)
### Name: trend_plot
### Title: Calculating and Plotting Trends of Pollen Data (summary plot).
### Aliases: trend_plot

### ** Examples

# Summary trend plot for the bundled 'munich' pollen dataset, without
# interpolating gaps and without writing the result or plot to disk.
data("munich")
trend_plot(munich, interpolation = FALSE, export.result = FALSE, export.plot = FALSE)
|
39837859bc38be5461807793ed22b54262561228
|
6d443800445592a4bcdc3531a850d5152942e2fd
|
/GUI/browse_dataset.R
|
a28c8231032e1c57c29926d846a8a96c68d92da3
|
[] |
no_license
|
angy89/InsideNano
|
35f2004414bd1065df4db686ceefdb2096b789da
|
0b5ee4502106740acc3daec100cac37f015791d3
|
refs/heads/master
| 2021-01-18T21:11:38.811196
| 2016-01-10T20:23:47
| 2016-01-10T20:23:47
| 45,189,493
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,111
|
r
|
browse_dataset.R
|
# Render the gene interaction network (networkD3 force layout) restricted to
# the pathways selected in the UI (input$Patway_g).
# NOTE(review): relies on the globals DEBUGGING and the igraph object
# g_geni2; the parameter g is never used inside this function.
plot_gene_network = function(input,output,g,g_geni2){

  output$geneNetwork = renderForceNetwork({

    groups_path = input$Patway_g  # pathway groups chosen by the user

    validate(
      need(input$Patway_g != "", "Please select a Pathway")
    )

    if(DEBUGGING)
      cat("Number of groups ",length(groups_path),"\n")

    # Keep only vertices whose group belongs to a selected pathway, then
    # drop vertices left with no incident edge.
    good_index = which(V(g_geni2)$group %in% groups_path)
    geni_toPlot = igraph::induced.subgraph(graph = g_geni2,vids = V(g_geni2)$name[good_index])
    geni_toPlot = igraph::delete.vertices(graph = geni_toPlot,v = which(igraph::degree(geni_toPlot)<1))

    # Convert the subgraph into the edge/vertex data frames networkD3 needs.
    data_frame = get.data.frame(x = geni_toPlot,what = "both")
    edges = data_frame$edges
    edges$value = round(abs(edges$weight * 10),digits = 0)  # edge width from |weight|
    colnames(edges) = c("source","target","weight","value")

    vertices = data_frame$vertices
    vertices$size = igraph::degree(geni_toPlot)  # node size = degree
    colnames(vertices) = c("name","group","type","size")

    # networkD3 expects 0-based vertex indices instead of names.
    for(i in 1:dim(edges)[1]){
      edges[i,"source"] = which(vertices[,"name"] %in% edges[i,"source"]) - 1
      edges[i,"target"] = which(vertices[,"name"] %in% edges[i,"target"]) - 1
    }

    vertices$name = as.factor(vertices$name)
    vertices$group = as.factor(vertices$group)
    vertices$size = as.numeric(vertices$size)
    vertices$type = as.factor(vertices$type)

    edges$source = as.integer(edges$source)
    edges$target = as.integer(edges$target)
    edges$value = as.integer(edges$value)

    # JS click handlers; only MyClickScript is passed to forceNetwork below
    # (MyClickScript2 is defined but unused).
    MyClickScript <- 'd3.select(this).select("circle").transition().duration(750).attr("r", 30)'

    MyClickScript2 <- 'd3.select(this).select(dLink.target).select("circle").transition().duration(750).attr("r",30),
    d3.select(dLink.target).select("circle").transition().duration(750).attr("r",30)'

    forceNetwork(Links = edges, Nodes = vertices,
                 Source = "source", Target = "target",
                 Value = "value", NodeID = "name",Nodesize="size",
                 zoom = TRUE,opacity = 0.85,fontSize = 10,Group = "group",
                 legend = TRUE, height = input$gene_width,width =input$gene_height,
                 clickAction = MyClickScript,charge = -input$gene_repulserad,
                 linkDistance = JS(paste0("function(d){return d.value*",input$gene_length,"}")))
  })
}
# Render a pie chart (output$pie_chart) summarising the composition of the
# item network built from the association matrix W2_ADJ: one slice per node
# type (nanomaterials, drug ATC classes, chemical classes, diseases) among
# the nodes that survive edge thresholding and isolated-vertex removal.
# NOTE(review): depends on the globals nano, join10, chemMat, good_disease,
# ATC_letter_vector, chemical_group_vector and DEBUGGING.
plot_item_network_pie = function(input,output,W2_ADJ){

  output$pie_chart <- renderPlot({

    ADJ_toPlot = W2_ADJ
    edges_type = input$Edges    # "P" keeps only positive edges, "N" only negative
    type = input$checkGroup     # selected object groups
    ATC_code = input$ATCGroup
    # NOTE(review): if input$ATCGroup can hold more than one value, this
    # scalar comparison only tests the first element -- confirm.
    if(ATC_code == "ALL"){
      ATC_code = ATC_letter_vector
    }
    chem_group = input$ChemicalGroup
    if(chem_group == "ALL"){
      chem_group = chemical_group_vector
    }
    if(DEBUGGING)
      cat(type," ",length(type)," ",ATC_code," ",chem_group,"\n")

    validate(
      need(input$checkGroup != "", "Please select an object group")
    )

    # Accumulators: node names, one type label per group, and how many
    # nodes each label covers (used later with rep()).
    items = c()
    items_type = c()
    items_lengt = c()
    #items_color = c()

    for(i in 1:length(type)){
      if(type[i]=="nano"){
        items = c(items,nano)
        items_type = c(items_type,"nano")
        items_lengt = c(items_lengt,length(nano))
      }
      if(type[i]=="drugs"){
        # validate(
        #  need(input$ATCGroup != "", "Please select an ATC group")
        # )
        already_selected = c()
        # For each ATC letter, collect the drugs whose first ATC-code
        # character matches, skipping any drug already added.
        for(j in 1:length(ATC_code)){
          ATC_lev1 = substr(join10$code,1,1)
          ATC_index = which(ATC_lev1 %in% ATC_code[j])
          new_drugs = unique(join10$name[ATC_index])
          index_new_drugs = which((new_drugs %in% already_selected)==FALSE)
          already_selected = new_drugs[index_new_drugs]
          toRem = which(new_drugs[index_new_drugs] %in% items)
          if(length(toRem)>0){
            index_new_drugs = index_new_drugs[-toRem]
          }
          items = c(items,new_drugs[index_new_drugs])
          items_type = c(items_type,paste("Drugs ATC: ", ATC_code[j],sep=""))
          items_lengt = c(items_lengt,length(new_drugs[index_new_drugs]))
        }
      }
      if(type[i]=="chemical"){
        # validate(
        #  need(input$ChemicalGroup != "", "Please select a chemical group")
        # )
        already_selected = c()
        # Same de-duplicating collection as above, keyed on chemical class.
        for(j in 1:length(chem_group)){
          chem_index = which(chemMat[,2] %in% chem_group[j])
          new_chem = unique(chemMat[chem_index,1])
          index_new_chem = which((new_chem %in% already_selected)==FALSE)
          already_selected = new_chem[index_new_chem]
          toRem = which(new_chem[index_new_chem] %in% items)
          if(length(toRem)>0){
            index_new_chem = index_new_chem[-toRem]
          }
          items = c(items,new_chem[index_new_chem])
          items_type = c(items_type,paste("Chemical class: ", chem_group[j],sep=""))
          items_lengt = c(items_lengt,length(new_chem[index_new_chem]))
        }
      }
      if(type[i]=="disease"){
        validate(
          need(input$input_dis != "", "Please select a disease.")
        )
        good_disease_to_plot = input$input_dis
        if(good_disease_to_plot =="ALL"){
          good_disease_to_plot = good_disease
        }
        if(DEBUGGING)
          cat("Good disease: ",good_disease_to_plot,"\n")
        #good_disease = disease[disease %in% colnames(ADJ_toPlot)]
        items = c(items,good_disease_to_plot)
        items_type = c(items_type,"disease")
        items_lengt = c(items_lengt,length(good_disease_to_plot))
      }
    }

    # Zero out weak edges: keep only the top (100 - th)% strongest positive
    # and negative associations, per the slider percentile.
    th = input$slider1
    pos_index = which(ADJ_toPlot>0)
    neg_index = which(ADJ_toPlot<0)
    th_p_val = quantile(ADJ_toPlot[pos_index],th/100)
    th_n_val = quantile(ADJ_toPlot[neg_index],1-(th/100))
    ADJ_toPlot[which(ADJ_toPlot>0 & ADJ_toPlot<th_p_val)] = 0
    ADJ_toPlot[which(ADJ_toPlot<0 & ADJ_toPlot>th_n_val)] = 0

    if(edges_type == "P"){
      ADJ_toPlot[which(ADJ_toPlot < 0)] = 0
    }
    if(edges_type == "N"){
      ADJ_toPlot[which(ADJ_toPlot>0)] = 0
    }

    if(DEBUGGING)
      cat("Edges_type: ",edges_type,"\n")

    # Build the thresholded item graph, tag vertices with their type label,
    # and drop isolated vertices before counting.
    # NOTE(review): this uses bare degree() here while the sibling function
    # plot_item_network uses igraph::degree() -- confirm both resolve to igraph.
    gto_plot = graph.adjacency(adjmatrix = ADJ_toPlot[items,items],mode = "undirected",weighted = TRUE)
    V(gto_plot)$type = rep(items_type,items_lengt)
    gto_plot = igraph::delete.vertices(gto_plot,which(degree(gto_plot)<1))
    gto_plot = igraph::simplify(gto_plot)

    # One slice per remaining node type.
    slices = table(V(gto_plot)$type)
    lbls = names(table(V(gto_plot)$type))

    #slices = items_lengt
    # lbls = items_type

    pie(slices, labels = lbls, main="Network statistics",col = rainbow(length(table(items_type))))
  })
}
# Render the interactive item network (output$itemNetwork) built from the
# association matrix W2_ADJ, restricted to the object groups selected in the
# UI, with edges thresholded by the percentile slider and optionally limited
# to positive or negative associations.
# NOTE(review): depends on the globals nano, join10, chemMat, disease,
# ATC_letter_vector, chemical_group_vector, DEBUGGING and the helper
# from_igraph_to_data_frame defined elsewhere in the project.
plot_item_network = function(input,output,W2_ADJ){

  output$itemNetwork = renderForceNetwork({

    ADJ_toPlot = W2_ADJ
    edges_type = input$Edges    # "P" keeps only positive edges, "N" only negative
    type = input$checkGroup     # selected object groups
    ATC_code = input$ATCGroup
    # NOTE(review): scalar comparison against a possibly multi-valued
    # input -- only the first element is tested; confirm.
    if(ATC_code == "ALL"){
      ATC_code = ATC_letter_vector
    }
    chem_group = input$ChemicalGroup
    if(chem_group == "ALL"){
      chem_group = chemical_group_vector
    }
    if(DEBUGGING)
      cat(type," ",length(type)," ",ATC_code," ",chem_group,"\n")

    validate(
      need(input$checkGroup != "", "Please select an object group")
      #need(length(input$checkGroup)>1, "Please select a couple of object groups")
    )

    # Accumulators: node names, one type label per group, and how many
    # nodes each label covers (used later with rep()).
    items = c()
    items_type = c()
    items_lengt = c()
    #items_color = c()

    for(i in 1:length(type)){
      if(type[i]=="nano"){
        items = c(items,nano)
        items_type = c(items_type,"nano")
        items_lengt = c(items_lengt,length(nano))
      }
      if(type[i]=="drugs"){
        validate(
          need(input$ATCGroup != "", "Please select an ATC group")
        )
        already_selected = c()
        # For each ATC letter, collect the drugs whose first ATC-code
        # character matches, skipping any drug already added.
        for(j in 1:length(ATC_code)){
          ATC_lev1 = substr(join10$code,1,1)
          ATC_index = which(ATC_lev1 %in% ATC_code[j])
          new_drugs = unique(join10$name[ATC_index])
          index_new_drugs = which((new_drugs %in% already_selected)==FALSE)
          already_selected = new_drugs[index_new_drugs]
          toRem = which(new_drugs[index_new_drugs] %in% items)
          if(length(toRem)>0){
            index_new_drugs = index_new_drugs[-toRem]
          }
          items = c(items,new_drugs[index_new_drugs])
          items_type = c(items_type,paste("Drugs ATC: ", ATC_code[j],sep=""))
          items_lengt = c(items_lengt,length(new_drugs[index_new_drugs]))
        }
      }
      if(type[i]=="chemical"){
        validate(
          need(input$ChemicalGroup != "", "Please select a chemical group")
        )
        already_selected = c()
        # Same de-duplicating collection as above, keyed on chemical class.
        for(j in 1:length(chem_group)){
          chem_index = which(chemMat[,2] %in% chem_group[j])
          new_chem = unique(chemMat[chem_index,1])
          index_new_chem = which((new_chem %in% already_selected)==FALSE)
          already_selected = new_chem[index_new_chem]
          toRem = which(new_chem[index_new_chem] %in% items)
          if(length(toRem)>0){
            index_new_chem = index_new_chem[-toRem]
          }
          items = c(items,new_chem[index_new_chem])
          items_type = c(items_type,paste("Chemical class: ", chem_group[j],sep=""))
          items_lengt = c(items_lengt,length(new_chem[index_new_chem]))
        }
      }
      if(type[i]=="disease"){
        validate(
          need(input$input_dis != "", "Please select a disease.")
        )
        good_disease_to_plot = input$input_dis
        if("ALL" %in% good_disease_to_plot){
          good_disease_to_plot = disease[disease %in% colnames(ADJ_toPlot)]
        }
        if(DEBUGGING)
          cat("Good disease: ",good_disease_to_plot,"\n")
        items = c(items,good_disease_to_plot)
        items_type = c(items_type,"disease")
        items_lengt = c(items_lengt,length(good_disease_to_plot))
      }
    }

    # Zero out weak edges: keep only the top (100 - th)% strongest positive
    # and negative associations, per the slider percentile.
    th = input$slider1
    pos_index = which(ADJ_toPlot>0)
    neg_index = which(ADJ_toPlot<0)
    th_p_val = quantile(ADJ_toPlot[pos_index],th/100)
    th_n_val = quantile(ADJ_toPlot[neg_index],1-(th/100))
    ADJ_toPlot[which(ADJ_toPlot>0 & ADJ_toPlot<th_p_val)] = 0
    ADJ_toPlot[which(ADJ_toPlot<0 & ADJ_toPlot>th_n_val)] = 0

    if(edges_type == "P"){
      ADJ_toPlot[which(ADJ_toPlot < 0)] = 0
    }
    if(edges_type == "N"){
      ADJ_toPlot[which(ADJ_toPlot>0)] = 0
    }

    if(DEBUGGING)
      cat("Edges_type: ",edges_type,"\n")

    # Build the thresholded item graph, tag vertices with their type label,
    # drop isolated vertices, and convert to networkD3 data frames.
    gto_plot = graph.adjacency(adjmatrix = ADJ_toPlot[items,items],mode = "undirected",weighted = TRUE)
    V(gto_plot)$type = rep(items_type,items_lengt)
    gto_plot = igraph::delete.vertices(gto_plot,which(igraph::degree(gto_plot)<1))
    gto_plot = igraph::simplify(gto_plot)

    data_frame = from_igraph_to_data_frame(gto_plot)

    # JS click handlers; only MyClickScript3 is passed to forceNetwork below,
    # the other variants are defined but unused.
    MyClickScript <-
      ' d3.select(this).select("circle").transition()
    .duration(750)
    .attr("r", 30)'

    MyClickScript5<-
      ' d3.select(this).select("circle").attr("opacity", 1);
    '
    MyClickScript3 <- 'd3.select(this).node().__data__;
    node.style("opacity", function (o) {
    return neighboring(d, o) | neighboring(o, d) ? 1 : 0.1;
    });'

    MyClickScript6 <- 'd3.fisheye.circular()
    .radius(120)'

    MyClickScript4 <- 'alert("You clicked " + d.name + " which is in row " + (d.index + 1) + " degree " + d.weight +
    " type " + d.type +
    " of your original R data frame");'

    if(DEBUGGING)
      cat("Render force network...\n")

    forceNetwork(Links = data_frame$edges, Nodes = data_frame$vertices,
                 Source = "source", Target = "target",
                 Value = "value", NodeID = "name",Nodesize="size",
                 Group = "type",zoom = TRUE,opacity = 0.95,fontSize = 10,
                 #radiusCalculation = JS("d.nodesize + 6"),
                 legend = TRUE,# height = my_screen_h,width =my_screen_w/2,
                 clickAction = MyClickScript3,charge = -input$repulserad,
                 linkDistance = JS(paste0("function(d){return d.value*",input$length,"}")))
  })
}
|
5010761d6a6eaa3c900e865005fe07b657be249d
|
731e34c16a539dec90c735fdd5d01411beb9ded0
|
/man/QuartetPoints.Rd
|
12cf3db5d00b78ec883a91a760508d7212bd3641
|
[] |
no_license
|
cran/Quartet
|
ea908984409acd1ad9fc1308fb67ba3e38f14b93
|
99856b81479273f2ce404845fd31b57941dec9ac
|
refs/heads/master
| 2022-07-24T18:25:09.315730
| 2022-07-08T09:30:02
| 2022-07-08T09:30:02
| 166,093,762
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,668
|
rd
|
QuartetPoints.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TernaryPoints.R
\name{QuartetPoints}
\alias{QuartetPoints}
\alias{SplitPoints}
\alias{BipartitionPoints}
\title{Plot tree differences on ternary plots}
\usage{
QuartetPoints(trees, cf = trees[[1]])
SplitPoints(trees, cf = trees[[1]])
BipartitionPoints(trees, cf = trees[[1]])
}
\arguments{
\item{trees}{A list of trees of class \code{\link[ape:read.tree]{phylo}},
with identically labelled tips.}
\item{cf}{Comparison tree of class \code{\link[ape:read.tree]{phylo}}. If unspecified,
each tree is compared to the first tree in \code{trees}.}
}
\value{
A data frame listing the ternary coordinates of trees, based on the
amount of information that they have in common with the comparison
tree (which defaults to the first member of the list, if unspecified).
}
\description{
Generate points to depict tree difference (in terms of resolution
and accuracy) on a ternary plot.
}
\details{
The ternary plot will depict the number of quartets or splits that are:
\itemize{
\item resolved in the reference tree (\code{cf}), but neither present nor contradicted
in each comparison tree (\code{trees});
\item resolved differently in the reference and the comparison tree;
\item resolved in the same manner in the reference and comparison trees.
}
If the reference tree (\code{cf}) is taken to represent the best possible knowledge
of the 'true' topology, then polytomies in the reference tree represent
uncertainty. If a tree in \code{trees} resolves relationships within this
polytomy, it is not possible to establish (based only on the reference tree)
whether this resolution is correct or erroneous. As such, extra resolution
in \code{trees} that is neither corroborated nor contradicted by \code{cf} is ignored.
}
\examples{
library('Ternary')
data('sq_trees')
TernaryPlot(alab = 'Unresolved', blab = 'Contradicted', clab = 'Consistent',
point = 'right')
TernaryLines(list(c(0, 2/3, 1/3), c(1, 0, 0)), col = 'red', lty = 'dotted')
TernaryText(QuartetPoints(sq_trees, cf = sq_trees$collapse_one), 1:15,
col = Ternary::cbPalette8[2], cex = 0.8)
TernaryText(SplitPoints(sq_trees, cf = sq_trees$collapse_one), 1:15,
col = Ternary::cbPalette8[3], cex = 0.8)
legend('bottomright', c("Quartets", "Splits"), bty = 'n', pch = 1, cex = 0.8,
col = Ternary::cbPalette8[2:3])
}
\references{
\itemize{
\item \insertRef{Smith2019}{Quartet}
}
}
\author{
\href{https://orcid.org/0000-0001-5660-1727}{Martin R. Smith}
(\href{mailto:martin.smith@durham.ac.uk}{martin.smith@durham.ac.uk})
}
|
a053c6a31522a38c7a7705788cd9e0bbcf6e4b39
|
1be9a4013a6e92171aeb2dd8aec1f72eae8ad192
|
/ProgrammingAssignment3/rankall.R
|
904e1f607661d9083be2b5c7c7e5f979fedb49d1
|
[] |
no_license
|
tmreic/datasciencecoursera
|
698069647fe3e015d8b475111a8a1d37e8daa06d
|
23ac57d863f743a28f22c486662f9b833b3196f4
|
refs/heads/master
| 2021-01-10T02:38:44.074386
| 2015-11-27T19:44:07
| 2015-11-27T19:44:07
| 45,751,988
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,230
|
r
|
rankall.R
|
## Rank hospitals in every state for a given outcome.
##
## Reads "outcome-of-care-measures.csv" from the working directory and, for
## each state, returns the hospital holding rank `num` on the 30-day
## mortality rate for `outcome` (ties on identical hospital names are
## averaged via aggregate(), mirroring the original implementation).
##
## Args:
##   outcome: one of "heart attack", "heart failure", "pneumonia"
##            (case-insensitive); anything else stops with "invalid outcome".
##   num:     "best" (rank 1), "worst" (last rank), or a numeric rank.
##            A rank beyond a state's hospital count yields NA for that state.
##
## Returns: a data frame ordered by state with one row per state and three
##   columns -- an unnamed state column, `hospital`, and `state` (the exact
##   shape the original produced, kept for caller compatibility).
rankall <- function( outcome, num= "best") {
  ## Suppress the NA-coercion warnings from as.numeric() locally instead of
  ## leaking options(warn = -1) into the caller's session (the original never
  ## restored it).
  old_opts <- options(warn = -1)
  on.exit(options(old_opts), add = TRUE)

  ## Map each valid outcome to its mortality-rate column; this replaces the
  ## three near-identical copy-pasted branches of the original, and rejects
  ## outcomes that previously slipped past the grep-based check only to
  ## fall through every branch and return NULL.
  outcome_cols <- c(
    "heart attack"  = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack",
    "heart failure" = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure",
    "pneumonia"     = "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia"
  )
  key <- tolower(outcome)
  if (!key %in% names(outcome_cols)) {
    stop("invalid outcome")
  }
  rate_col <- outcome_cols[[key]]

  ## Read the data once (the original re-read the file in every branch).
  df <- read.csv("outcome-of-care-measures.csv", header = TRUE,
                 stringsAsFactors = FALSE)[, c("State", "Hospital.Name", rate_col)]
  df[[rate_col]] <- as.numeric(df[[rate_col]])  # "Not Available" becomes NA

  unique_states <- unique(df$State)
  final.data <- data.frame(state1 = character(), hospital = character(),
                           state = character(), stringsAsFactors = FALSE)

  for (st in unique_states) {
    ## Hospitals of this state with a reported rate (NA rows dropped, as
    ## subset() did in the original).
    fs <- df[which(df$State == st & !is.na(df[[rate_col]])), ]
    ## Mean rate per hospital name, ranked best (lowest mortality) first.
    means <- aggregate(fs[[rate_col]], list(fs$Hospital.Name), mean)
    means <- means[order(means$x), ]
    ## Resolve the requested rank: best = 1, worst = last, else numeric.
    x <- if (num == "worst") {
      nrow(means)
    } else if (num == "best") {
      1
    } else {
      num
    }
    final.data[nrow(final.data) + 1, ] <- c(st, means[x, 1], st)
  }

  ## Blank the first column's name and order rows by state, matching the
  ## original output exactly.
  names(final.data)[names(final.data) == "state1"] <- ""
  final.data[order(final.data$state), ]
}
|
5d87941da99b973342fbcce9383ff0a1b707086b
|
648353748435f6d0f561a85f12b68a79961e00c3
|
/BayesianTools/inst/examples/plotTimeSeriesHelp.R
|
076993c8408b9dbd12c6609a10025590f4ecf950
|
[] |
no_license
|
martynplummer/BayesianTools
|
c5ddee2c3a276e005cb62b8b76272bff0cf073b8
|
9912dffb522949535efb941c999cb76bea3c3400
|
refs/heads/master
| 2021-06-29T15:50:52.493685
| 2017-09-15T11:18:46
| 2017-09-15T11:18:46
| 104,320,326
| 1
| 0
| null | 2017-09-21T08:13:09
| 2017-09-21T08:13:09
| null |
UTF-8
|
R
| false
| false
| 312
|
r
|
plotTimeSeriesHelp.R
|
# Demo for BayesianTools::plotTimeSeries().
# Simulate an "observed" series, fake predictions by adding noise,
# and show the two supported plotting modes side by side.
obs <- VSEMcreatePAR(1:100)
noisy_pred <- obs + rnorm(length(obs), mean = 0, sd = 2)
par(mfrow = c(1, 2))
plotTimeSeries(observed = obs, main = "Observed")
plotTimeSeries(observed = obs, predicted = noisy_pred, main = "Observed and predicted")
par(mfrow = c(1, 1))
|
24b3cc73781078bd08cdb157ab18d9fefc2a0437
|
2ab596843b6790fe884db75e6b6d69d1620dd9f1
|
/gabbiadini_plos_2016.R
|
e86e5a1d9402696a25664da4545d87d135b58fdc
|
[] |
no_license
|
Joe-Hilgard/gabbiadini_plos_2016
|
9b4690a22a51a9fe1797f332d9fcceb05efb1882
|
a8230c023f46fb7a7b508b610c62456838a8b80f
|
refs/heads/master
| 2016-09-13T14:52:36.112364
| 2016-05-23T15:08:47
| 2016-05-23T15:08:47
| 56,179,345
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,475
|
r
|
gabbiadini_plos_2016.R
|
# re-analysis of Gabbiadini et al. 2016
# -------------------------------------------------------------------------
# Exploratory re-analysis script: reads data_set_PLOS.csv from the working
# directory and is meant to be run interactively top-to-bottom (most lines
# print or plot rather than assign).
# NOTE(review): columns such as cond101, gend_con, avatad_IDcent and the
# avatad_IDcentx* products appear to be pre-centered / contrast-coded
# variables shipped with the data set -- confirm the coding before reuse.
# -------------------------------------------------------------------------
library(psych)
library(dplyr)
library(magrittr)
library(ggplot2)
library(lavaan)
library(car)
dat = read.csv("data_set_PLOS.csv")
table(dat$played_game, dat$condition, useNA = 'always')
table(dat$played_game, dat$cond, useNA = 'always')
# Experimental assignment
dat %>%
select(played_game:cond) %>%
distinct
# Avatar identification
dat %>%
select(avatar_id_embodied_presence1:avatar_id_char_empathy4) %>%
glimpse
# Masculine beliefs
dat %>%
select(MRNI1_aggr:MRNI15_restremotionality) %>%
glimpse
# Do these really belong together -- "demonstrate physical prowess" v
# "use any means to 'convince' a girl to have sex"
dat %>%
select(MRNI1_aggr:MRNI15_restremotionality) %>%
psych::alpha()
# I guess so.
# Empathy for victim?
dat %>%
select(Empathy_comprensione:Empathy_disinteresseR) %>%
glimpse
# Game rating. Was there a Game_rating_involvement1?
dat %>%
select(game_rating_involvement2_eccitante:game_rating_involvement3_coinvolgente) %>%
glimpse
# Analysis ---
# Effects of game on empathy towards women -- zilch ----
# I'm guessing "emp_scal" is their outcome -- empathy towards female violence victim
dat %>%
select(Empathy_comprensione:Empathy_disinteresse) %>%
psych::alpha(check.keys = T)
dat %>%
select(Empathy_comprensione:Empathy_disinteresse, emp_scal) %>%
cor(use = "pairwise") %>%
round(3)
m1 = aov(emp_scal ~ as.factor(condition), data = dat)
summary(m1)
TukeyHSD(m1)
m1.1 = lm(emp_scal ~ cond, data = dat)
summary(m1.1)
tapply(dat$emp_scal, dat$cond, mean, na.rm = T)
# Moderated by gender?
m1.2 = lm(emp_scal ~ condition * as.factor(gender), data = dat)
summary(m1.2)
Anova(m1.2, type = 3) # No
# Get means by gender
tapply(dat$emp_scal, INDEX = list(dat$condition, dat$gender), FUN = mean, na.rm = T)
aov(emp_scal ~ condition, data = dat[dat$gender == 1,]) %>% TukeyHSD()
# No differences even among men
# Effects of game on masculine beliefs ----
# ANOVA
m2 = aov(mas_beli ~ condition, data = dat)
summary(m2)
TukeyHSD(m2) # GTA differs from nonviolent, p = .03
# Linear contrast
m2.1 = lm(mas_beli ~ cond, data = dat)
summary(m2.1)
ggplot(dat, aes(x = as.factor(cond), y = mas_beli,
col = as.factor(cond), shape = played_game)) +
geom_boxplot(width = .3, notch = T)
ggplot(dat, aes(x = as.factor(cond), y = mas_beli,
col = as.factor(cond), shape = played_game)) +
geom_point(position = position_jitter(width = .5))
# Identification with game character ----
# Greatest identification in half-life
m3 = aov(avatarID ~ condition, data = dat)
summary(m3)
TukeyHSD(m3) # HL is greater than GTA, greater than Qube/pinball
# compare to total scale
m3.1 = aov(avatar_id ~ condition, data = dat)
summary(m3.1)
TukeyHSD(m3.1) # HL is greater than GTA, greater than Qube/pinball
# Our predicted 3-way interaction between game, gender, and identification ---
dat %>%
filter(!is.na(gender)) %>%
ggplot(aes(x = avatad_IDcentxgendercodxcond101)) +
geom_histogram() +
facet_grid(gender ~ condition)
m4 = lm(mas_beli ~ condition*as.factor(gender)*avatarID, data = dat)
summary(m4)
hist(m4$residuals)
ggplot(dat, aes(x = avatarID, y = mas_beli, col = as.factor(cond),
lty = as.factor(gender), shape = as.factor(gender))) +
geom_point() +
geom_smooth(method = "lm", se = F)
# Does that 3-way interaction generalize to the primary outcome? ---
m5 = lm(emp_scal ~ as.factor(cond)*as.factor(gender)*avatarID, data = dat)
summary(m5) # No it does not
hist(m5$residuals)
Anova(m5, type = 3)
# Ordinal code?
m5.1 = lm(emp_scal ~ cond*as.factor(gender)*avatarID, data = dat)
summary(m5.1) # Still the answer is no
hist(m5.1$residuals)
Anova(m5.1, type = 3)
# Same model using the data set's own precomputed centered/coded columns
mz = lm(emp_scal ~ cond101*gend_con*avatad_IDcent, data = dat )
summary(mz)
mz1 = lm(emp_scal ~ avatad_IDcentxgendercodxcond101, data = dat)
summary(mz1)
ggplot(dat, aes(x = avatarID, y = emp_scal, col = as.factor(cond),
lty = as.factor(gender), shape = as.factor(gender))) +
geom_point() +
geom_smooth(method = 'lm', se = F) +
ggtitle("Gender × Identification × Game (-1, 0, 1)")
# Kinda? But not significantly so.
# So just how many highly-identified violent male gamers are there?
hist(dat$avatarID[dat$cond == 3 & dat$gender == 1], breaks = 7)
sum(dat$avatarID[dat$cond == 3 & dat$gender == 1] > 4) # about 17
length(dat$avatarID[dat$cond == 3 & dat$gender == 1]) # out of 22
ggplot(dat, aes(x = mas_beli, y = emp_scal, col = as.factor(gender))) +
geom_point() +
geom_smooth(method = "lm")
# Were these two factors correlated in the control group? Yeah
dat %>%
filter(cond == 1) %>%
select(gender, mas_beli, emp_scal) %>%
cor(use = 'pairwise') %>%
round(3)
dat %>%
filter(cond == 1) %>%
ggplot(aes(x = mas_beli, y = emp_scal, col = as.factor(gender))) +
geom_point() +
geom_smooth(method = "lm")
# Not sure if I'm contrast-coding this correctly
model.check = lm(emp_scal ~ mas_beli * C(as.factor(gender), "contr.treatment"), data = dat, subset = cond == 1)
summary(model.check)
# SEM ----
# lavaan path model: empathy regressed on masculine beliefs, which is in
# turn regressed on condition, gender, identification and their products.
modelSEM = {
"emp_scal ~ mas_beli
mas_beli ~ cond101 + gend_con + avatad_IDcent +
gender_codxcond101 + avatad_IDcentxgendercod + avatad_IDcentxcond101 +
avatad_IDcentxgendercodxcond101"
}
model.sem = sem(modelSEM, data = dat)
summary(model.sem)
# How many have avatar_id > 4.29?
dat %>%
filter(condition == "GTA", gender == 1) %>%
select(avatarID)
dat %>%
filter(condition == "GTA", gender == 1) %>%
with(., hist(avatarID))
dat %>%
filter(condition == "GTA", gender == 1) %>%
with(., hist(avatarID, breaks = c(2, 3, 4, 4.2583, 5, 6, 7), freq = T))
abline(v = 4.2583, col = "red", lwd = 2)
dat %>%
filter(condition == "GTA", gender == 1) %>%
with(., table(avatarID > 4.2583))
# Why are avatar_id and avatarID different?
with(dat, plot(avatarID, avatar_id))
# avatarID seems to be the one used in the process model
with(dat, plot(avatarID, avatad_IDcent))
# Why do avatarID and avatar_id differ?
# AvatarID is just the "embodied presence" subscale of the identification measure
# Embodied presence subscale:
dat %>%
select(avatar_id_embodied_presence1:avatar_id_embodied_presence6) %>%
apply(1, FUN = mean) %T>%
plot(x = ., y = dat$avatarID,
main = "avatarID is embodied presence subscale",
xlab = "subscale mean") %>%
plot(x = ., y = dat$avatar_id,
main = "avatar_id is not embodied presence subscale",
xlab = "subscale mean")
# Total measure mean:
dat %>%
select(avatar_id_embodied_presence1:avatar_id_char_empathy4) %>%
apply(1, FUN = mean) %T>%
plot(x = ., y = dat$avatarID,
main = "avatarID is not total measure mean",
xlab = "Total measure mean") %>%
plot(x = ., y = dat$avatar_id,
main = "avatar_id is the total measure mean",
xlab = "Total measure mean")
# Are the means in table 2 avatarID or avatar_id?
tapply(dat$avatarID, dat$condition, FUN = mean) # matches
tapply(dat$avatar_id, dat$condition, FUN = mean) # does not match
# How about in GLM?
fit.x = lm(emp_scal ~ condition * as.factor(gend_con) * avatar_id, data = dat)
summary(fit.x)
# Looking at models & plots within critical subset -----
# The critical region? Males w/ ID > 4.258
dat.crit = dat %>%
filter(avatarID > 4.2583, gend_con == 1)
# Null results in total-sample GLM
m = lm(emp_scal ~ cond101*as.factor(gend_con)*avatarID, data = dat)
summary(m); Anova(m, type = 3)
# Null main effect in critical subsample GLM
m.crit = lm(emp_scal ~ cond101, data = dat.crit)
summary(m.crit); hist(m.crit$residuals)
# Significant interaction in critical subsample GLM
m.crit2 = lm(emp_scal ~ cond101*avatarID, data = dat.crit)
summary(m.crit2); hist(m.crit2$residuals)
# All subjects
ggplot(dat, aes(x = condition, y = emp_scal)) +
geom_violin() +
geom_boxplot(width = .2, notch = T) +
ggtitle("All subjects")
# All males
dat %>%
filter(gend_con == 1) %>%
ggplot(aes(x = condition, y = emp_scal)) +
geom_violin() +
geom_boxplot(width = .2, notch = T) +
ggtitle("All males")
# Critical region
ggplot(dat.crit, aes(x = condition, y = emp_scal)) +
geom_violin() +
geom_boxplot(width = .2, notch = T) +
#geom_point(alpha = .25) +
ggtitle("Only the Highly-Identified Males")
# Regression lines
ggplot(dat, aes(x = avatarID, y = emp_scal, col = condition, lty = as.factor(gend_con))) +
geom_point() +
geom_smooth(method = 'lm', se = F) +
ggtitle("3-Way interaction among all subjects")
dat %>%
filter(gend_con == 1, avatarID > 4.25) %>%
ggplot(aes(x = avatarID, y = emp_scal, col = condition)) +
geom_point() +
geom_smooth(method = 'lm') +
geom_point(data = dat,
aes(x = avatarID, y = emp_scal, col = condition),
alpha = .15) +
ggtitle("Regression within only highly-identified males")
# Flag the critical combination (highly-identified GTA-playing males)
dat = dat %>%
mutate(crit_code = ifelse(dat$gend_con == 1 & dat$avatarID > 4.2583 & dat$condition == "GTA", "Yes", "No"))
ggplot(dat, aes(x = avatarID, y = emp_scal, col = crit_code)) +
geom_point() +
geom_smooth(method = "lm") +
ggtitle("Empathy as function of critical combo")
ggplot(dat, aes(x = crit_code, y = emp_scal, col = crit_code)) +
geom_violin() +
geom_boxplot(width = .25, notch = T)
# Potential confound or non-random assignment of age ----
aov(age ~ condition, data = dat) %>% TukeyHSD
ggplot(dat, aes(x = age, fill = condition)) + geom_bar(stat = "count")
|
c775fb66db136c2de797f1e3545c3fdfb1111945
|
0be3fd82803848a267ad273aa98116fe8ea35b1f
|
/run_analysis.R
|
e973dbd8d30b6ccd6c7c302676bbc7c508f8e738
|
[] |
no_license
|
Crimsoma/Clean_data
|
723245f48831cad6c1355579bbe6787cce6fbe9d
|
87591aee1acdc0f7e8d97b48cc4b252ae8b5d6af
|
refs/heads/master
| 2016-09-10T21:23:33.860769
| 2015-04-26T08:30:43
| 2015-04-26T08:30:43
| 34,601,849
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,860
|
r
|
run_analysis.R
|
# Getting & Cleaning Data course project: download the UCI Human Activity
# Recognition (HAR) smartphone data set, merge train/test splits, keep the
# mean/std features, and write a tidy per-subject/per-activity summary.
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", "meter.zip")
unzip("meter.zip")
#assembly of proper variable names
# Expand the terse feature names from features.txt into readable camel-case
# labels, e.g. "tBodyAcc-mean()-X" -> "timeBodyAccelerationMeanX".
labels <- read.table("UCI HAR Dataset/features.txt")
labels[,2] <- sub("^t","time",labels[,2])
labels[,2] <- sub("^f", "frequency", labels[,2])
# Capitalize the token after each dash (perl = TRUE enables \U / \L case ops)
labels[,2] <- gsub("-(\\w)(\\w*)", "\\U\\1\\L\\2", labels[,2], perl = T)
labels[,2] <- gsub ("angle\\(t", "angleOfTime", labels[,2])
labels[,2] <- gsub ("angle\\(", "angleOf", labels[,2])
labels[,2] <- gsub (",", "And", labels[,2])
labels[,2] <- gsub ("\\(\\)", "", labels[,2])
labels[,2] <- gsub ("Acc", "Acceleration",labels[,2])
labels[,2] <- gsub ("\\)", "", labels[,2])
#reading sets
# Each split stores measurements (X), activity codes (Y) and subject IDs in
# separate files; bind them column-wise as subject | activity | features.
train1 <- read.table("UCI HAR Dataset/train/X_train.txt", col.names = labels[,2])
train2 <- read.table("UCI HAR Dataset/train/Y_train.txt")
train3 <- read.table("UCI HAR Dataset/train/subject_train.txt")
trainFull <- cbind(train3,train2,train1)
test1 <- read.table("UCI HAR Dataset/test/X_test.txt", col.names = labels[,2])
test2 <- read.table("UCI HAR Dataset/test/Y_test.txt")
test3 <- read.table("UCI HAR Dataset/test/subject_test.txt")
testFull <- cbind(test3,test2, test1)
#combining training and test sets
data <- rbind(trainFull, testFull)
#renaming
# Replace numeric activity codes 1..6 with descriptive factor labels
data[2] <- factor(data[,2], labels = c("WALKING","WALKING_UPSTAIRS","WALKING_DOWNSTAIRS","SITTING","STANDING","LAYING"))
names(data)[1:2] <- c("Subject" ,"Activity")
#arrange by subject
library(plyr)
library(dplyr)
data <- arrange(data,Subject)
#getting mean and std
# Keep the ID columns plus every Mean/Std feature (capitalized above)
ndata <- data[grepl("Subject|Activity|Mean|Std", colnames(data))]
#new data set with average by activity and subject
# NOTE(review): group_by_(.dots=) and summarise_each()/funs() are deprecated
# dplyr verbs; modern equivalents are group_by(across(...)) + summarise(across(...)).
cols <- (lapply(c("Subject","Activity"), as.symbol))
actData <-ndata %>% group_by_(.dots=cols) %>% summarise_each(funs(round(mean(.),3)))
#tidy.txt
write.table(actData, file = "tidy.txt", row.names=F)
|
0913af8bed1268fc3ce67eebb28ff3baeba96e82
|
fee1d58e1fcd16fd1c627f5e0facbb93af451c77
|
/server.R
|
3bb111f2cbed0567575411054a438470e779388f
|
[] |
no_license
|
gegp01/C3reporta
|
48f53c4b89570c0b5072058aa4dba2a0819bbf9b
|
a461544a015e3e570a9588b9ff2223ef4e864cef
|
refs/heads/master
| 2021-01-03T00:15:28.142974
| 2020-03-23T15:03:01
| 2020-03-23T15:03:01
| 239,830,972
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,033
|
r
|
server.R
|
# Shiny server for a COVID-19 world map: downloads a scraped CSV of
# Johns Hopkins / WHO figures, aggregates death counts per country, and
# renders a leaflet map (circles sized by confirmed cases, colored by
# log death count) plus an HTML caption.
server <- shinyServer(function(input, output) {
X<-read.csv("https://gegp01.github.io/C3reporta/coords.csv") # Data from Johns Hopkins Institute and WHO
# NOTE(review): rows 3:104 are assumed to be the per-country confirmed-case
# records in this particular CSV layout -- fragile if the feed changes.
D<-X[3:104,] # reported cases
# Rows whose name contains "death" encode "<Country>.deaths" records;
# split the name into country and record-type parts.
death<-X[grep("death", X$name),]
death$country<-do.call(rbind, strsplit(as.vector(death$name), ".", fixed=T))[,1]
y<-do.call(rbind, strsplit(as.vector(death$name), ".", fixed=T))[,2]
death$check<-y=="deaths"
d<-death[death$check==T,]
# Sum deaths per country
dead.count<-aggregate(as.numeric(as.character(d$data)), list(d$country), sum)
dead.count$Group.1<-as.vector(dead.count$Group.1)
# Re-map concatenated country names back to the display names used in X$name
dead.count$Group.1[dead.count$Group.1=="UnitedStatesofAmerica"]<-"United States"
dead.count$Group.1[dead.count$Group.1=="RepublicofKorea"]<-"South Korea"
dead.count$Group.1[dead.count$Group.1=="UnitedKingdom"]<-"United Kingdom"
dead.count$Group.1[dead.count$Group.1=="SanMarino"]<-"San Marino"
dead.count$latitude<-X$latitude[match(dead.count$Group.1, X$name)]
dead.count$longitude<-X$longitude[match(dead.count$Group.1, X$name)]
# D$dead<-d$data[match(D$name, d$country)]
# Attach death counts to the case records; countries without a match get 0
D$dead<-dead.count$x[match(D$name, dead.count$Group.1)]
D$dead[is.na(D$dead)]<-0
# Per-country tooltip text and the global HTML caption (user-facing text
# is in Spanish by design)
text0<-paste(D$name, ":", D$dead, "muertes", "|", D$data, "casos confirmados")
text1<-paste("<p style='width: 90vh;'><b>Total de casos confirmados con COVID19: </b>", X$data[X$name=="Global.confirmed"]
, "<br><b>Muertes: </b>", sum(dead.count$x)
, "<br><a href='https://www.who.int/emergencies/diseases/novel-coronavirus-2019/situation-reports' target='_blank'>Reporte de la WHO No: ", X$data[X$name=="WHO report"], "</a>"
, "<br>Fecha: ", X$data[X$name=="Date"]
, "<br></p>")
# # Polygon values, ordered by wrld_simpl$NAME
# infected<-as.numeric(as.character(D$data)[match(as.vector(wrld_simpl$NAME), D$name)])
# susceptibles<-wrld_simpl$POP2005
# dead<-as.numeric(as.character(D$dead)[match(as.vector(wrld_simpl$NAME), D$name)])
#
# # z<-(infected/susceptibles)*1000
#
# popup<-text0[match(as.vector(wrld_simpl$NAME), D$name)]
#
# z<-dead/infected
#z<-as.numeric(as.character(D$dead))/as.numeric(as.character(D$data)) # dead/infected. Note: Add epsilon here
# Color scale: log(deaths + 1) mapped onto a blue-to-red palette
z<-log(as.numeric(as.character(D$dead))+1)
myPal <- colorRampPalette(c("royal blue", "red"))
p<-transp(num2col(z, col.pal=myPal),.8)
output$map <-renderLeaflet({
leaflet() %>% setView(20, 10, zoom = 2) %>% addTiles() %>% addCircles(lng = D$longitude, lat = D$latitude, radius = as.numeric(as.character(D$data))*10, label=htmlEscape(text0), labelOptions(noHide=T), color=p)
# leaflet(wrld_simpl) %>% addPolygons(stroke = TRUE, color = "white", weight="1", smoothFactor = 0.3, fillOpacity = 1, fillColor = p, popup=htmlEscape(popup), popupOptions=popupOptions(closeButton = FALSE))
})
output$caption<-renderUI({
HTML(text1)
})
})
|
b5c58b5a10e1eae1b2264131e9cbd8b16b870ca4
|
cd8376335f7de8210143eb742ac7f18e8cc571b3
|
/R/setup_demo.r
|
d9e7b5d6150ea3ec4a15a868b9723f7543f1bbcc
|
[] |
no_license
|
srhoades10/pkgdemo
|
dafe86035e9f857ee6fa0de53aef731f4f77cdb0
|
6a36c881a85e2d8b9ca623b9280373c809fe2ef8
|
refs/heads/main
| 2023-05-14T01:54:44.215493
| 2021-06-08T13:30:40
| 2021-06-08T13:30:40
| 375,001,054
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,210
|
r
|
setup_demo.r
|
# Basic function setup

#' Add 2 numbers!
#'
#' @param x: Float/Int
#' @param y: Float/Int
#' @param ...: Ignored; accepted for call-site flexibility
#' @return Summed numbers
#' @export
addNums <- function(x, y, ...){
  x + y
}
#' Create random table!
#'
#' @param nRow: Int - number of rows (must be >= 1)
#' @param nCol: Int - number of standard-normal data columns (must be >= 1)
#' @param addSample: Bool - Add a "Sample" column, numbered 1:nRow
#' @param ...: Ignored; accepted for call-site flexibility
#' @return Data.Frame of rnorm() draws, nRow x nCol (plus Sample if requested)
#' @export
createRandomTbl <- function(nRow, nCol, addSample=TRUE, ...){
  # Scalar guard: use short-circuit || rather than vectorized | in an if()
  if(nRow <= 0 || nCol <= 0){
    stop('Number of rows and columns must g.t.e 1')
  }
  df <- data.frame(matrix(rnorm(nRow*nCol), nrow=nRow, ncol=nCol))
  # isTRUE() is robust to NA / non-logical values of addSample
  if(isTRUE(addSample)){
    # Sample IDs as a factor, one level per row
    df$Sample <- as.factor(1:nRow)
  }
  return(df)
}
#' Fit PCA on data.frame
#'
#' Note: columns are removed which have 0 variance
#'
#' @param data: Individual MZ data aggregated into NxM
#' @param nonDatCols: List - Non-data-containing columns
#' @param returnFit: List - Return only the fit object
#' @return If returnFit, the prcomp fit; otherwise a data.frame of Sample
#'   plus PC1/PC2 scores
#' @export
fitPCA <- function(data, nonDatCols=c('Sample'), returnFit=FALSE){
  # all_of() makes selecting by an external character vector explicit;
  # passing the bare vector to select() is deprecated tidyselect usage.
  dataOnly <- dplyr::select(data, -dplyr::all_of(nonDatCols))
  # Treat missing measurements as 0 so prcomp() can run
  dataOnly[is.na(dataOnly)] <- 0
  # prcomp(scale = TRUE) fails on constant columns, so drop zero-variance ones
  dataOnly <- dataOnly[ , apply(dataOnly, 2, var) != 0]
  pcaFit <- prcomp(dataOnly, center=TRUE, scale=TRUE)
  if(isTRUE(returnFit)){
    return(pcaFit)
  }
  # Otherwise return the first two score columns labelled by Sample
  pcaDF <- as.data.frame(pcaFit$x)
  projections <- cbind(data$Sample, pcaDF[, c('PC1' ,'PC2')])
  colnames(projections)[1] <- 'Sample'
  return(projections)
}
#' Calculate the ellipse for a confidence interval on the PCA projections
#'
#' @param pcaProjections: Scores from a prcomp() fit (needs PC1/PC2 columns)
#' @param confidence: Hotellings confidence (usually 95%)
#' @param ...: Ignored; accepted for call-site flexibility
#' @return 2D Ellipse boundaries for plotting the confidence interval
#' @export
calcPCAEllipse <- function(pcaProjections, confidence=95, ...){
  # Covariance and centroid of the first two principal-component scores
  covMat <- var(cbind(pcaProjections$PC1, pcaProjections$PC2))
  center <- c(mean(pcaProjections$PC1), mean(pcaProjections$PC2))
  # Chi-squared radius for the requested confidence level (2 degrees of freedom)
  radiusScale <- sqrt(qchisq(confidence/100, df = 2))
  # Unit circle traced forward then backward (100 points total)
  angles <- c(seq(-pi, pi, length = 50), seq(pi, -pi, length = 50))
  unitCircle <- as.matrix(cbind(cos(angles), sin(angles)))
  # Stretch the circle by the Cholesky factor of the covariance, scale it,
  # and shift it onto the centroid
  boundary <- data.frame(sweep(unitCircle %*% chol(covMat) * radiusScale, 2,
                               center, FUN = "+"), groups = 1)
  names(boundary)[1:2] <- c("xvar", "yvar")
  return(boundary)
}
#' Plot PCA for a data frame
#'
#' @param data: Data.frame of sample metadata plus numeric measurement columns
#' @param confidence: Int/Float - Hotellings confidence (usually 95%)
#' @param nonDatCols: List - Non-data-containing columns
#' @param ggTitle: Character - optional plot title
#' @param ...: Ignored; accepted for call-site flexibility
#' @return ggplot object of PCA scores
#' @export
plotPCA <- function(data, confidence=95, nonDatCols=c('Sample'), ggTitle=NULL, ...){
  # PC1/PC2 scores per sample and the Hotelling's confidence ellipse
  pcaProjections <- fitPCA(data, nonDatCols=nonDatCols)
  pcaEllipse <- calcPCAEllipse(pcaProjections=pcaProjections, confidence=confidence)
  # Reuse fitPCA's fit object for the variance-explained axis labels rather
  # than duplicating its NA-fill / zero-variance-drop / prcomp steps inline.
  pcaFit <- fitPCA(data, nonDatCols = nonDatCols, returnFit = TRUE)
  varExp1 <- round(summary(pcaFit)$importance[2,1] * 100, 2)
  varExp2 <- round(summary(pcaFit)$importance[2,2] * 100, 2)
  pcaPlot <- ggplot2::ggplot(pcaProjections, ggplot2::aes(x = PC1, y = PC2, colour = Sample))+
    ggplot2::geom_point(size = 5)+
    ggplot2::geom_path(data = pcaEllipse, ggplot2::aes(x = xvar, y = yvar), color = 'black')+
    ggplot2::theme_bw()+
    ggplot2::ylab(sprintf('PC2 (%s%s variance explained)', varExp2, '%')) +
    ggplot2::xlab(sprintf('PC1 (%s%s variance explained)', varExp1, '%')) +
    ggplot2::ggtitle(ggTitle)+
    ggplot2::geom_abline(ggplot2::aes(intercept = 0, slope = 0), color = 'black', size = 0.25)+
    ggplot2::geom_vline(ggplot2::aes(xintercept = 0), color = 'black', size = 0.25)+
    ggplot2::theme(axis.text = ggplot2::element_blank(),
                   axis.title = ggplot2::element_text(size = 11, face = "bold"),
                   legend.title = ggplot2::element_text(size = 11),
                   legend.text = ggplot2::element_text(size = 10),
                   title = ggplot2::element_text(size = 12, face = "bold"),
                   plot.title = ggplot2::element_text(hjust = 0.5))
  return(pcaPlot)
}
|
8c6e4830553f123d3abd38db820161255f766083
|
a689e2c6152d8bc562d73eb06976f8d6b3080367
|
/messages_per_day_and_hour.R
|
31373de61b2e4c95ceba486e0863bd3eca9e34cc
|
[] |
no_license
|
reubenjoseph/Whatsapp_chat_analysis
|
6e62dbc2b5f506a26f77ea1ec69cb1a018bbfda4
|
e0a38bdd89bc194d9a21870d15571904bf2ce66e
|
refs/heads/master
| 2022-11-29T01:13:50.201001
| 2020-07-28T18:50:09
| 2020-07-28T18:50:09
| 283,297,364
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,282
|
r
|
messages_per_day_and_hour.R
|
# Messages per hour of day ------------------------------------------------
# `chat` is a reactive data source: it must always be invoked as `chat()`.
# (Fix: the pipeline below previously started with the bare `chat` object.)
title<-paste0("Most Messages happen at hour ",chat() %>% mutate(hour = hour(time)) %>% count(hour) %>% top_n(1) %>% pull(hour))
chat() %>%
  mutate(hour = hour(time)) %>%
  count(hour) %>%
  ggplot(aes(x = hour, y = n)) +
  geom_bar(stat = "identity",fill="steelblue") +
  ylab("") + xlab("Messages for every hour") +
  ggtitle(title)+
  scale_x_continuous(breaks = 0:23)
# Messages per day of week ------------------------------------------------
daysed<-c("Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday")
# NOTE(review): wday(..., week_start = 2) numbers days starting from Tuesday;
# confirm the Monday-first label vectors below line up as intended.
most_active_day_of_week<-chat() %>% mutate(day = wday(as.Date(time),week_start = 2)) %>% count(day) %>% top_n(1) %>% pull(day)
most_active_day_of_week<-daysed[most_active_day_of_week]
title<-paste0("Most messages are sent on a ",most_active_day_of_week)
days<-c("Mon","Tue","Wed","Thu","Fri","Sat","Sun") # for axis labels
chat() %>%
  mutate(day = wday(as.Date(time),week_start = 2)) %>%
  count(day) %>%
  ggplot(aes(x = day, y = n)) +
  geom_bar(stat = "identity", fill="steelblue") +
  ylab("") + xlab("Messages Per Day of week") +
  ggtitle(title) +
  # Fix: the duplicated scale_x_continuous() call was removed; the second
  # call silently replaced the first with a "Scale for 'x'..." message.
  scale_x_continuous(breaks = 1:7,labels=days)
|
95d81e141d0fecb81ca924a717ca9c2a2c3eb045
|
38116111ccbbb1c4580d8e8c5ac3f9775e1fa384
|
/man/probabilitiesFromCooccurrence.Rd
|
dfc3d6cc3cf1d38fbecee50cf4fd7def19af1ca8
|
[
"MIT"
] |
permissive
|
terminological/tidy-info-stats
|
6c1e37684eeac8d765384b773a23f0488eb7b467
|
1b1f19a718edb44c7178943c322b45fd1e3c93b1
|
refs/heads/master
| 2022-11-30T08:16:46.311945
| 2022-11-18T20:37:21
| 2022-11-18T20:37:21
| 232,600,275
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,611
|
rd
|
probabilitiesFromCooccurrence.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidyConvertToProbabilities.R
\name{probabilitiesFromCooccurrence}
\alias{probabilitiesFromCooccurrence}
\title{Helper function to calculate probability from grouped data in a tidy friendly manner}
\usage{
probabilitiesFromCooccurrence(df, groupXVars, groupYVars, countVar = NULL)
}
\arguments{
\item{df}{a dataframe containing 2 columns defining class of event X and class of event Y and either one row per event,
or a count of observations, for each class combination.
df may also be grouped and in which case the grouping is preserved in the result.}
\item{groupXVars}{the datatable column(s) defining the class of event X quoted by vars(...)}
\item{groupYVars}{the datatable column(s) defining the class of event Y quoted by vars(...)}
\item{countVar}{the datatable column containing the observed frequency combination of event XY. If this is missing the row count will be used instead}
}
\value{
A new datatable with all possible combinations of X&Y and the probabilities associated with each outcome (i.e. an N(X) by N(Y) set of binary confusion matrices)
}
\description{
The purpose of this is to calculate the probabilities of all binary outcomes from class data.
This function is useful when you have 2 types of events (X and Y) and
you either have a set of observations of their co-occurrence, containing non-unique X & Y combinations, or you have a confusion matrix of the counts of their combinations where
each row has a unique combination of X and Y and a third column contains the counts of XY co-occurrences.
}
|
85594835fe82ce0571dcf10cefc3d72f1c7044f8
|
f31ed12f347be5d88f968532e6693b7d22ba2c8b
|
/Final.R
|
4df0505afb1696a1d7f036f0037cc110e43b58e1
|
[] |
no_license
|
norib016/Grad_School_Admission_Prediction
|
46940f632a2641d91c0df35a5514d69f8dd8c92e
|
41a8ebdd54b85360dc0dcb785494f55b2b733f57
|
refs/heads/master
| 2022-08-17T01:10:02.347117
| 2020-05-21T04:26:15
| 2020-05-21T04:26:15
| 265,753,960
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,282
|
r
|
Final.R
|
# Graduate-admission prediction: load data, explore correlations, fit
# linear models and best-subset selection, then validate with a
# train/test split. (Continues with cross-validation further below.)
# Library--------------------------------------------------
library(readr)
library(corrplot)
library(leaps)
# Data Loading and preparing ------------------------------
Admission_Predict_Ver1_1 <- read_csv("Admission_Predict_Ver1.1.csv")
data <- Admission_Predict_Ver1_1
originaldata <- data
rm(Admission_Predict_Ver1_1)
# Shorten the multi-word column names so formulas stay readable
colnames(data)[colnames(data)=="GRE Score"] <- "GRE"
colnames(data)[colnames(data)=="TOEFL Score"] <- "TOEFL"
colnames(data)[colnames(data)=="University Rating"] <- "UniRating"
colnames(data)[colnames(data)=="Chance of Admit"] <- "AdmitChance"
data$`Serial No.` <- NULL
data
# Data Loading and preparing ------------------------------
# CORRELATION PLOT ----------------------------------------
#form the model matrix and correlation variable
varMat <- model.matrix(~.-Research,data=data)[,-1] #Removing the first column as it has
#1's and 0's.
varMat
varCor <- cor(varMat)
#plot the correlation
corrplot(varCor,method = "circle",
tl.col = "black", mar = c(0,0,2,0),
title = "Graduate Admission numerical Variables Correlation")
corrplot(varCor,add = TRUE, # add the above plot
type = "lower", method = "number",number.font = 2,
number.cex = .75,col = "black",
diag = FALSE,tl.pos = "n", cl.pos = "n")
rm(varMat, varCor)
# LINEAR REGRESSION ----------------------------------------
str(data)
#Check if there are any rows with missing values
anyNA(data)
# Recode predictors to their natural types before modelling
regdata <- data
regdata$GRE <- as.integer(regdata$GRE)
regdata$TOEFL <- as.integer(regdata$TOEFL)
regdata$UniRating <- as.factor(regdata$UniRating)
regdata$Research <- as.logical(regdata$Research)
str(regdata)
contrasts(regdata$UniRating)
#Fit a model using all predictors
linear.fit=lm(AdmitChance ~.,data=regdata)
summary(linear.fit)
# SOP is not a reliable indicator with a p value of 0.73
#Fit a model using all predictors but SOP
linear.fit=lm(AdmitChance ~ .-SOP,data=regdata)
summary(linear.fit)
exp(coef(linear.fit))
# All predictors are significant now
#Use subsets and run the model
# Best-subset selection (leaps): compare models of each size
regfit.full = regsubsets(AdmitChance ~., regdata)
reg.summary <- summary(regfit.full)
reg.summary
names(reg.summary)
reg.summary$adjr2 #best with all indicators in place except SOP, 5 var => 0.819, 6 var => 0.820 marginal diff
reg.summary$cp #lowest with all predictors in place except SOP
reg.summary$rss #lowest with all predictors except SOP in place
reg.summary$bic #lowest with 5 predictors, SOP and UniRating not imporving BIC
# NOTE(review): windows() opens a graphics device on Windows only; use
# x11()/quartz() or omit on other platforms.
windows()
par(mfrow = c(2,2))
xlab = "Number of Variables"
# 1st row 1st column
plot(reg.summary$rss,xlab = xlab, ylab = "RSS",type = "l")
loc <- which.min(reg.summary$rss)
loc
points(loc,reg.summary$rss[loc], col = "red",cex = 2,pch = 20)
# 1st row 2nd column
plot(reg.summary$adjr2,xlab = xlab, ylab = "Adjusted RSq",type = "l")
loc <- which.max(reg.summary$adjr2)
loc
points(loc,reg.summary$adjr2[loc], col = "red",cex = 2,pch = 20)
# 2nd row 1st column
plot(reg.summary$cp,xlab = xlab, ylab = "Cp",type = 'l')
loc <- which.min(reg.summary$cp)
loc
points(loc,reg.summary$cp[loc], col = "red",cex = 2,pch = 20)
# 2nd row 2nd column
plot(reg.summary$bic,xlab = xlab, ylab = "BIC",type = 'l')
loc <- which.min(reg.summary$bic)
loc
points(loc,reg.summary$bic[loc], col = "red",cex = 2,pch = 20)
dev.off()
## What is the mean square error (base case)?
mse = round(reg.summary$rss[5]/nrow(regdata), 4)
mse #0.0035 for 5, 6, 7 variable model, lets see if train/test technique improves the result substantially
################ Attempt to Linear Regression with Train and Test subsets over 100 iterations ########
set.seed(123)
# Random logical split: TRUE -> training row, FALSE -> test row
train = sample(c(TRUE,FALSE), nrow(regdata), replace = TRUE)
test = !train
# Find the best training set models
regfit.best = regsubsets(AdmitChance~., data = regdata[train,],nvmax = 7)
# Obtain the test set design matrix
test.mat = model.matrix(AdmitChance~., data = regdata[test,])
# Vector for errors
val.errors = rep(NA,7)
# Run for each number of variables in the model
for (i in 1:7) {
# obtain the training set coefficients
coefi = coef(regfit.best,id = i)
# predict test set values
pred = test.mat[,names(coefi)] %*% coefi
# Obtain the MSE
val.errors[i] = mean((regdata$AdmitChance[test] - pred)^2)
}
val.errors # 5 to 7 several variable scenarios are stable at 0.0035
# S3 predict method for leaps::regsubsets fits (which ship without one).
# Rebuilds the design matrix from the formula stored in the fit's call,
# then multiplies the columns of the size-`id` model by its coefficients.
predict.regsubsets <- function(object, newdata, id, ...) {
  model_formula <- as.formula(object$call[[2]])
  design <- model.matrix(model_formula, newdata)
  beta <- coef(object, id = id)
  design[, names(beta)] %*% beta
}
# 100-fold cross-validation of best-subset model sizes 1..7
# (uses predict.regsubsets defined above)
k <- 100
set.seed(123)
folds <- sample(1:k,nrow(regdata), replace = TRUE)
cv.errors = matrix(NA,nrow = k,ncol = 7, dimnames = list(NULL, paste(1:7)))
# The fold and number of variables loops
for (j in 1:k) { # fold loop
# The 19 best models with jth fold omitted
bestfit.fold = regsubsets(AdmitChance ~., data = regdata[folds != j,],nvmax = 7)
# The MSE for the fold prediction error
for (i in 1:7) {# number of variable loop
pred = predict.regsubsets(bestfit.fold, regdata[folds == j,],id = i)
cv.errors[j,i] = mean((regdata$AdmitChance[folds == j] - pred)^2)
}
}
# Find the mean across the fold MSE for each model
mean.cv.errors = apply(cv.errors,2,mean)
round(mean.cv.errors, 4) # best case is 0.0035 for 5 var model, not to different from base case 0.0036
par(mfrow = c(1,1))
plot(mean.cv.errors,type = 'b')
which.min(mean.cv.errors)
#############################################################
# Train and Test -----------------------------------------
# 75/25 row split for the random-forest models below
set.seed(123)
num <- sample(1:500, nrow(data)*0.75, replace = FALSE)
train <- data[num,]
test <- data[-num,]
rm(num)
# Train and Test -----------------------------------------
# `temp` collects the true outcome plus each model's test predictions
temp <- test$AdmitChance
temp <- as.data.frame(temp)
# Random Forest -------------------------------------------
library(randomForest)
library(ggplot2)
rf <- randomForest(AdmitChance ~., data = train) # with all the variables
varImpPlot(rf, main = 'Model Importance Plot')
# Rebuild the importance plot with ggplot for nicer formatting
impplot <- rf$importance
impplot <- as.data.frame(impplot)
impplot$Attribute <- rownames(impplot)
p <- ggplot(data = impplot, aes(reorder(Attribute, IncNodePurity), IncNodePurity)) + geom_col(mapping = NULL, data = NULL, position = "stack",
width = NULL, na.rm = FALSE, show.legend = NA,
inherit.aes = TRUE,fill = 'steelblue4')
p + coord_flip() + xlab("Attributes") + labs(title = "Variable Importance Plot")
rm(p,impplot)
plot(rf, main = "Error with Number of Trees")
# Column 8 is AdmitChance; drop it (and unused predictors) when predicting
pretest <- predict(rf,test[-8])
temp$rf <- pretest
rf2 <- randomForest(AdmitChance ~ CGPA+UniRating+SOP+LOR+Research, data = train) # with few variables
pretest <- predict(rf2,test[c(-1,-2,-8)])
temp$rf2 <- pretest
rf3 <- randomForest(AdmitChance ~GRE+TOEFL+UniRating+SOP+LOR+CGPA, data = train) # without research based imp plot
pretest <- predict(rf3,test[c(-7,-8)])
temp$rf3 <- pretest
rm(pretest)
# Random Forest -------------------------------------------
# RMSE ----------------------------------------------------
library(ModelMetrics)
rmse(temp$temp,temp$rf) # with all variables = 0.03700746
rmse(temp$temp,temp$rf2)# with all variables = 0.05727096
rmse(temp$temp,temp$rf3) # without Research variable = 0.03746171
# Based on the RMSE value model with all the predictors and without Research
#is performed best
# RMSE ----------------------------------------------------
# RANDOM FOREST WITH CV ----------------------------------------------
# k-fold cross-validation of the full random-forest model: rows are assigned
# to one of k random (roughly equal-sized) folds; each fold in turn is held
# out, a forest is trained on the rest, and out-of-fold predictions are
# accumulated for scoring.
k <- 10
fold <- 1:10
datacv <- data
datacv$kfold <- sample(1:k, nrow(datacv), replace = TRUE)
length(which(datacv$kfold == 9)) # sanity check: size of one fold
prediction <- data.frame()
test_sets <- data.frame()
train_sets <- data.frame()
train_pred <- data.frame()
for (n in 1:k) {
  # Rows with fold id 'n' form the test set; drop the kfold helper column.
  test_set <- datacv[which(datacv$kfold %in% n), -ncol(datacv)]
  # All other folds form the training set.
  train_set <- datacv[which(datacv$kfold %in% fold[-c(n)]), -ncol(datacv)]
  forest <- randomForest(AdmitChance ~ ., data = train_set, importance = TRUE, ntree = 500)
  # Predict on the held-out fold; keep predictions and sets for later scoring.
  n_predict <- data.frame(predict(forest, test_set))
  prediction <- rbind(prediction, n_predict)
  test_sets <- rbind(test_sets, as.data.frame(test_set))
  train_sets <- rbind(train_sets, train_set)
  train_pred <- rbind(train_pred, as.data.frame(forest$predicted))
}
# BUG FIX: removed a stray dangling "test_sets$" line that parsed together
# with the following statement and caused a runtime error.
library(ModelMetrics)
a <- rmse(prediction$predict.forest..test_set., test_sets$AdmitChance) # out-of-fold RMSE = 0.062
varImpPlot(forest)
b <- rmse(train_pred$`forest$predicted`, train_sets$AdmitChance) # OOB training RMSE = 0.062
rm(a, b)
# RANDOM FOREST WITH CV ----------------------------------------------
|
745fddd767de3b7eff1e2d6da0ff10ef9b8b9164
|
8f8eac85cfbf8d3bc768318848ec964cb297b1cb
|
/nesi/labour_situation/0_do_all_labour_situation.R
|
48719a1e0abcba45c9ba8407de51e57b31622ac4
|
[] |
no_license
|
jnaudon/datachile-etl
|
5231a3762dd32f3f3def4d568fc63934d603cf8b
|
8fa577378d38f8d63f6dfdb00ed515bbb439f154
|
refs/heads/master
| 2023-03-23T00:36:35.698292
| 2019-03-23T03:30:16
| 2019-03-23T03:30:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 638
|
r
|
0_do_all_labour_situation.R
|
source("labour_situation/1_functions/1_load_functions.R")
source("labour_situation/1_functions/5_normalization.R")
#source("labour_situation/1_functions/6_batch_load_data.R") #uncomment to recalculate from 0
#source("labour_situation/2_process_labour_situation/1_process_labour_situation.R") #uncomment to recalculate from 0
#source("labour_situation/3_processed_labour_situation/1_load_labour_situation.R") #uncomment to recalculate from 0
#source("labour_situation/4_descriptive_statistics/1_process_descriptive_statistics.R") #uncomment to recalculate from 0
source("labour_situation/4_descriptive_statistics/6_join_statistics.R")
|
91da4635327198eb76f4d3019b8229ecc8f7675d
|
c231c905f627d22ff1e8423f8eac86b8b0cc3af9
|
/Capstone-Movielens.R
|
5d6c3eba4125854e9cae59619310569c31d8e286
|
[] |
no_license
|
Marce68/Movielens_Capstone
|
ff46d0695f0fad2f4ce32b06ba4eb51679bfca39
|
ed9f7382ac53a1280d44d45b6727a01be4e4bf83
|
refs/heads/master
| 2022-09-07T02:26:33.411539
| 2020-05-31T16:27:35
| 2020-05-31T16:27:35
| 268,316,368
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,230
|
r
|
Capstone-Movielens.R
|
################################
# Importing data
################################
# Note: this process could take a couple of minutes
# Install-and-attach required packages if missing.
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
# Download and parse the raw data only when no cached copy exists on disk.
if (sum(dir("~/R/Data/") == "ML10M.Rdata") == 0) {
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
# ratings.dat uses "::" as the field separator; convert to tabs for fread.
ratings <- fread(text = gsub("::",
"\t",
readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
col.names = c("userId",
"movieId",
"rating",
"timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
# NOTE(review): as.numeric(levels(movieId))[movieId] assumes the column is a
# factor (R < 4.0 / stringsAsFactors = TRUE); under R >= 4.0 character
# columns make this yield NA -- confirm the R version this targets.
movies <- as.data.frame(movies) %>%
mutate(movieId = as.numeric(levels(movieId))[movieId],
title = as.character(title),
genres = as.character(genres))
movielens <- left_join(ratings,
movies,
by = "movieId")
# Cache the joined data set for future runs.
save(movielens, file="~/R/Data/ML10M.Rdata")
}
################################
# Create edx set, validation set
################################
# Validation set will be 10% of MovieLens data
# BUG FIX: load the same file name that was saved above ("ML10M.Rdata");
# the original loaded "ML10M.RData", which fails on case-sensitive
# file systems (Linux).
load(file = "~/R/Data/ML10M.Rdata")
paste("How many NA in movielens?", sum(is.na(movielens))) # sanity check on missing values
options(digits = 5)
set.seed(1, sample.kind = "Rounding") # `set.seed(1)` for R version <= 3.5
# Hold out 10% of the ratings for final validation.
test_index <- createDataPartition(y = movielens$rating,
                                  times = 1,
                                  p = 0.1,
                                  list = FALSE)
edx <- movielens[-test_index, ]
temp <- movielens[test_index, ]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
  semi_join(edx, by = "movieId") %>%
  semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
# Clean Global Environment (dl/ratings/movies only exist when the download
# branch above ran; rm() warns but continues otherwise).
rm(dl, ratings, movies, test_index, temp, movielens, removed)
################################
# Dataset exploration
################################
# Q1: dimensions and a preview of the training set
dim(edx)
head(edx) %>% knitr::kable()
# Q2: number of ratings per rating value
edx %>%
group_by(rating) %>%
summarize(Num_Ratings = n()) %>%
arrange(desc(rating)) %>%
knitr::kable()
# Q3-Q4: number of distinct movies and users
data.frame(Movies = length(unique(edx$movieId)), Users = length(unique(edx$userId))) %>% knitr::kable()
# Q5: rating counts for four major genres (a movie can carry several genres)
edx %>%
group_by(genres) %>%
summarize(Num_Ratings = length(rating)) %>%
summarize(Drama = sum(Num_Ratings[str_detect(genres,"Drama")]),
Comedy = sum(Num_Ratings[str_detect(genres,"Comedy")]),
Thriller = sum(Num_Ratings[str_detect(genres,"Thriller")]),
Romance = sum(Num_Ratings[str_detect(genres,"Romance")])) %>%
knitr::kable()
# Q6: the 20 most-rated movies
edx %>%
group_by(movieId) %>%
summarize(Title = first(title),
Num_Ratings = length(rating)) %>%
arrange(desc(Num_Ratings)) %>%
slice(1:20) %>%
knitr::kable()
# Q7: most common rating values
edx %>%
group_by(rating) %>%
summarize(Occurance = n()) %>%
arrange(desc(Occurance)) %>%
knitr::kable()
# Q8: full-star vs half-star rating counts
edx %>%
group_by(rating) %>%
summarize(Occurance = n()) %>%
arrange(desc(Occurance)) %>%
summarize(Full_Star = sum(Occurance[(rating - round(rating)) == 0]),
Half_Star = sum(Occurance[(rating - round(rating)) == 0.5])) %>%
knitr::kable()
################################
# Loss Function
################################
# Root-mean-square error between observed and predicted ratings.
# NA values propagate (no na.rm), matching the project's loss definition.
RMSE <- function(true_ratings, predicted_ratings) {
  errors <- true_ratings - predicted_ratings
  sqrt(mean(errors^2))
}
###############################################
# Recommendation Systems
###############################################
#########################
# Just Average Model ####
# Baseline: predict the global mean rating for every movie/user pair.
mu_hat <- mean(edx$rating)
naive_rmse <- RMSE(validation$rating, mu_hat)
rmse_results <- data_frame(Method = "Full Set - Just the average",
RMSE = naive_rmse)
rmse_results %>% knitr::kable()
#########################
# Movie Effect Model ####
# b_i: per-movie deviation from the global mean.
mu <- mean(edx$rating)
b_i <-
edx %>%
group_by(movieId) %>%
summarize(b_i = mean(rating - mu))
b_i %>% qplot(b_i,
geom ="histogram",
bins = 30,
data = .,
color = I("black"))
predicted_ratings <- validation %>%
left_join(b_i, by='movieId') %>%
mutate(pred = mu + b_i) %>%
.$pred
model_1_rmse <- RMSE(predicted_ratings, validation$rating)
rmse_results <- bind_rows(rmse_results,
data_frame(Method="Full Set - Movie Effect Model",
RMSE = model_1_rmse ))
rmse_results %>% knitr::kable()
###############################
# Movie & User Effect Model ###
# Let's give a look at the distribution of the average rating of each user.
edx %>%
group_by(userId) %>%
summarize(b_u = mean(rating)) %>%
ggplot(aes(b_u)) +
geom_histogram(bins = 50, color = "black")
# b_u: per-user deviation after removing the global mean and movie effect.
b_u <-
edx %>%
left_join(b_i, by='movieId') %>%
group_by(userId) %>%
summarize(b_u = mean(rating - mu - b_i))
b_u %>% qplot(b_u,
geom ="histogram",
bins = 50,
data = .,
color = I("black"))
predicted_ratings <- validation %>%
left_join(b_i, by='movieId') %>%
left_join(b_u, by='userId') %>%
mutate(pred = mu + b_i + b_u) %>%
.$pred
model_2_rmse <- RMSE(predicted_ratings, validation$rating)
rmse_results <- bind_rows(rmse_results,
data_frame(Method="Full Set - Movie & User Effects Model",
RMSE = model_2_rmse ))
rmse_results %>% knitr::kable()
# Let's give a closer look at our model, we'll recommend movies with higher b_i
# So let's check which movies have the highest b_i ...
movie_titles <- edx %>%
select(movieId, title) %>%
distinct()
# Top 10 best movies
b_i %>%
left_join(movie_titles, by="movieId") %>%
arrange(desc(b_i)) %>%
select(title, b_i) %>%
slice(1:10) %>%
knitr::kable()
# ... and which ones have the lowest b_i
# Top 10 worst movies
b_i %>%
left_join(movie_titles, by="movieId") %>%
arrange(b_i) %>%
select(title, b_i) %>%
slice(1:10) %>%
knitr::kable()
# There is something weird: none of the best or worst movies are familiar.
# When calculating b_i or b_u we are grouping and averaging without considering
# the sample size, that is how many ratings the movie had or how many ratings
# the user gave.
# How many ratings had the best movies?
edx %>%
count(movieId) %>%
left_join(b_i) %>%
left_join(movie_titles, by="movieId") %>%
arrange(desc(b_i)) %>%
select(title, b_i, n) %>%
slice(1:10) %>%
knitr::kable()
# How many ratings had the worst movies?
edx %>%
count(movieId) %>%
left_join(b_i) %>%
left_join(movie_titles, by="movieId") %>%
arrange(b_i) %>%
select(title, b_i, n) %>%
slice(1:10) %>%
knitr::kable()
##################
# Regularization
#####################################
# Let's use a more manageable dataset taking randomly 300,000 samples
#######################################################################################################
edx_subset <- sample(1:nrow(edx), 300000, replace = FALSE)
edx_1 <- edx[edx_subset, ]
# edx_1 <- edx
tst_idx <- createDataPartition(y = edx_1$rating,
times = 1,
p = 0.2,
list = FALSE)
trn_set <- edx_1[-tst_idx, ]
tst_set <- edx_1[tst_idx, ]
# Keep only movies/users present in the training set.
tst_set <- tst_set %>%
semi_join(trn_set, by = "movieId") %>%
semi_join(trn_set, by = "userId")
movie_titles <- edx_1 %>%
select(movieId, title) %>%
distinct()
# Just the average
# NOTE: this overwrites rmse_results, starting a fresh table for the small set
# (column name is lower-case "method" from here on).
mu_hat <- mean(trn_set$rating)
naive_rmse <- RMSE(tst_set$rating, mu_hat)
rmse_results <- data_frame(method = "Small Set - Just the average", RMSE = naive_rmse)
rmse_results %>% knitr::kable()
# Movie Effect
mu <- mean(trn_set$rating)
movie_avgs <- trn_set %>%
group_by(movieId) %>%
summarize(b_i = mean(rating - mu))
predicted_ratings <- mu + tst_set %>%
left_join(movie_avgs, by='movieId') %>%
.$b_i
model_1_rmse <- RMSE(predicted_ratings, tst_set$rating)
rmse_results <- bind_rows(rmse_results,
data_frame(method="Small Set - Movie Effect Model",
RMSE = model_1_rmse ))
rmse_results %>% knitr::kable()
# Movie and User effect
user_avgs <- trn_set %>%
left_join(movie_avgs, by='movieId') %>%
group_by(userId) %>%
summarize(b_u = mean(rating - mu - b_i))
predicted_ratings <- tst_set %>%
left_join(movie_avgs, by='movieId') %>%
left_join(user_avgs, by='userId') %>%
mutate(pred = mu + b_i + b_u) %>%
.$pred
model_2_rmse <- RMSE(predicted_ratings, tst_set$rating)
rmse_results <- bind_rows(rmse_results,
data_frame(method="Small Set - Movie & User Effects Model",
RMSE = model_2_rmse ))
rmse_results %>% knitr::kable()
###############################################################################
# We have a more manageable dataset but are not considering the number of
# ratings yet, in fact ...
###############################################################################################
movie_titles <- edx_1 %>%
select(movieId, title) %>%
distinct()
# Top 10 best movies
edx_1 %>%
dplyr::count(movieId) %>%
left_join(movie_avgs) %>%
left_join(movie_titles, by="movieId") %>%
arrange(desc(b_i)) %>%
select(title, b_i, n) %>%
slice(1:10) %>%
knitr::kable()
# Bottom 10 worst movies
edx_1 %>%
dplyr::count(movieId) %>%
left_join(movie_avgs) %>%
left_join(movie_titles, by="movieId") %>%
arrange(b_i) %>%
select(title, b_i, n) %>%
slice(1:10) %>%
knitr::kable()
# Let's try a possible value for regularization
# Penalized estimate: b_i = sum(rating - mu) / (n + lambda), shrinking
# estimates for movies with few ratings toward 0.
lambda <- 3
movie_reg_avgs <- trn_set %>%
group_by(movieId) %>%
summarize(b_i = sum(rating - mu)/(n()+lambda), n_i = n())
data_frame(original = movie_avgs$b_i,
regularlized = movie_reg_avgs$b_i,
n = movie_reg_avgs$n_i) %>%
ggplot(aes(original, regularlized, size=sqrt(n))) +
geom_point(shape=1, alpha=0.5)
# Top 10 best movies after regularization
trn_set %>%
dplyr::count(movieId) %>%
left_join(movie_reg_avgs) %>%
left_join(movie_titles, by="movieId") %>%
arrange(desc(b_i)) %>%
select(title, b_i, n) %>%
slice(1:10) %>%
knitr::kable()
# Top 10 worst movies after regularization
trn_set %>%
dplyr::count(movieId) %>%
left_join(movie_reg_avgs) %>%
left_join(movie_titles, by="movieId") %>%
arrange(b_i) %>%
select(title, b_i, n) %>%
slice(1:10) %>%
knitr::kable()
predicted_ratings <- tst_set %>%
left_join(movie_reg_avgs, by='movieId') %>%
mutate(pred = mu + b_i) %>%
.$pred
model_3_rmse <- RMSE(predicted_ratings, tst_set$rating)
rmse_results <- bind_rows(rmse_results,
data_frame(method="Small Set - Regularized Movie Effect Model",
RMSE = model_3_rmse ))
rmse_results %>% knitr::kable()
# It works, but the trial value is probably not optimal, so
# let's use cross validation to choose a better lambda.
lambdas <- seq(0, 10, 0.25)
mu <- mean(trn_set$rating)
# Fit regularized movie + user effects with penalty `l` on the training set
# and return the resulting test-set RMSE. Relies on globals trn_set/tst_set/mu.
reg_fun <- function(l) {
b_i <- trn_set %>%
group_by(movieId) %>%
summarize(b_i = sum(rating - mu)/(n()+l))
b_u <- trn_set %>%
left_join(b_i, by="movieId") %>%
group_by(userId) %>%
summarize(b_u = sum(rating - b_i - mu)/(n()+l))
predicted_ratings <-
tst_set %>%
left_join(b_i, by = "movieId") %>%
left_join(b_u, by = "userId") %>%
mutate(pred = mu + b_i + b_u) %>%
.$pred
return(RMSE(predicted_ratings, tst_set$rating))
}
rmses <- sapply(lambdas, reg_fun)
qplot(lambdas, rmses)
# Pick the penalty that minimizes the test RMSE.
lambda <- lambdas[which.min(rmses)]
lambda
rmse_results <- bind_rows(rmse_results,
data_frame(method="Small Set - Regularized Movie & User Effect Model",
RMSE = min(rmses)))
rmse_results %>% knitr::kable()
# Let's use the same lambda over the full data set.
mu <- mean(edx$rating)
b_i <- edx %>%
group_by(movieId) %>%
summarize(b_i = sum(rating - mu)/(n()+lambda))
b_u <- edx %>%
left_join(b_i, by="movieId") %>%
group_by(userId) %>%
summarize(b_u = sum(rating - b_i - mu)/(n()+lambda))
predicted_ratings <-
validation %>%
left_join(b_i, by = "movieId") %>%
left_join(b_u, by = "userId") %>%
mutate(pred = mu + b_i + b_u) %>%
.$pred
model_4_rmse <- RMSE(predicted_ratings, validation$rating)
rmse_results <- bind_rows(rmse_results,
data_frame(method="Full Set - Regularized Movie & User Effect Model",
RMSE = model_4_rmse ))
rmse_results %>% knitr::kable()
# Let's finally check what are the best and worst movies.
movie_titles <- edx %>%
select(movieId, title) %>%
distinct()
# Top 10 best movies after regularization
edx %>%
dplyr::count(movieId) %>%
left_join(b_i) %>%
left_join(movie_titles, by="movieId") %>%
arrange(desc(b_i)) %>%
select(title, b_i, n) %>%
slice(1:10) %>%
knitr::kable()
# Top 10 worst movies after regularization
edx %>%
dplyr::count(movieId) %>%
left_join(b_i) %>%
left_join(movie_titles, by="movieId") %>%
arrange(b_i) %>%
select(title, b_i, n) %>%
slice(1:10) %>%
knitr::kable()
|
47e16c4461484766b28970d1f1eb3f5bbd7ea06b
|
5de5c723f8a2269c1682d3df9ea22bb5dda1d4ee
|
/R/is.RStudio.R
|
acba1ba2159e60d5d31fa67974e54f77848cfac9
|
[] |
no_license
|
paulponcet/observer
|
0048f28d5b7c16a3d31d82a85c0273f02a49d540
|
5db216f81d570ea71d4590afb6bf65e221b3a365
|
refs/heads/master
| 2021-01-12T05:08:17.191667
| 2017-01-29T18:47:48
| 2017-01-29T18:47:48
| 77,863,016
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 96
|
r
|
is.RStudio.R
|
# Detect whether the current session is running inside RStudio.
# Used by the 'View_obs' function.
is.RStudio <- function() {
  identical(Sys.getenv("RSTUDIO"), "1")
}
|
988004b2dac07e8dc6647b820f1e3f606dd230b7
|
f5488e63bbedd3340a86b2a7073ca9de3ab53532
|
/Week 2 Russian Twitter.R
|
7f3026c8879928a52e023472d09c684bc238c9c3
|
[] |
no_license
|
ClioLI/Computational_Comm
|
f4e8d8b2b54a62694f47e878dec497aaa5c4dc7f
|
1f2b5ccac92330fe05f184dacdca042f0ce5e7a8
|
refs/heads/main
| 2023-03-22T23:10:10.406941
| 2021-03-19T07:01:20
| 2021-03-19T07:01:20
| 349,328,867
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 27,438
|
r
|
Week 2 Russian Twitter.R
|
###############################################################################################
## Russian Twitter Accounts and the Partisan Polarization of Vaccine Discourse, 2015-2017: ##
## Supplementary code ##
## ##
## Author: Dror Walter ##
## ##
## To Cite: ##
## Walter D., Ophir Y. & Hall Jamieson, K. (2020) ##
## Russian Twitter Accounts and the Partisan Polarization of Vaccine Discourse, 2015-2017. ##
## American Journal of Public Health. http://dx.doi.org/10.2105/AJPH.2019.305564 ##
## First published Online (19 March 2020) ##
## ##
###############################################################################################
################################ Importing Libraries
options(stringsAsFactors = F)
library(stringi)
library(stringr)
library(qdap)
library(tm)
library(ggplot2)
library(lubridate)
library(irr)
library(quanteda)
library(ldatuning)
library(topicmodels)
library(textcat)
library(parallel)
library(RSQLite)
library(doParallel)
library(scales)
library(lsa)
library(igraph)
library(cld2)
library(tidyverse)
library(dplyr)
library(rgexf)
library(openxlsx)
############################## Importing and Cleaning the Data
# Read twitter data (available at the Twitter Election Integrity Database)
text.df <- read.csv("raw_data/ira_tweets_csv_hashed.csv", stringsAsFactors = F)
# limit to English tweets - based on twitter metadata
text.en <- text.df[text.df$tweet_language == "en",]
# Convert to lower case
text.en$tweet_text2 <- tolower(text.en$tweet_text)
# limit to English tweets as identified by google cld
text.en$textcatcld2<- cld2::detect_language(text.en$tweet_text)
text.en<-text.en[which(text.en$textcatcld2=='en'),]
# correct encoding (strip non-ASCII residue)
text.en$tweet_text3<-iconv(text.en$tweet_text2, "latin1", "ASCII", sub="")
text.en$tweet_text3<-iconv(text.en$tweet_text3, "UTF-8", "ASCII", sub="")
text.df<-text.en
rm(text.en)
# Cleaning Twitter artifacts (links, images etc.)
# Each gsub removes one class of URL/domain token from the tweet text.
text.df$tweet_text3<-gsub("[h][t][t][p][^[:space:]]*","",text.df$tweet_text3)
text.df$tweet_text3<-gsub("[h][t][t][p][s][^[:space:]]*","",text.df$tweet_text3)
text.df$tweet_text3<-gsub("[p][i][c][.][^[:space:]]+","",text.df$tweet_text3)
text.df$tweet_text3<-gsub("[^[:space:]]*[.][c][o][m][^[:space:]]*","",text.df$tweet_text3)
text.df$tweet_text3<-gsub("[w][w][w][.][^[:space:]]+","",text.df$tweet_text3)
text.df$tweet_text3<-gsub("[^[:space:]]*[.][l][y][^[:space:]]*","",text.df$tweet_text3)
text.df$tweet_text3<-gsub("[^[:space:]]*[y][o][u][t][u][.][b][e][^[:space:]]*","",text.df$tweet_text3)
text.df$tweet_text3<-gsub("[^[:space:]]*[.][c][o][^[:space:]]*","",text.df$tweet_text3)
text.df$tweet_text3<-gsub("[^[:space:]]*[.][c][m][^[:space:]]*","",text.df$tweet_text3)
text.df$tweet_text3<-gsub("[^[:space:]]*[.][o][r][g][^[:space:]]*","",text.df$tweet_text3)
text.df$tweet_text3<-gsub("[^[:space:]]*[w][a][p][t][o][.][s][t][^[:space:]]*","",text.df$tweet_text3)
text.df$tweet_text3<-gsub("[&][a][m][p]"," ",text.df$tweet_text3)
# arranging the data and renaming columns
# (column 13 becomes the raw tweet, column 34 the cleaned text)
data <- text.df
colnames(data)[13]<-"orig_text"
colnames(data)[34]<-"text"
# setting data as date format
data$date2 <- as.Date(data$tweet_time, '%Y-%m-%d')
# adding index column
data$index<-seq(1,nrow(data))
# limiting dates to the study window (2015-2017)
data<-data[data$date2<"2018-01-01",]
data<-data[data$date2>"2015-01-01",]
#########################################################################
########### #######
########### LDA PIPELINE- corpus and hyperparameter grid search #######
########### #######
#########################################################################
# removing extremely short text data
removed_short<-subset(data,nchar(as.character(data$text))<6)
data2<-subset(data,!nchar(as.character(data$text))<6)
# removing duplicate tweets for data3 and saving removed in a DF;
# they are added back after model estimation to calculate salience of all texts
removed_df<-data2[duplicated(data2$text),]
data3 <- data2[!duplicated(data2$text),]
# sampling 10% for K and hyperparameter optimization
data3_10perc_numbers<-sample((1:nrow(data3)),(nrow(data3)/10),replace=FALSE)
data3_10perc<-data3[data3_10perc_numbers,]
# Building the corpus data - notice the additional stopwords and the removal of extremely common/rare words
mycorpus <- corpus(data3_10perc)
stopwords_and_single<-c(stopwords("english"),LETTERS,letters, "t.co", "http", "https", "rt", "p", "amp", "via")
dfm_counts <- dfm(mycorpus,tolower = TRUE, remove_punct = TRUE,remove_numbers=TRUE,
remove = stopwords_and_single,stem = FALSE,
remove_separators=TRUE)
docnames(dfm_counts)<-dfm_counts@docvars$index
# removing extremely common or rare tokens
dfm_counts2<-dfm_trim(dfm_counts, max_docfreq = 0.95, min_docfreq=0.0001,docfreq_type="prop")
# convert to LDA-ready object
dtm_lda <- convert(dfm_counts2, to = "topicmodels",docvars = dfm_counts2@docvars)
full_data<-dtm_lda
# count number of documents for crossvalidation
n <- nrow(full_data)
# clean temp data
rm(text.df)
rm(dfm_counts)
rm(dfm_counts2)
# Run the crossvalidation loop
# 10-fold CV over a grid of (alpha, k): for each alpha, fit an LDA at each
# candidate k on 9 folds and score held-out perplexity on the 10th, in
# parallel over k via doParallel/foreach.
# NOTE(review): the three loops below are near-identical copies differing only
# in the candidate grids -- a candidate for extraction into a helper function.
print(Sys.time())
# create container for results
MainresultDF<-data.frame(k=c(1),perplexity=c(1),myalpha=c("x"))
MainresultDF<-MainresultDF[-1,]
# set possible alpha and k values
candidate_alpha<- c(0.01, 0.05, 0.1, 0.2, 0.5) # candidates for alpha values
candidate_k <- c(2, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120) # candidates for how many topics
# run the 10-fold cross validation
for (eachalpha in candidate_alpha) {
print ("now running ALPHA:")
print (eachalpha)
print(Sys.time())
cluster <- makeCluster(detectCores(logical = TRUE) - 1) # We are leaving one core spare. If the machine has a single core, remove the -1.
registerDoParallel(cluster)
clusterEvalQ(cluster, {
library(topicmodels)
})
folds <- 10
splitfolds <- sample(1:folds, n, replace = TRUE)
clusterExport(cluster, c("full_data", "splitfolds", "folds", "candidate_k"))
system.time({
results <- foreach(j = 1:length(candidate_k), .combine = rbind) %dopar%{
k <- candidate_k[j]
print(k)
results_1k <- matrix(0, nrow = folds, ncol = 2)
colnames(results_1k) <- c("k", "perplexity")
for(i in 1:folds){
train_set <- full_data[splitfolds != i , ]
valid_set <- full_data[splitfolds == i, ]
fitted <- LDA(train_set, k = k, method = "Gibbs",
control = list(alpha=eachalpha) )
results_1k[i,] <- c(k, perplexity(fitted, newdata = valid_set))
}
return(results_1k)
}
})
stopCluster(cluster)
results_df <- as.data.frame(results)
results_df$myalpha<-as.character(eachalpha)
MainresultDF<-rbind(MainresultDF,results_df)
}
print ("DONE!")
print(Sys.time())
# arrange and examine results
MainresultDF$kalpha=paste0(as.character(MainresultDF$k),MainresultDF$myalpha)
ggplot(MainresultDF) +geom_boxplot(aes(x=k, y=perplexity, group=kalpha,color=myalpha))
# run additional k and alpha values (same procedure, extended grid)
candidate_alpha<- c(0.01,0.05)
candidate_k <- c(130,140,150,160) # candidates for how many topics
for (eachalpha in candidate_alpha) {
print ("now running ALPHA:")
print (eachalpha)
print(Sys.time())
cluster <- makeCluster(detectCores(logical = TRUE) - 1) # leave one CPU spare...
registerDoParallel(cluster)
clusterEvalQ(cluster, {
library(topicmodels)
})
folds <- 10
splitfolds <- sample(1:folds, n, replace = TRUE)
clusterExport(cluster, c("full_data", "splitfolds", "folds", "candidate_k"))
system.time({
results <- foreach(j = 1:length(candidate_k), .combine = rbind) %dopar%{
k <- candidate_k[j]
print(k)
results_1k <- matrix(0, nrow = folds, ncol = 2)
colnames(results_1k) <- c("k", "perplexity")
for(i in 1:folds){
train_set <- full_data[splitfolds != i , ]
valid_set <- full_data[splitfolds == i, ]
fitted <- LDA(train_set, k = k, method = "Gibbs",
control = list(alpha=eachalpha) )
results_1k[i,] <- c(k, perplexity(fitted, newdata = valid_set))
}
return(results_1k)
}
})
stopCluster(cluster)
NEWresults_df <- as.data.frame(results)
NEWresults_df$myalpha<-as.character(eachalpha)
MainresultDF$kalpha<-paste0(as.character(MainresultDF$k),MainresultDF$myalpha)
NEWresults_df$kalpha<-paste0(as.character(NEWresults_df$k),NEWresults_df$myalpha)
MainresultDF<-rbind(MainresultDF,NEWresults_df)
}
print ("DONE!")
print(Sys.time())
# examine results
ggplot(MainresultDF) +
geom_boxplot(aes(x=k, y=perplexity, group=kalpha,color=myalpha))+
geom_smooth(se = TRUE, aes(x=k, y=perplexity,color=myalpha))
# run last k values (same procedure, final grid extension)
candidate_alpha<- c(0.01)
candidate_k <- c(170,180,190,200) # candidates for how many topics
for (eachalpha in candidate_alpha) {
print ("now running ALPHA:")
print (eachalpha)
print(Sys.time())
cluster <- makeCluster(detectCores(logical = TRUE) - 1) # leave one CPU spare...
registerDoParallel(cluster)
clusterEvalQ(cluster, {
library(topicmodels)
})
folds <- 10
splitfolds <- sample(1:folds, n, replace = TRUE)
clusterExport(cluster, c("full_data", "splitfolds", "folds", "candidate_k"))
system.time({
results <- foreach(j = 1:length(candidate_k), .combine = rbind) %dopar%{
k <- candidate_k[j]
print(k)
results_1k <- matrix(0, nrow = folds, ncol = 2)
colnames(results_1k) <- c("k", "perplexity")
for(i in 1:folds){
train_set <- full_data[splitfolds != i , ]
valid_set <- full_data[splitfolds == i, ]
fitted <- LDA(train_set, k = k, method = "Gibbs",
control = list(alpha=eachalpha) )
results_1k[i,] <- c(k, perplexity(fitted, newdata = valid_set))
}
return(results_1k)
}
})
stopCluster(cluster)
NEWresults_df <- as.data.frame(results)
NEWresults_df$myalpha<-as.character(eachalpha)
MainresultDF$kalpha<-paste0(as.character(MainresultDF$k),MainresultDF$myalpha)
NEWresults_df$kalpha<-paste0(as.character(NEWresults_df$k),NEWresults_df$myalpha)
MainresultDF<-rbind(MainresultDF,NEWresults_df)
}
print ("DONE!")
print(Sys.time())
# Examine full k and alpha results
ggplot(MainresultDF) +
  geom_boxplot(aes(x = k, y = perplexity, group = kalpha, color = myalpha)) +
  scale_color_discrete(name = "Alpha Levels") +
  xlab("K (Number of Topics)") +
  ylab("Perplexity")
# Identify the maximum of the 2nd derivative of the smoothed perplexity
# curve (the "knee") for the chosen alpha level.
MainresultDF_MYALPHA <- MainresultDF[MainresultDF$myalpha == 0.01, ]
perp.spl <- with(MainresultDF_MYALPHA, smooth.spline(k, perplexity, df = 3))
# BUG FIX: the original wrapped predict() in with(cars, ...), evaluating
# inside R's built-in `cars` dataset; predict() needs no data mask here.
plot(predict(perp.spl, x = MainresultDF_MYALPHA$k, deriv = 2), type = "l")
abline(v = 60)
# Run LDA with optimal values of alpha 0.01 and k=60 on the full data.
# Corpus construction mirrors the 10% sample pipeline above.
mycorpus <- corpus(data3)
stopwords_and_single<-c(stopwords("english"),LETTERS,letters, "t.co", "http", "https", "rt", "p", "amp", "via")
dfm_counts <- dfm(mycorpus,tolower = TRUE, remove_punct = TRUE,remove_numbers=TRUE,
remove = stopwords_and_single,stem = FALSE,
remove_separators=TRUE)
docnames(dfm_counts)<-dfm_counts@docvars$index
dfm_counts2<-dfm_trim(dfm_counts, max_docfreq = 0.95, min_docfreq=0.0001,docfreq_type="prop")
dtm_lda <- convert(dfm_counts2, to = "topicmodels",docvars = dfm_counts2@docvars)
# Fixed seed for reproducibility of the Gibbs sampler.
LDA.60 <- LDA(dtm_lda, k = 60, method = "Gibbs",control = list(alpha=0.01,seed=125231))
LDAfit<-LDA.60
#########################################################################
###########                                                       #######
###########           Analyzing the Topics                        #######
###########                                                       #######
#########################################################################
# Printing main files for analysis (WORDS/FREX/TEXTS)
# Column of the data frame whose text is printed for the top documents.
# NOTE(review): column 13 is "orig_text" after the earlier rename (column 34
# holds the cleaned "text") -- confirm which column is intended here.
datacolnum <- 13

## Write three xlsx files for a fitted LDA model:
##   <k>_<alpha>_ALLBOTS_Topwords.xlsx - top-50 words per topic by beta
##   <k>_<alpha>_ALLBOTS_TopFREX.xlsx  - top-50 FREX (frequent+exclusive) words
##   <k>_<alpha>_ALLBOTS_TopTexts.xlsx - top-50 documents per topic by theta
## Relies on globals `data3` (the modeled corpus) and `datacolnum`.
extract_topic_xls <- function(eachLDA) {
  LDAfit <- eachLDA

  ## ---- Top words per topic (highest beta) ----
  mybeta <- data.frame(LDAfit@beta)
  colnames(mybeta) <- LDAfit@terms
  mybeta <- t(mybeta)                 # terms in rows, topics in columns
  colnames(mybeta) <- seq(1:ncol(mybeta))
  mybeta <- exp(mybeta)               # beta slot holds log-probabilities

  nwords <- 50
  topwords <- mybeta[1:nwords, ]
  for (i in 1:LDAfit@k) {
    tempframe <- mybeta[order(-mybeta[, i]), ]
    topwords[, i] <- as.vector(rownames(tempframe[1:nwords, ]))
  }
  rownames(topwords) <- c(1:nwords)
  kalpha <- paste0(as.character(LDAfit@k), "_", gsub("\\.", "", as.character(LDAfit@alpha)))
  openxlsx::write.xlsx(topwords, paste0(kalpha, "_ALLBOTS_Topwords.xlsx"))

  ## ---- FREX (frequent AND exclusive) words ----
  ## Harmonic mean of exclusivity (beta / word total across topics) and raw
  ## frequency (beta), weighted by myw. Vectorized: `mybeta / word_beta_sums`
  ## recycles column-wise, dividing every topic column by the per-word totals;
  ## this computes exactly what the original elementwise double loop did,
  ## without the O(V*K) scalar iteration.
  myw <- 0.3
  word_beta_sums <- rowSums(mybeta)
  my_beta_for_frex <- 1 / (myw / (mybeta / word_beta_sums) + (1 - myw) / mybeta)

  topwords <- my_beta_for_frex[1:nwords, ]
  for (i in 1:LDAfit@k) {
    tempframe <- my_beta_for_frex[order(-my_beta_for_frex[, i]), ]
    topwords[, i] <- as.vector(rownames(tempframe[1:nwords, ]))
  }
  rownames(topwords) <- c(1:nwords)
  openxlsx::write.xlsx(topwords, paste0(kalpha, "_ALLBOTS_TopFREX.xlsx"))

  ## ---- Top texts per topic (highest theta) ----
  data33 <- data3
  data33$index <- as.character(data33$index)
  # Drop rows that were removed during dfm trimming (absent from the model).
  deleted_lda_texts <- setdiff(data33$index, LDAfit@documents)
  `%!in%` <- function(x, y) !('%in%'(x, y))
  data33 <- data33[data33$index %!in% deleted_lda_texts, ]
  meta_theta_df <- cbind(data33[datacolnum], LDAfit@gamma)

  ntext <- 50
  toptexts <- mybeta[1:ntext, ]        # reuse as an ntext x k container
  for (i in 1:LDAfit@k) {
    # Column i+1 of meta_theta_df is topic i's theta (column 1 is the text).
    tempframe <- meta_theta_df[order(-meta_theta_df[, i + 1]), ]
    tempframe <- tempframe[1:ntext, ]
    toptexts[, i] <- as.vector(tempframe[, 1])
  }
  rownames(toptexts) <- c(1:ntext)
  openxlsx::write.xlsx(toptexts, paste0(kalpha, "_ALLBOTS_TopTexts.xlsx"))
}

## Apply function to model
extract_topic_xls(LDAfit)
# Returning the duplicate texts deleted earlier: rebuild the metadata+theta
# frame for modeled documents, then re-attach the removed duplicates (which
# share the same text, hence the same topic loadings) via a join on "text".
data33<-data3
data33$index<-as.character(data33$index)
deleted_lda_texts<-(setdiff(data33$index, LDAfit@documents))
#deleted_lda_texts2<-(setdiff(as.character(LDAfit@documents),as.character(data3$doc_id)))
#deleted_lda_texts<-unique(c(deleted_lda_texts1,deleted_lda_texts2))
'%!in%' <- function(x,y)!('%in%'(x,y))
#data33<-data3
data33<-data33[data33$index %!in% deleted_lda_texts,]
metadf<-data33
meta_theta_df<-cbind(metadf,LDAfit@gamma)
removed_df2<-inner_join(removed_df,meta_theta_df,by="text")
# Drop the duplicated metadata columns brought in by the join, then strip
# the ".x" suffix so column names match meta_theta_df again.
removed_df2<-removed_df2[,-c(37:73)]
colnames(removed_df2)<-gsub("\\.x","",colnames(removed_df2))
removed_df2$index<-as.character(removed_df2$index)
meta_theta_df2<-bind_rows(meta_theta_df,removed_df2)
meta_theta_df<-meta_theta_df2
rm(meta_theta_df2)
# Marking the users and tweets which are vaccine related:
# any tweet whose raw text matches the stem "vaccin" flags its author.
vaccine_words <- c("vaccin")
vactweets <- meta_theta_df[grep(paste0(vaccine_words, collapse = "|"), meta_theta_df$tweet_text, value=FALSE),]
vacc_users<-unique(vactweets$userid)
meta_theta_df2<-meta_theta_df
meta_theta_df2$vacc_user<-"no_vacc"
meta_theta_df2[which(meta_theta_df2$userid %in% vacc_users),"vacc_user"]<-"yes_vac"
# Running ANTMN function -
# cite: Walter and Ophir (2019) News Frame Analysis. Communication Methods and Measures 13(4), 248-266
# Build a topic-similarity network from a fitted LDA model (ANTMN method;
# Walter & Ophir 2019). Topics become nodes, cosine similarity of their
# document loadings becomes edge weight, and five community-detection
# algorithms are attached as node attributes.
#   LDAobject      fitted topicmodels LDA object (uses @gamma and @k)
#   deleted_topics vertex ids of "garbage" topics to drop from the graph
#   topic_names    optional vector of node names
#   save_filename  if non-empty, the graph is written to <name>.graphml
#   topic_size     optional vector stored as a node size attribute
# Returns the resulting igraph object.
network_from_LDA<-function(LDAobject,deleted_topics=c(),topic_names=c(),save_filename="",topic_size=c()) {
  require(lsa)    # cosine()
  require(dplyr)  # general utility
  require(igraph) # graph construction and community detection
  print("Importing model")
  # Document-topic distribution; columns labelled 1..k so vertex labels are
  # the topic numbers.
  topic_loadings <- LDAobject@gamma
  colnames(topic_loadings) <- c(1:LDAobject@k)
  # Cosine similarity between topics over documents = adjacency matrix.
  adjacency <- cosine(as.matrix(topic_loadings))
  colnames(adjacency) <- colnames(topic_loadings)
  rownames(adjacency) <- colnames(topic_loadings)
  # Undirected weighted graph, no self-loops; column names become labels.
  print("Creating graph")
  net <- graph.adjacency(adjacency, mode = "undirected", weighted = TRUE,
                         diag = FALSE, add.colnames = "label")
  if (length(topic_names) > 0) {
    print("Topic names added")
    V(net)$name <- topic_names
  }
  if (length(topic_size) > 0) {
    print("Topic sizes added")
    V(net)$topic_size <- topic_size
  }
  out_net <- net
  if (length(deleted_topics) > 0) {
    print("Deleting requested topics")
    out_net <- delete_vertices(net, deleted_topics)
  }
  # Attach community memberships from five algorithms. The calls run in the
  # same order as before so that any RNG-dependent results are unchanged.
  print("Calculating communities")
  V(out_net)$louvain   <- cluster_louvain(out_net)$membership
  V(out_net)$walktrap  <- cluster_walktrap(out_net)$membership
  V(out_net)$spinglass <- cluster_spinglass(out_net)$membership
  V(out_net)$fastgreed <- cluster_fast_greedy(out_net)$membership
  V(out_net)$eigen     <- cluster_leading_eigen(out_net)$membership
  # Optionally persist as GraphML (openable with Gephi).
  if (nchar(save_filename) > 0) {
    print("Writing graph")
    write.graph(out_net, paste0(save_filename, ".graphml"), format = "graphml")
  }
  out_net
}
# Manually-assigned labels for the 60 topics (in topic order).
# NOTE(review): this vector is not referenced below -- the call passes
# `mynames`, which is not defined in this excerpt. Presumably `mynames` is
# (or should be) `costum_topic_labels`; confirm upstream.
costum_topic_labels<-c("Crime", "Disasters and disruptions", "Aphorisms", "Municipalities", "BlackLivesMatter", "Arrests", "Clinton emails", "Victims and deaths", "Leisure activities", "Race whites and blacks", "North Korea", "education", "Sports", "Police brutality", "Extremist groups", "Finances", "Legal issues", "Black history", "Terrorist attacks", "Watch listen", "Criminals", "Presidential elections", "Mixed and science", "Islamist violence", "Obama and Trump", "Laws ", "Sports", "Blessings holidays congrats", "Celebs", "Imitating black southern language", "Trump activities and nominations", "RT of a user writing about loans", "Companies and business", "Diets", "Crime", "Sports", "Immigration", "Pro Trump Anti his enemies", "GOP primaries", "Aphorisms", "Weather", "Anti media and Dems", "Mixed mundane", "Taxes budgets money", "Bills", "Supporting Trump", "Primaries", "Attacks on fake media", "Ukranian nuclear crisis", "Workout", "Sports", "Mixed", "Health and science", "Sports", "Accidents", "Mueller invistigation", "Foreign affairs", "Mixed PP and personal attacks", "Sexual misconduct", "Conservative tweets from pjnet and tcot")
# Build the topic network and write it to TOPIC_ALL_BOT_NET.graphml.
mynewnet<-network_from_LDA(LDAobject=LDAfit,
                           topic_names=mynames,
                           save_filename="TOPIC_ALL_BOT_NET")
#########################################################################
########### #######
########### Thematic Communities Analysis #######
########### #######
#########################################################################
# Prepare data at the user level (one row per user).
# NOTE(review): the column positions below (37:..., 26, 97) are hard-coded to
# this data frame's layout -- 37 onward are the topic-loading columns, 26 the
# retweet count, 97 the vacc_user flag; confirm if the schema changes.
## average topic loading per user
meta_theta_by_user<-aggregate(x = meta_theta_df2[,c(37:(ncol(meta_theta_df2)-1))],
                              by = list(meta_theta_df2$userid), FUN = "mean")
## retweet sum per user
meta_theta_by_user_retweets<-aggregate(x = meta_theta_df2[,c(26)],
                                       by = list(meta_theta_df2$userid), FUN = "sum")
## user engagement with vaccines (the per-user yes_vac/no_vacc flag)
meta_theta_by_user_ynvac<-aggregate(x = meta_theta_df2[,97],
                                    by = list(meta_theta_df2$userid), FUN = "unique")
## user lifetime volume of tweets (count rows via a constant-1 column)
meta_theta_df3<-meta_theta_df2
meta_theta_df3$forsum<-1
meta_theta_by_user_volume<-aggregate(x = meta_theta_df3[,"forsum"],
                                     by = list(meta_theta_df3$userid), FUN = "sum")
rm(meta_theta_df3)
## combine to user-level data: users as rownames, then transpose so that
## users are columns (cosine() below compares columns).
rownames(meta_theta_by_user)<-meta_theta_by_user$Group.1
meta_theta_by_user<-meta_theta_by_user[,-1]
meta_theta_by_user2<-t(meta_theta_by_user)
# correlate users by their topic loading (cosine similarity user x user)
mybeta_cor<-cosine(as.matrix(meta_theta_by_user2))
# create the undirected weighted user-similarity network
sem_net_weighted<-graph.adjacency(mybeta_cor,mode="undirected",weighted=T,diag=F,add.colnames="label") # Assign colnames
# add node-level metadata from the previous aggregations (order of rows in
# the aggregate() outputs matches the vertex order)
V(sem_net_weighted)$name<-V(sem_net_weighted)$label
V(sem_net_weighted)$ynvac<-meta_theta_by_user_ynvac$x
V(sem_net_weighted)$size<-meta_theta_by_user_volume$x
V(sem_net_weighted)$retw<-meta_theta_by_user_retweets$x
# Keep only significant edges (disparity filter backbone, alpha = 0.05);
# seeds fixed for reproducibility of the stochastic steps.
set.seed(763423)
g<-disparity_filter(g=sem_net_weighted,alpha=0.05)
# run community detection (Louvain) on the backbone
set.seed(433547)
V(g)$louvain<-(cluster_louvain(g)) $membership
# export the graph for Gephi
saveAsGEXF(g,"outputTEMP/TROLL_USER_NET_bbone_new.gexf")
print("done!")
# Explore the Communities
## Find users most central for each community.
### For every node: total edge weight (strength) overall vs. inside its own
### community; commstr = within/overall measures how "community-bound" a
### user is.
nodelist<-list()
for (node in 1:length(V(g))) {
  print(node)
  # strength over the whole backbone graph
  outside<-strength(g, vids = V(g)[node])
  # subgraph of this node's Louvain community, strength inside it
  tempg<-induced_subgraph(g,V(g)$louvain==V(g)$louvain[node])
  inside<-strength(tempg, vids = V(tempg)$label==V(g)[node]$label)
  nodelist[[node]]<-data.frame(
    node=node,label=V(g)[node]$label,inside,comm=V(g)$louvain[node],between=outside,within=inside,commstr=inside/outside)
}
user_comm_df<-do.call(rbind,nodelist)
### grab for each community the top 20 users by commstr
### NOTE(review): communities with fewer than 20 members yield NA entries
### (label[1:20] pads with NA) -- downstream %in% filters tolerate this.
top_user_com_df<-data.frame(matrix(NA, nrow = 20, ncol = length(unique(user_comm_df$comm))))
for (i in 1:max(user_comm_df$comm)) {
  print (i)
  temp_df<-user_comm_df[user_comm_df$comm==i,]
  temp_df<-temp_df[order(temp_df$commstr,decreasing = TRUE),]
  towrite<-temp_df$label[1:20]
  top_user_com_df[,i]<-towrite
}
## print top tweets
### For each community: keep tweets authored by its top-20 users, draw up to
### 200 of them at random, store them and write them to an xlsx file.
comm_tweets_list<-list()
for (i in 1:max(user_comm_df$comm)) {
  print(i)
  # tweets authored by this community's top users
  temp_meta_theta_df<-meta_theta_df2[meta_theta_df2$userid %in% top_user_com_df[,i],]
  # BUG FIX: sample(n, 200) errors when fewer than 200 tweets are available;
  # cap the sample size at the number of rows actually present.
  n_sample<-min(200, nrow(temp_meta_theta_df))
  temp_meta_theta_df<- temp_meta_theta_df[sample(nrow(temp_meta_theta_df), n_sample), ]
  # NOTE: c() on a data.frame stores it as a plain list of columns
  comm_tweets_list[[i]]<-c(temp_meta_theta_df)
  write.xlsx(temp_meta_theta_df,paste0(as.character(i),"_COMM_200_tweets.xlsx"))
}
## for each community, export the vaccine-related tweets that appeared in it
## (all members, not just top users); files are only written for communities
## that actually contain matching tweets
vaccine_words <- c("vaccin")
for (i in 1:max(user_comm_df$comm)) {
  print(i)
  # members of community i, then their tweets
  temp_df<-user_comm_df[user_comm_df$comm==i,]
  temp_meta_theta_df<-meta_theta_df2[meta_theta_df2$userid %in% temp_df$label,]
  # keep only tweets whose text matches the vaccine stem(s)
  temp_meta_theta_df <- temp_meta_theta_df[grep(paste0(vaccine_words, collapse = "|"),
                                                temp_meta_theta_df$tweet_text, value=FALSE),]
  if (nrow(temp_meta_theta_df)>0) {
    write.xlsx(temp_meta_theta_df,paste0(as.character(i),"_COMM_VAC_tweets.xlsx"))
  }
}
# Calculate the salience of vaccine-related content in each community.
## by number of tweets: ratio = vaccine tweets / all tweets in the community
vacc_comm_ratio<-list()
for (i in 1:max(user_comm_df$comm)) {
  print(i)
  temp_df<-user_comm_df[user_comm_df$comm==i,]
  temp_meta_theta_df<-meta_theta_df2[meta_theta_df2$userid %in% temp_df$label,]
  nrow1<-nrow(temp_meta_theta_df)
  # restrict to vaccine-related tweets
  temp_meta_theta_df <- temp_meta_theta_df[grep(paste0(vaccine_words, collapse = "|"),
                                                temp_meta_theta_df$tweet_text, value=FALSE),]
  nrow2<-nrow(temp_meta_theta_df)
  vacc_comm_ratio[[i]]<-c(i,(nrow2/nrow1))
}
# assemble (comm, ratio) table and print it sorted ascending by ratio
toprint<-t(data.frame(vacc_comm_ratio))
rownames(toprint)<-c()
colnames(toprint)<-c("comm","ratio")
toprint<-as.data.frame(toprint)
toprint[order(toprint$ratio),]
## by number of users: ratio = users with vaccine tweets / all users
vacc_user_ratio<-list()
for (i in 1:max(user_comm_df$comm)) {
  print(i)
  temp_df<-user_comm_df[user_comm_df$comm==i,]
  temp_meta_theta_df<-meta_theta_df2[meta_theta_df2$userid %in% temp_df$label,]
  nrow1<-length(unique(temp_meta_theta_df$userid))
  temp_meta_theta_df <- temp_meta_theta_df[grep(paste0(vaccine_words, collapse = "|"),
                                                temp_meta_theta_df$tweet_text, value=FALSE),]
  nrow2<-length(unique(temp_meta_theta_df$userid))
  vacc_user_ratio[[i]]<-c(i,(nrow2/nrow1))
}
# same presentation as above, for the user-based ratio
toprint<-t(data.frame(vacc_user_ratio))
rownames(toprint)<-c()
colnames(toprint)<-c("comm","ratio")
toprint<-as.data.frame(toprint)
toprint[order(toprint$ratio),]
## Print the top words for each community as a faceted bar chart.
# Human-readable facet labels, one per community (in community order).
commlabels<-c("1: Hard News",
              "2: Anti-Trump",
              "3: Pro-Trump",
              "4: Youth Talk and Celebs",
              "5: African American and BLM",
              "6: Mixed International",
              "7: Ukraine",
              "8: Soft News",
              "9: Retweets and Hashtag Games")
# NOTE(review): comm_as_string is not created in this excerpt -- presumably a
# data frame with one row per community holding all its text; confirm
# upstream. The first row is dropped here.
comm_as_string<-comm_as_string[-1,]
# Tokenize each community's text and count word frequencies per community.
comm_words <- comm_as_string %>%
  unnest_tokens(word, text) %>%
  count(comm, word, sort = TRUE)
# Drop stopwords plus extra fillers, keep the 15 most frequent words per
# community, and compute a per-row plotting order for the free-scale facets.
pd<-comm_words %>%
  filter(!word %in% c(stopwords_and_single,"just","will")) %>%
  arrange(desc(n)) %>%
  mutate(word = factor(word, levels = rev(unique(word)))) %>%
  group_by(comm) %>%
  top_n(15) %>%
  ungroup() %>%
  arrange(comm, n) %>%
  mutate(order = row_number())
# Horizontal bar chart, one facet per community.
ggplot(pd, aes(order, n)) +
  geom_bar(stat = "identity", show.legend = FALSE) +
  facet_wrap(~ comm, scales = "free", labeller=labeller(comm = commlabels)) +
  xlab("") +
  ylab("Freq") +
  theme_bw() +
  # Map the synthetic order variable back to the word labels on the axis
  scale_x_continuous(
    breaks = pd$order,
    labels = pd$word,
    expand = c(0,0)
  ) +
  ylim(0,120000)+
  coord_flip()
|
307131f173f5cfc9545417ea1fc0955b7f5fd8c5
|
0c6b7e0a02150655d3391a91a8aae3b806d4a6c7
|
/Scripts/Analisis Nacional/scrap_ALL_sinca.R
|
7ea247f3ca1b4c6c53fc198368ec2413bfe9ae39
|
[] |
no_license
|
pmbusch/Reportes-SINCA
|
f3a23699334821c20457e109a5a1d714ee9b9691
|
4607f692aa5c109eaa4a5dfead9b0ace72c4c55a
|
refs/heads/master
| 2023-02-22T04:06:44.125280
| 2023-02-05T00:10:43
| 2023-02-05T00:10:43
| 279,363,628
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,864
|
r
|
scrap_ALL_sinca.R
|
### Proyecto SINCA
## Descarga Masiva Datos SINCA. Descarga unicamente los CSV, sin modificarlos
## PBH Jul 2020
## Ultima atualizacion: PBH Jul 2020
## Load station data -------------
# Required helper scripts (utility functions and the SINCA scraper helpers)
source('Scripts/00-Funciones.R')
source('Scripts/03_f_scrap_sinca.R')
library(tidyverse)
library(lubridate)
library(ggmap)
library(ggrepel)
# Column type codes for read_delim: c character, d double, D date, l logical
cols_type <- "ccclccccdddccccDDccDccccclcccccc"
# Station metadata: ;-delimited, windows-1252 encoded, first line skipped
df_estaciones <- read_delim("Data/DatosEstacioneSINCA.csv",
                            delim = ";", skip = 1, na = c("NA"),
                            col_types = cols_type,
                            locale = locale(encoding = "windows-1252"))
rm(cols_type)
# Interactive sanity checks on the parsed columns
spec(df_estaciones)
df_estaciones %>% names
df_estaciones$region %>% unique()
df_estaciones$pollutant %>% unique()
# Temporal limits for the download.
# If the series was up to date when the metadata was collected, extend the
# end date to today; parse both limit columns into Date objects.
df_estaciones <- df_estaciones %>%
  mutate(
    contaminante_fechaFin = if_else(fecha_fin_actual, Sys.Date(), contaminante_fechaFin),
    from = contaminante_fechaInicio %>%
      strptime(format='%Y-%m-%d') %>% as_date(),
    to = contaminante_fechaFin %>%
      strptime(format='%Y-%m-%d') %>% as_date())
# Reformat dates into the download format YYMMDD (e.g. 2015-12-31 -> 151231).
# f_split_n() comes from the sourced helper scripts.
df_estaciones <- df_estaciones %>%
  mutate(from=paste(f_split_n(from,'-',1) %>% str_sub(3,4),
                    f_split_n(from,'-',2),
                    f_split_n(from,'-',3),sep=''),
         to=paste(f_split_n(to,'-',1) %>% str_sub(3,4),
                  f_split_n(to,'-',2),
                  f_split_n(to,'-',3),sep=''))
# DOWNLOAD CONCENTRATION DATA -----------------
# sprintf template; the three %s slots are filled with macro, from, to.
# BUG FIX: the "&macro=" query parameter had been mangled into the single
# character "¯" (the "&macr" HTML entity) by an earlier encoding round-trip,
# which breaks the API request; restored the literal "&macro=".
url <- 'https://sinca.mma.gob.cl/cgi-bin/APUB-MMA/apub.tsindico2.cgi?outtype=xcl&macro=%s&from=%s&to=%s&path=/usr/airviro/data/CONAMA/&lang=esp'
# Build each station/pollutant row's download URL from its parameters
df_estaciones <- df_estaciones %>% mutate(url_descarga=sprintf(url,macro,from,to))
# Download loop
# Hierarchy: region -> province -> file per station_pollutant.
regiones <- df_estaciones$region %>% unique()
for (r in regiones){
  cat("Descargando Region ",r,"\n", sep = "")
  dir.create(paste("Data/Provincias/",r,sep=""), showWarnings = F)
  provincias <- df_estaciones %>% filter(region==r) %>% pull(provincia) %>% unique()
  for (p in provincias){
    cat("Descargando Provincia de ",p,"\n", sep = "")
    dir.create(paste("Data/Provincias/",r,"/",p,sep=""), showWarnings = F)
    ## Map of the province's stations -----------
    estaciones_mapa <- df_estaciones %>% filter(provincia==p) %>%
      group_by(estacion) %>%
      summarise(longitud=first(longitud), latitud=first(latitud))%>%
      ungroup()
    location <- make_bbox(estaciones_mapa$longitud,estaciones_mapa$latitud, f=0.35)
    # BUG FIX: this condition was nrow(estaciones_mapa==1), i.e. nrow() of a
    # logical matrix, which is non-zero (truthy) whenever the table has rows;
    # compare the row count itself so the bbox is only padded for a single
    # station (make_bbox degenerates to a point in that case).
    if (nrow(estaciones_mapa) == 1){
      location <- location+0.01*c(-1, -1, 1, 1)
    }
    # BUG FIX: the tile source was misspelled "stame"; ggmap expects "stamen".
    map <- get_map(location=location, source = "stamen",
                   maptype = "terrain", crop=T)
    m <- ggmap(map, extent = "device")+
      geom_point(data=estaciones_mapa, aes(x=longitud, y=latitud, col=estacion), size=3)+
      scale_color_viridis_d()+
      theme(legend.position = "none",
            axis.line = element_blank(),
            axis.text = element_blank(),
            axis.ticks = element_blank())+xlab('')+ylab('')+
      geom_label_repel(data=estaciones_mapa, aes(x=longitud, y=latitud, label=estacion))
    ggsave(filename = paste("Data/Provincias/",r,"/",p,"/1Mapa_",p,".png",sep=""),
           plot=m, dpi = 300)
    rm(location, estaciones_mapa, map, m)
    ## Download concentration data ----------
    df_descarga <- df_estaciones %>% filter(provincia==p)
    cat(nrow(df_descarga)," archivos a descargar","\n", sep = "")
    for (d in seq_len(nrow(df_descarga))){
      # station name without spaces/apostrophes for the filename
      sitio <- df_descarga$estacion[d] %>% str_remove_all(" |'")
      # BUG FIX: files were written under "Data Scrap/Provincias/..." while
      # the directories above are created under "Data/Provincias/...", so
      # every download.file() failed (silently, via tryCatch) on the missing
      # folder; use the same root as dir.create().
      destino <- paste("Data/Provincias/",r,"/",p,"/",
                       sitio,"_",
                       df_descarga$pollutant[d],
                       ".csv",sep="")
      # best-effort download: a failed file is skipped, not fatal
      tryCatch(
        {
          download.file(df_descarga$url_descarga[d],destfile = destino)
        },
        error = function(cond) return(NULL))
    }
    rm(df_descarga)
  }
}
# Create ReadMe ---------
# Write a ReadMe (Spanish, matching the data) recording the download date and
# describing the directory layout. append = F overwrites any previous file.
cat("Este directorio contiene los datos descargados del SINCA ",
    "para todas las estaciones con información disponible. \n\n",
    "Fecha de descarga de los datos: ",format(Sys.time(),'%d-%m-%Y'),"\n\n",
    "Los datos descargados son a nivel HORARIO, por estacion y contaminante \n\n",
    "Cada carpeta a nivel de provincia incluye un mapa con la ubicación ",
    "de las estaciones de monitoreo \n",
    file = "Data/Provincias/ReadMe.txt", sep="", append = F)
## EoF
|
aa0598bbdf7cbc422f71cb9ffd3bcfc777419f4a
|
f2643256c6611d7de0db96d162f594388c2c2c50
|
/analyses/Trial 2/uniquemobilenumbers.R
|
15819b22ea95e8e90f8e341a3219c5a93e875d3e
|
[] |
no_license
|
raubreywhite/trial_dofiles
|
e06a5b3b39e9195eda79dd33856d67c918ec4053
|
eface3b83b107cf7e621b3c654e65b5cbd45b711
|
refs/heads/master
| 2022-06-14T03:26:17.492945
| 2022-06-02T07:27:04
| 2022-06-02T07:27:04
| 114,857,557
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,065
|
r
|
uniquemobilenumbers.R
|
# Cohort-size sanity checks (values print when run interactively).
# `d` is a data.table defined upstream (note the := syntax below).
nrow(d)
nrow(d[(ident_dhis2_control==FALSE &
          ident_dhis2_booking==TRUE)])
# NOTE(review): this coercion result is discarded -- presumably an
# interactive check that `mobile` parses as numeric; confirm before removing.
as.numeric(d$mobile)
# Unique (booking month, district, mobile) combinations for booked,
# non-control participants.
uniquemoblienumbers <-unique(d[ident_dhis2_control==FALSE &
                                 ident_dhis2_booking==TRUE,
                               c("bookyearmonth",
                                 "bookorgdistrict",
                                 "mobile")])
openxlsx::write.xlsx(uniquemoblienumbers,file.path(FOLDER_DATA_RESULTS,
                                                   "satisfaction",
                                                   "uniquemobilenumbers_districts.xlsx"))
#Feb 3, 2019
#Manual check for 050 numbers is 175
#Manual check for 052 numbers is 466
#Manual check for 053 numbers is: 171
#Manual check for 054 numbers is: 126
#Manual check for 055 numbers is: 40
#Manual check for 058 numbers is: 148
#these total to: 1126 different out of 32323
d[,mobile_number_israeli:=FALSE]
# BUG FIX: str_detect() was applied to the whole table (which coerces the
# data.table to a character vector of its columns) instead of the `mobile`
# column, so the "^052" prefix never matched the phone numbers; filter rows
# on the mobile column instead.
vars<- uniquemoblienumbers[stringr::str_detect(uniquemoblienumbers$mobile, "^052")]
# for(i in vars){
#
#
#
# }
|
58a67329b215c28d68415397f0d2775428a6a081
|
6d98dfe9d7ed3319ccda5cc7978885114c68c09d
|
/man/fseq_env.Rd
|
e7c9166172324311caea7856aa5a786d4194d5d6
|
[
"MIT"
] |
permissive
|
MyKo101/mpipe
|
44c552abc758f316ba6aed2b03f933a4afa43383
|
d1e16c77525288123b0491b836bcac05f0c0b790
|
refs/heads/main
| 2023-02-22T04:27:48.580286
| 2021-01-31T01:32:54
| 2021-01-31T01:32:54
| 256,473,147
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,973
|
rd
|
fseq_env.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fseq_env.R
\name{fseq_env}
\alias{fseq_env}
\alias{fseq_get_env}
\alias{fseq_print_env}
\alias{fseq_set_env}
\alias{fseq_check_env}
\alias{fseq_copy_fun}
\alias{fun_var_env}
\title{Alter the environments of fseq functions}
\usage{
fseq_get_env(fn_fseq, i)
fseq_print_env(fn_fseq, i)
fseq_set_env(fn_fseq, i, new_env)
fseq_check_env(new_env)
fseq_copy_fun(fn_fseq)
fun_var_env(fun, variable, value)
}
\arguments{
\item{fn_fseq}{a function of class \code{fseq} (with an appropriate environment).}
\item{i}{a numeric indicating which function in the \code{fseq} to refer to. If
0, then the parent \code{fseq} environment will be used.}
\item{new_env}{environment to be assigned to the relevant environment in \code{fn_fseq}.}
\item{fun}{function to edit the environment of.}
\item{variable}{name of variable (as string) to assign value to within all
environments in \code{fun}.}
\item{value}{value to be assigned to \code{variable} in all environments in \code{fun}}
}
\description{
These functions allows access to the environments contained
within \code{fseq} objects. \code{fseq} objects are functions that have been
created using the pipeline with a \code{.} as the left-hand side.
\code{fseq} functions contain a major environment for the overall
function (i.e the environment of \code{fn_fseq}) and multiple
minor environments. The minor environments are the
function-environments of each of functions that make up the
functional sequence.
For example, if we define \code{a <- . \%>\% add(2) \%>\% multiply_by(3)},
then \code{a()} will be a function that has an environment, this is the
major environment. Within \code{a()}, there is a list of functions that
make up the function sequence, \code{add(.,2)} and \code{multiply_by(.,3)}.
Each of these will also have their own environment, the minor
environments. Therefore, \code{a()} will have 3 environments associated
with it.
}
|
539c0ee8880a839dafda9115927287a15c7be708
|
d834d99c197aab4256952dcb9b6575f3f6cd1282
|
/volumes_interpolation.R
|
e33332ec634310f8e072ff3f91372b3a9381b269
|
[
"MIT"
] |
permissive
|
rmsandu/R-data-analysis
|
07a7760de2ae5d21477e0f29d818651f7a6f2cb1
|
15c522e56df068354d6140bee6c7189412488275
|
refs/heads/master
| 2022-09-17T23:56:51.489101
| 2020-06-04T17:56:30
| 2020-06-04T17:56:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 313
|
r
|
volumes_interpolation.R
|
library(tableone)
library(dplyr)
library(ggpubr)
library(xlsx)
# NOTE(review): setwd() plus absolute Windows paths makes this script
# machine-specific; consider relative paths or here::here().
setwd("C:/develop/data-analysis")
# Load the two input tables and echo them for inspection.
data_ellispoid <- read.csv("C:/develop/data-analysis/Ellipsoid_Brochure_Info.csv")
print(data_ellispoid)
data_radiomics <- read.csv("C:/develop/data-analysis/Radiomics_MAVERRIC_111119.csv")
print(data_radiomics)
|
35ef9303dd42b95527ab01a78ee21501a77d3254
|
e1c092c2a59f0998366612a309f9147cbcee423a
|
/R/d.e.mcmc.R
|
404e699a1077491b7ac6406bd1821e1aa5da081a
|
[] |
no_license
|
SandaD/MCMCEnsembleSampler
|
58d88cd0314628229908764ac3f5d65450af868f
|
7b5cdddc139c66a0c5d139b5fa74252979e4a43c
|
refs/heads/master
| 2020-07-05T01:30:45.217542
| 2018-04-25T14:52:36
| 2018-04-25T14:52:36
| 74,132,038
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,765
|
r
|
d.e.mcmc.R
|
## ................................
## MCMC Ensemble Sampler
## ..........................
## Sanda Dejanic - R implementation of Ter Braak's differential evolution move
## ...........................
##' MCMC Ensemble sampler with the differential evolution jump move
##'
##' Markov Chain Monte Carlo sampler: using the differential evolution jump move (implementation of the Ter Braak differential evolution)
##'
##' @param f a funtion to sample from
##' @param max.iter the maximum number of function evaluations
##' @param n.walkers the number of walkers (ensemble size)
##' @param n.dim the number of parameters to sample
##' @param init.range a matrix(nrow=n.dim, ncol=2) defying the initial range for all the parameters, every row of the matrix should contain the lower and the upper limit
##' @param ... all additional agruments of f
##'
##' @return List containing: \code{samples[n.walkers,chain.length,n.dim] and $log.p[n.walkers,chain.length]}
##' @export
d.e.mcmc <- function(f, max.iter, n.walkers, n.dim, init.range, ...) {
  # MCMC ensemble sampler using ter Braak's differential-evolution jump move.
  #   f          : log-density function, called as f(par, ...)
  #   max.iter   : maximum number of function evaluations
  #   n.walkers  : ensemble size
  #   n.dim      : number of parameters
  #   init.range : n.dim x 2 matrix; each row is (lower, upper) for the
  #                uniform initialisation of that parameter
  # Returns list(samples = [n.walkers, chain.length, n.dim],
  #              log.p   = [n.walkers, chain.length]).
  ## initial values
  chain.length <- max.iter %/% n.walkers
  log.p <- matrix(NA, nrow = n.walkers, ncol = chain.length)
  log.p.old <- rep(NA, n.walkers)
  ensemble.old <- matrix(NA, nrow = n.walkers, ncol = n.dim)
  ensemble.new <- matrix(NA, nrow = n.walkers, ncol = n.dim)
  samples <- array(NA, dim = c(n.walkers, chain.length, n.dim))
  # (an unused 'mcmc.object' array allocation was removed here)
  # Draw each walker's start uniformly within init.range and evaluate f.
  for (k in 1:n.walkers) {
    for (g in 1:n.dim) {
      ensemble.old[k, g] <- runif(1, init.range[g, 1], init.range[g, 2])
    }
    log.p.old[k] <- f(ensemble.old[k, ], ...)
  }
  log.p[, 1] <- log.p.old
  samples[, 1, ] <- ensemble.old
  ## the loop
  for (l in 2:chain.length) {
    for (n in 1:n.walkers) {
      # standard DE scale factor; every 10th generation use z = 1
      z <- 2.38 / sqrt(2 * n.dim)
      if (l %% 10 == 0) {
        z <- 1
      }
      # pick two distinct other walkers and propose a DE jump
      a <- sample((1:n.walkers)[-n], 1)
      b <- sample((1:n.walkers)[-c(n, a)], 1)
      par.active.1 <- ensemble.old[a, ]
      par.active.2 <- ensemble.old[b, ]
      ensemble.new[n, ] <- ensemble.old[n, ] + z * (par.active.1 - par.active.2)
      log.p.new <- f(ensemble.new[n, ], ...)
      # Metropolis acceptance; non-finite log densities are always rejected
      if (!is.finite(log.p.new)) {
        acc <- 0
      } else {
        acc <- exp(log.p.new - log.p.old[n])
      }
      test <- runif(1)
      if (acc > test) {
        # accept: move the walker and record the new state
        samples[n, l, ] <- ensemble.new[n, ]
        ensemble.old[n, ] <- ensemble.new[n, ]
        log.p[n, l] <- log.p.new
        log.p.old[n] <- log.p.new
      } else {
        # reject: the walker stays where it was
        samples[n, l, ] <- ensemble.old[n, ]
        log.p[n, l] <- log.p.old[n]
      }
    }
  }
  mcmc.list <- list(samples = samples, log.p = log.p)
  return(mcmc.list)
}
|
7d98b93f2add7f724cfda022d85b14b2a5e2470a
|
50066dae4216d17bd6f0dcb9a11d872e73246eb6
|
/man/choose_interp_extrap_method.Rd
|
a9af70617c2905fd724c25a84162e5163579d02a
|
[] |
no_license
|
cran/PKNCA
|
11de9db2cb98279c79d06022415b8772e7c1f5ea
|
8f580da3e3c594e4e1be747cb2d8e35216784ed2
|
refs/heads/master
| 2023-05-10T16:54:19.131987
| 2023-04-29T18:30:02
| 2023-04-29T18:30:02
| 48,085,829
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,544
|
rd
|
choose_interp_extrap_method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interp_extrap_helpers.R
\name{choose_interp_extrap_method}
\alias{choose_interp_extrap_method}
\title{Choose a method for calculation in the interval between concentrations}
\usage{
choose_interp_extrap_method(conc, time, interp_method, extrap_method, tmax)
}
\arguments{
\item{conc}{A vector of concentrations (\code{NA} values are not allowed)}
\item{time}{A vector of times (\code{NA} values are not allowed)}
\item{interp_method}{Method to use for interpolation between time points}
\item{extrap_method}{Method to use for extrapolation after the last time
point above (an AUC calculation method)}
\item{tmax}{Time of maximum concentration}
}
\value{
A character vector of extrapolation methods to use between each
\code{conc} and after the last \code{conc}. Values will be one or more of
"linear" (use linear interpolation), "log" (use log interpolation), "zero"
(the value is zero), and the last value may be "clastpred", "clastobs", or
"zero" indicating extrapolation from tlast using lambda.z and clast,pred or
clast,obs, or zero.
}
\description{
This function should be used for any interpolation/extrapolation function. It
will standardize the method of choosing which method to use for interpolation
and extrapolation.
}
\examples{
PKNCA:::choose_interp_extrap_method(
conc=c(1, 2, 4, 2, 1, 0, 0),
time=0:6,
interp_method="lin up/log down",
extrap_method="aucinf.obs"
)
}
\keyword{Internal}
|
af9eb464b5a13d8ec884984a38e279b2d98a1fb2
|
e189d2945876e7b372d3081f4c3b4195cf443982
|
/man/HF_TASKS_AUTO.Rd
|
3e3bafe30ebf10c1abc8e387d9c62b3843cbe869
|
[
"Apache-2.0"
] |
permissive
|
Cdk29/fastai
|
1f7a50662ed6204846975395927fce750ff65198
|
974677ad9d63fd4fa642a62583a5ae8b1610947b
|
refs/heads/master
| 2023-04-14T09:00:08.682659
| 2021-04-30T12:18:58
| 2021-04-30T12:18:58
| 324,944,638
| 0
| 1
|
Apache-2.0
| 2021-04-21T08:59:47
| 2020-12-28T07:38:23
| null |
UTF-8
|
R
| false
| true
| 237
|
rd
|
HF_TASKS_AUTO.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blurr_hugging_face.R
\name{HF_TASKS_AUTO}
\alias{HF_TASKS_AUTO}
\title{HF_TASKS_AUTO}
\usage{
HF_TASKS_AUTO()
}
\value{
None
}
\description{
An enumeration.
}
|
7bb9bb23a658d71d5db68f4d9424ce8916ab679b
|
26a0244b2ce388e20de3ec4683a8a9e51e4e85a3
|
/man/redmm.Rd
|
6b879747d9990d9763006baad5d0f301e4452f17
|
[] |
no_license
|
covaruber/sommer
|
aeb064451c11c4de3e91e3d353909a3c35770040
|
00b16ebe469472cfa81a07fcb7235ed5b82c28d0
|
refs/heads/master
| 2023-08-17T09:24:22.399784
| 2023-08-09T00:01:26
| 2023-08-09T00:01:26
| 161,532,081
| 26
| 20
| null | 2021-09-23T18:32:51
| 2018-12-12T18:59:08
|
R
|
UTF-8
|
R
| false
| false
| 2,050
|
rd
|
redmm.Rd
|
\name{redmm}
\alias{redmm}
\title{Reduced Model Matrix}
\description{
`redmm` uses a feature matrix M from a random variable x and performs a singular value decomposition or Cholesky on M and creates a model matrix for x.
}
\usage{
redmm(x, M = NULL, Lam=NULL, nPC=50, cholD=FALSE, returnLam=FALSE)
}
\arguments{
\item{x}{as vector with values to form a model matrix or the matrix itself for an effect of interest.}
\item{M}{a matrix of features explaining the levels of x.}
\item{Lam}{a matrix of loadings in case is already available to avoid recomputing it.}
\item{nPC}{number of principal components to keep from the matrix of loadings to form the model matrix.}
\item{cholD}{should a Cholesky decomposition or a singular value decomposition be used. The default is the SVD.}
\item{returnLam}{should the function return the loading matrix in addition to the incidence matrix. Default is FALSE.}
}
\value{
\describe{
\item{$S3}{ A list with 3 elements:
1) The model matrix to be used in the mixed modeling.
2) The reduced matrix of loadings (nPC columns).
3) The full matrix of loadings.
}
}
}
\references{
Covarrubias-Pazaran G (2016) Genome assisted prediction of quantitative traits using the R package sommer. PLoS ONE 11(6): doi:10.1371/journal.pone.0156744
}
\author{
Giovanny Covarrubias-Pazaran
}
\examples{
####=========================================####
#### For CRAN time limitations most lines in the
#### examples are silenced with one '#' mark,
#### remove them and run the examples
####=========================================####
data(DT_technow)
DT <- DT_technow
Md <- Md_technow
M <- tcrossprod(Md)
xx = with(DT, redmm(x=dent, M=M, nPC=10))
# ans <- mmec(GY~1,
# # new model matrix instead of dent
# random=~vsc(isc(xx$Z)),
# rcov=~units,
# data=DT)
# summary(ans)$varcomp
# u = xx$Lam * ans$uList[[1]] # change * for matrix product
}
\seealso{The core functions of the package \code{\link{mmec}} }
|
daa3bb787505b5a77aea94205a929196ab5713d2
|
abcc7db0e92d17b720af9bc98f4cdac6739e1cf6
|
/ASA_CSSA_SSSA/R/cbPalette.R
|
367243571c90f1569aea59ff30e9aa6e69dfe378
|
[] |
no_license
|
dakotajudo/ASA_CSSA_SSSA
|
b4a93a1b7b9172c941abffca95ba278559279948
|
0a66202f93bdebfd8d411e288cd8d2c9fbb33f42
|
refs/heads/master
| 2021-09-27T11:56:47.419032
| 2021-09-10T14:08:11
| 2021-09-10T14:08:11
| 14,312,149
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 187
|
r
|
cbPalette.R
|
cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#0072B2", "#D55E00", "#F0E442","#CC79A7","#000000","#734f80", "#2b5a74", "#004f39", "#787221", "#003959", "#6aaf00", "#663cd3")
|
d40afc6755c652cbf1759c41ea53a10b8a8aa3d5
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/cusum/R/cusum.R
|
d8e3afafe5656e6877a7df8fba26846000f95e0c
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,147
|
r
|
cusum.R
|
#' Non-risk-adjusted CUSUM Charts
#'
#' Calculate non-risk-adjusted CUSUM charts for performance data
#' @export
#' @import checkmate
#' @import stats
#' @import graphics
#' @param failure_probability Double. Baseline failure probability
#' @param patient_outcomes Integer. Vector of binary patient outcomes (0,1)
#' @param limit Double. Control limit for signalling performance change
#' @param weights Double. Optional vector of weights, if empty, standard CUSUM weights are calculated with weights_t
#' @param odds_multiplier Double. Odds multiplier of adverse event under the alternative hypothesis (<1 looks for decreases)
#' @param reset Logical. Reset the CUSUM after a signal to 0; defaults to TRUE
#' @examples
#'
#' # control limit can be obtained with cusum_limit_sim(),
#' # here it is set to an arbitrary value (2.96)
#'
#' # CUSUM of in-control process
#' # simulate patient outcomes
#' set.seed(2046)
#' patient_outcomes <- as.logical(rbinom(n = 100, size = 1, prob = 0.05))
#'
#'
#' cs_ic <- cusum(
#' failure_probability = 0.05,
#' patient_outcomes,
#' limit = 2.96
#' )
#'
#' # CUSUM of out-of-control process
#' # simulate patient outcome
#' set.seed(2046)
#' patient_outcomes <- as.logical(rbinom(n = 100, size = 1, prob = 0.2))
#'
#' cs_oc <- cusum(
#' failure_probability = 0.05,
#' patient_outcomes,
#' limit = 2.96
#' )
cusum <- function(failure_probability, patient_outcomes, limit, weights = NULL, odds_multiplier = 2, reset = TRUE) {
  ## Check user input ####
  # failure_probability must be a single probability; values > 0.5 are folded
  # to 1 - p (with a warning) so the chart always tracks the rarer outcome.
  assert_numeric(failure_probability, lower = 0, upper = 1, finite = TRUE, any.missing = FALSE, len = 1)
  if (failure_probability > 0.5) {
    failure_probability <- 1 - failure_probability
    warning("Accepted failure probability failure_probability will be recoded to 1-failure_probability when > 0.5.")
  }
  # outcomes are coerced to 0/1 integers (accepts logicals)
  patient_outcomes <- as.integer(patient_outcomes)
  assert_integer(patient_outcomes, lower = 0, upper = 1, any.missing = FALSE, min.len = 1)
  assert_numeric(limit, finite = TRUE, any.missing = FALSE, len = 1)
  # user-supplied weights (if any) must align with the outcome vector
  if (length(weights) > 0){
    assert_numeric(weights, lower = -1, upper = 1, finite = TRUE, any.missing = FALSE, min.len = 1)
    if (length(weights) != length(patient_outcomes)) {
      stop("Length weights and patient outcomes of unequal size.")
    }
  }
  # odds_multiplier < 1 looks for improvements (limit should be negative),
  # > 1 for deteriorations (limit should be positive); exactly 1 is invalid.
  assert_numeric(odds_multiplier, lower = 0, finite = TRUE, any.missing = FALSE, len = 1)
  if (odds_multiplier < 1) {
    # message("CUSUM is set to detect process improvements (odds_multiplier < 1). ")
    if (limit > 0) {
      warning("Control limit should be negative to signal process improvements.")
    }
  }
  if (odds_multiplier == 1) {
    stop("CUSUM is set to detect no process change (odds_multiplier = 1).")
  }
  if (odds_multiplier > 1){
    if (limit < 0) {
      warning("Control limit should be positive to signal process deteriorations.")
    }
  }
  assert_logical(reset, any.missing = FALSE, len = 1)
  ## Calculate CUSUM Chart ####
  npat <- length(patient_outcomes)
  ct <- 0  # running CUSUM statistic
  # result matrix: columns t, failure_probability, ct, signal, limit
  cs <- matrix(0, nrow = npat, ncol = 5)
  # default CUSUM weights unless the caller supplied their own
  if (length(weights) == 0){
    w <- weights_t(patient_outcomes,
                   probability_ae = failure_probability,
                   odds_multiplier)
  } else {
    w <- weights
  }
  for (ii in 1:npat) {
    if (odds_multiplier > 1) {
      # upward chart: accumulate but never drop below 0; signal at >= limit
      ct <- max(0, ct + w[ii])
      if (ct >= limit) {
        cs[ii, 4] <- 1 # store signal
        # optionally restart the statistic after a signal
        if (reset == TRUE) ct <- 0
      } else {
        cs[ii, 4] <- 0
      }
    } else if (odds_multiplier < 1) {
      # downward chart: mirror image, signalling at <= (negative) limit
      ct <- min(0, ct - w[ii])
      if (ct <= limit) {
        # test for signal
        cs[ii, 4] <- 1 # store signal
        if (reset == TRUE) ct <- 0
      } else {
        cs[ii, 4] <- 0
      }
    }
    cs[ii, 1] <- ii # store patient id
    cs[ii, 2] <- failure_probability
    cs[ii, 3] <- ct # store CUSUM value
  }
  cs[, 5] <- limit
  # return as a classed data.frame so plot/print methods can dispatch
  cs <- as.data.frame(cs)
  names(cs) <- c("t", "failure_probability", "ct", "signal", "limit")
  class(cs) <- c("cusum", "data.frame")
  return(cs)
}
|
94f7e2208abe3c8462d484e5486b9e4b5cec7255
|
f060d34e74ce23f1449d4bb78067bab3dcc49347
|
/man/G.Rd
|
599067d8de040d089e5ffc5da9a198873375ccca
|
[] |
no_license
|
tamartsi/MetaCor
|
8bd34816049ff9573dadc0910f39961edc6ec819
|
9e64e0d5359411fb17f18bf87db7db4010c53cd9
|
refs/heads/master
| 2021-01-10T02:51:06.464548
| 2018-11-21T20:30:50
| 2018-11-21T20:30:50
| 49,463,770
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 593
|
rd
|
G.Rd
|
\name{G}
\alias{G}
\docType{data}
\title{
A simulated genotype matrix
}
\description{
A matrix of simulated genotypes to help demonstrate the package usage.
}
\usage{data("G")}
\format{
A data frame of simulated genotype data for 6000 individuals. IDs and 10 allele counts are provided for each individual.
}
\details{
%% ~~ If necessary, more details than the __description__ above ~~
}
\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
}
\references{
%% ~~ possibly secondary sources and usages ~~
}
\examples{
data(G)
head(G)
}
\keyword{datasets}
|
7597ccac92683ed1bdb0fe987d96bafada60e3cc
|
6febf32c916d5734ff34df50369a0bc73b9178f2
|
/R/reactive_power_control.R
|
214fd5bb223fb416a788ac1981ae6552ba6977be
|
[] |
no_license
|
asiono/rein
|
e190efbb0579685608bd48a171504016b2d0b7fe
|
ff837b696edc083a6ecf0c9435f1d8825a9f0c5d
|
refs/heads/master
| 2021-05-12T07:21:37.391867
| 2018-03-08T13:48:05
| 2018-03-08T13:48:05
| 117,106,249
| 0
| 0
| null | 2018-01-12T09:40:12
| 2018-01-11T13:47:56
|
R
|
UTF-8
|
R
| false
| false
| 3,074
|
r
|
reactive_power_control.R
|
################################################################################
#' @title reactive_power_control
#' @description change feed-in power based on specific power factor
#' @param lines lines data of the grid
#' @param S_cal omplex vector giving all powers in internal notation for cal_nodes
#' @param verbose verbosity level
#' @return This function change the value of active and reactive power of the feed-in.
#' The ouput is new complex number of feed-in power.
################################################################################
reactive_power_control <- function(lines, S_cal, verbose = 0) {
#calculate node distance from LV side of transformer
grid_begin <- lines[which(lines$type == 'trafo'), 'begin']
PF_matrix <- cbind(lines[,c("begin","end")], line_length = get_line_length(lines$element))
for (i in 1:nrow(PF_matrix)) {
if (PF_matrix$begin[i] != grid_begin) {
repeat {
PF_matrix$line_length[i] <- PF_matrix$line_length[i] + PF_matrix$line_length[which(PF_matrix$end == PF_matrix$begin[i])]
PF_matrix$begin[i] <- PF_matrix$begin[which(PF_matrix$end == PF_matrix$begin[i])]
if (PF_matrix$begin[i] == grid_begin) {
break
}
}
} else {
PF_matrix$line_length[i] <- PF_matrix[which(PF_matrix$begin == grid_begin), "line_length"] }
}
PF_matrix <- merge(PF_matrix, as.data.frame(S_cal), by.x = "end", by.y = 0)
#setting power factor
#power factor are set based on distance from transformer and load
#+------------------+-------------------------+
#| | Load balance |
#+ +-------------------------+
#| Distance | low | medium | high |
#+ +-------+---------+-------+
#| | <3.6kW | 3.6-13.8kW | >13.8kW |
#+------------------+-------+---------+-------+
#| short | <50m | 1.0 | 1.0 | 1.0 |
#+--------+---------+-------+---------+-------+
#| medium | 50-100m | 1.0 | 1.0 | 0.95 |
#+--------+---------+-------+---------+-------+
#| far | >100m | 1.0 | 0.95 | 0.9 |
#+--------+---------+-------+---------+-------+
PF_matrix$PF <- 0
for (i in 1:nrow(PF_matrix)) {
if (Re(PF_matrix$S_cal[i]) < 3680) {
PF_matrix$PF[i] = 1
} else if (Re(PF_matrix$S_cal[i]) <= 13800) {
if (PF_matrix$line_length[i] > 0.1) {
PF_matrix$PF[i] = 0.95
} else PF_matrix$PF[i] = 1
} else if (PF_matrix$line_length[i] < 0.05) {
PF_matrix$PF[i] = 1
} else if (PF_matrix$line_length[i] < 0.1) {
PF_matrix$PF[i] = 0.95
}else PF_matrix$PF[i] = 0.9
}
#apply power factor setting, change the S_cal value
for (i in 1:nrow(PF_matrix)) {
if (Mod(PF_matrix$S_cal[i]) != 0) {
PF_matrix$S_cal[i] <- complex(real = Mod(PF_matrix$S_cal[i])*PF_matrix$PF[i],
imaginary = (sqrt(Mod(PF_matrix$S_cal[i])*PF_matrix$PF[i])^2*((1/PF_matrix$PF[i])^2 - 1)))
}
}
S_cal <- PF_matrix$S_cal
names(S_cal) <- PF_matrix$end
return(S_cal)
}
|
b900e2fc801e05a67c8abdba551d815665076135
|
230fabadbc7881e514a9e3288be18743026f7cc3
|
/man/ApplyFactorRange.Rd
|
7c5dbae9472c1024e8cd613276860fed75510438
|
[] |
no_license
|
cran/rrepast
|
d15e5f00a973c569957c26671c3e9002a1c51ccf
|
46ca64781419e5c475521e0ade9e9786b6427cd1
|
refs/heads/master
| 2021-05-04T11:21:56.188742
| 2020-02-19T05:00:05
| 2020-02-19T05:00:05
| 48,087,733
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 514
|
rd
|
ApplyFactorRange.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RRepast.R
\name{ApplyFactorRange}
\alias{ApplyFactorRange}
\title{Corrects the LHS design matrix}
\usage{
ApplyFactorRange(design, factors)
}
\arguments{
\item{design}{The LHS design matrix}
\item{factors}{THe collection of factors}
}
\value{
The corrected design matrix
}
\description{
Correct the LHS sampling matrix for a
specific range applying the lambda function. The default
value of 'lambda' is 'qunif'.
}
|
852ae2bb8e33afe37cc04a233da29193eb16c6df
|
e668745c508439e49bdde3ce44650d61a050ea2c
|
/tests/ggbio_plotGrandLinear.R
|
82c2b79fc5dd45326dbac5b3cf555579f27392b7
|
[
"MIT"
] |
permissive
|
kevinrue/NGS
|
6d89145d95b3fab374403a3e93cf2f27d5b0d6f4
|
626ba681b382341364c32df2dbb3f79e2e29430c
|
refs/heads/master
| 2021-05-16T03:10:04.160214
| 2018-04-30T16:52:47
| 2018-04-30T16:52:47
| 19,390,134
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 436
|
r
|
ggbio_plotGrandLinear.R
|
library(ggbio)
library(GenomicRanges)
df <- data.frame(
seqnames = c(rep("chr1", 1200), rep("chr2", 1000), rep("chr3", 800)),
start = c(1:1200, 1:1000, 1:800),
distance = abs(rnorm(3000)) * 1E3
)
gr <- GRanges(
seqnames = c(rep("chr1", 1200), rep("chr2", 1000), rep("chr3", 800)),
ranges = IRanges(start = c(1:1200, 1:1000, 1:800), width = 5),
distance = rnorm(3000)
)
plotGrandLinear(gr, aes(y = distance))
|
5673ce2ea0a98de4dd90320dcf89fc5efe862d9d
|
68ba96b2062f8a50fd5768ec1b9617012f22ee27
|
/R/AIC_lik.R
|
b37aa268e5cf229dc81a4e4a71548bd314b0a6f2
|
[
"MIT"
] |
permissive
|
adsteen/Lloyd.et.al.Cell.abundance.metaanalysis
|
16076ca3d5cba6a6bb68a9ff8f4d2234d83fb426
|
65803ccebbd1c2ff6c6fc95b337b1b198f6f82ed
|
refs/heads/master
| 2016-09-06T18:10:07.337523
| 2013-11-07T03:12:49
| 2013-11-07T03:12:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 414
|
r
|
AIC_lik.R
|
##' Returns liklihood of models relative to the best (based on AIC)
##'
##' @description Calculates relative liklihood of models based on Akaike Information Criterion
##' @param AIC_df data frame of AIC results, as provided by the AIC function
##' @return The input data frame, with a `liklihood` column
AIC_lik <- function(AIC_df) {
AIC_df$rel_liklihood <- exp((min(AIC_df$AIC)-AIC_df$AIC)/2)
AIC_df
}
|
377b8977e9f99e70f6eed05f4d3db81cc321c70b
|
0a98b5f5a492a19f13a91617370f61dcba9437f6
|
/ch-9-scraping/httpdate.r
|
2399692fa1e10f7a75693c3887a44ccb723b686b
|
[] |
no_license
|
petermeissner/Wiley-ADCR
|
3fd75d32773497bfb3805aa3c32b31be81dcbb58
|
97f1e4ec88cad81b039e7ab6bb5e58091aba18d5
|
refs/heads/master
| 2021-01-21T08:21:00.977759
| 2016-07-22T09:48:13
| 2016-07-22T09:48:13
| 63,941,264
| 1
| 0
| null | 2019-11-21T23:42:55
| 2016-07-22T09:25:56
|
HTML
|
UTF-8
|
R
| false
| false
| 1,208
|
r
|
httpdate.r
|
HTTPdate <- function(time="now", type=c("rfc1123","rfc850","ascitime")[1]){
if(time=="now") {
tmp <- as.POSIXlt(Sys.time(),tz="GMT")
}else{
tmp <- as.POSIXlt(as.POSIXct(time),tz="GMT")
}
nday <- c("Sun", "Mon" , "Tue" , "Wed", "Thu" , "Fri" , "Sat")[tmp$wday+1]
month <- tmp$mon+1
nmonth <- c("Jan" , "Feb" , "Mar" , "Apr", "May" , "Jun" , "Jul" , "Aug", "Sep" , "Oct" , "Nov" , "Dec")[month]
mday <- formatC(tmp$mday, width=2, flag="0")
hour <- formatC(tmp$hour, width=2, flag="0")
min <- formatC(tmp$min , width=2, flag="0")
sec <- formatC(round(tmp$sec) , width=2, flag="0")
if(type=="rfc1123"){
return(paste0(nday,", ",mday," ",nmonth," ",tmp$year+1900," ",hour,":",min,":",sec," GMT"))
}
if(type=="rfc850"){
message("not implemented yet")
return(NULL)
}
if(type=="ascitime"){
message("not implemented yet")
return(NULL)
}
}
file.date <- function(filename,timezone=Sys.timezone()) {
as.POSIXlt(min(unlist(file.info(filename)[4:6])),origin = "1970-01-01", tz = timezone)
}
# usage:
# HTTPdate()
# HTTPdate( file.date("812.pdf") )
|
8bac11aa1a3f960ed8d79c935147a81e620460c0
|
8dd269c185df1d1400f597d12a8e0224a34f93ee
|
/Sentiment Analysis(Eng).R
|
dc6acedfa00235117352551981ea75b8f1772037
|
[] |
no_license
|
ErikWallentin/Sentiment-analysis-of-the-company-Nintendo
|
804e187409e5de59f6402d94ceca31e81f3d7559
|
1c6d2eba3beb613fc5e7f35761f1708a6757516d
|
refs/heads/master
| 2020-04-24T19:10:03.809481
| 2019-02-23T10:56:09
| 2019-02-23T10:56:09
| 172,140,402
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,385
|
r
|
Sentiment Analysis(Eng).R
|
#####################################################################################################
# Many companies and organizations benefits greatly from finding out what the public thinks about them.
# One way to do that is to investigate what people are writing about them on social media.
# A large social platform, which is actively used by more than 300 million people worldwide, is Twitter.
# An analysis of tweets aimed at companies/organizations through e.g. a given hashtag, can help
# companies/organizations to get a clearer view of what the public's attitude towards them is.
# When launching a new product, this analysis method is particularly effective, as it quickly and
# easily provides feedback from the public about what they think about the given product.
# This type of analysis is called sentiment analysis and the goal of this project is to find out what
# people write on Twitter regarding the company Nintendo, who just announced that Nintendo of
# America's president, Reggie Fils-Aime, will retire and be replaced by Doug Bowser.
# We therefore want to find out about the public's reaction to this announcement and this
# project will answer the question of what people write about Nintendo and how its attitude towards
# the company is, through an analysis of 1000 English-speaking tweets that include the Nintendo hashtag.
# The tweets were fetched and saved in a csv-file with help of Twitter's API and the R package "rtweet".
# In order to find out what's written about the company on Twitter, two graphs are presented,
# a barplot and a wordcloud, containing information about which words most frequently occur among the
# 1000 tweets. Similar words, e.g. run, running, runs etc is categorized under one name with help of a
# method called stemming. This means that some words in the graphs may have slightly different
# spelling as well as grammatical bending. However, this is a price I think it's worth paying to get
# the opportunity to merge similar words under one name.
# An analysis of the words presented in the two graphs can provide an acceptable understanding of what
# the public thinks about Nintendo after the presidential change announcement. However, there is a
# more efficient way to find out if the 1000 tweets can be categorized as positive, negative, fearful,
# etc. by a method that goes through each word of each tweet and gives a score to each word regarding
# if they were positive, negative, fearful, etc.
# After each word in each tweet is analysed by this method, we can through a barplot visualise for
# example how many points the "positive" category has accumulated. The method in question can
# categorize a word into ten different categories, and it's possible for a word to be categorized
# into more than one category.
# This project is divided into three separated parts where the first is dedicated to preparing the
# data, since text from social media is often data that needs to be prepared and cleaned thoroughly
# before the analysis phase. Part two is about creating the two graphs that describe which words
# occur most frequently in the tweets. Finally, the final part is all about the implementation of our
# sentiment analysis, were the goal is to through these 1000 tweets find out the general attitude
# towards the company Nintendo.
# The project concludes with a brief comment on the result achieved.
#####################################################################################################
#######################################################################
###########################Prepare the data############################
#######################################################################
# R-package necessary to execute the project.
install.packages("quanteda")
install.packages("wordcloud2")
install.packages("syuzhet")
library(quanteda)
library(wordcloud2)
library(syuzhet)
# Import the csv file named "TweetsNintendo" which contains 1000 tweets.
Data <- read.csv(file.choose(), header=TRUE, sep=",", stringsAsFactors = F)
# Remove links in the tweets.
Data$Tweet <- gsub(" ?(f|ht)(tp)(s?)(://)(.*)[.|/](.*)", "", Data$Tweet)
# Remove the code that represent emojis in the tweets.
Data$Tweet <- gsub("<.*?>", "", Data$Tweet)
# Remove words beginning with "#" or "@" in the tweets.
# If this isn't done, the word #Nintendo would for example appear in each tweet, and the character "@"
# is used as a link to another twitter-user. Words starting with @ are therefore superfluous as they
# don't help us extract any information about what the person who wrote the tweet thinks about the
# company Nintendo.
Data$Tweet <- gsub("[@#]\\w+ *", "", Data$Tweet)
# Tokenize every word in the tweets, and remove points, symbols, hyphens and words that only consists
# of numbers.
# In order to use the steaming function, every word needs to become a separate token
Data.tokens <- tokens(Data$Tweet, what = "word",
remove_numbers = TRUE, remove_punct = TRUE,
remove_symbols = TRUE, remove_hyphens = TRUE)
# Convert all tokens to lowercase.
Data.tokens <- tokens_tolower(Data.tokens)
# Remove english "stop-words" in our tweets.
Data.tokens <- tokens_select(Data.tokens, stopwords(),
selection = "remove")
# Perform stemming on our tokens.
Data.tokens <- tokens_wordstem(Data.tokens, language = "english")
# Create a document-feature matrix and then convert it to a matrix.
# When converting, the transpose function is used so that the columns contains the tweets and the rows
# contains the words.
dfm <- dfm(Data.tokens, tolower = FALSE)
dfm <- t(as.matrix(dfm))
#######################################################################
###Create two graphs containing the most frequently occurring words####
#######################################################################
# Create a barplot that illustrates which words occur most frequently in our 1000 tweets.
# Only words that occur at least 40 times are shown in the graph.
words <- rowSums(dfm)
words <- subset(words, words >= 40)
barplot(words, las = 2, main = 'Most frequently occurring words', col = rainbow(40), ylim = c(0,350))
# Create a "wordcloud" that illustrates which words occur most frequently in our 1000 tweets.
# By hovering the mouse over a word in the graph, the number of times the given word appears in the
# 1000 tweets is shown.
words_sorted <- sort(rowSums(dfm), decreasing = TRUE)
words_df <- data.frame(names(words_sorted), words_sorted)
colnames(words_df) <- c('word', 'freq')
head(words_df)
cloud <- wordcloud2(words_df, size = 0.7, shape = 'circle', minSize = 2, rotateRatio = 0.30)
cloud
#######################################################################
##########################Sentiment Analysis###########################
#######################################################################
# Perform sentiment analysis on our 1000 tweets.
tweets <- iconv(Data$Tweet)
Sentiment_Scores <- get_nrc_sentiment(tweets)
# Create a barplot that reports the result of our sentiment analysis.
par(mar=c(6,4,4,2))
barplot(colSums(Sentiment_Scores), las = 2, col = rainbow(10),
main = 'Sentiment analysis of #Nintendo tweets', ylab = 'Count', ylim = c(0,1000))
#####################################################################################################
# The result of the sentiment analysis shows that the attitude towards the company Nintendo is mostly
# positive. This is in line with what one might expect after inspection of the two graphs containing
# information about the words that occur most frequently in the 1000 tweets. We can see that words
# that are positively charged occur to a greater extent than negatively charged words.
# An explanation of why the bars "positive" and "anticipation" are the greatest in the graph could be
# explained by the fact that many may thank the old president, Reggie Fils-Aime, for a well done work
# at Nintendo of America, and that many are excited about what the new president, Doug Bowser, can
# accomplish.
#####################################################################################################
|
11d3c3002fb7f00342f7e8868f5cccf2ec93b4e1
|
6515b40520740f675f1e9060b813f40a68b137bd
|
/R/AICc.R
|
133bc39ee46eb7733f15945ef6cc25239594412e
|
[] |
no_license
|
TaddyLab/gamlr
|
2e760bae5f5dd15582a0549757183f1853128a65
|
b441d514da9068c34fe1300a62c8cbc46e7ea0a5
|
refs/heads/master
| 2023-05-01T13:56:28.369330
| 2023-04-16T17:28:22
| 2023-04-16T17:28:22
| 11,212,706
| 21
| 8
| null | 2018-02-10T01:39:06
| 2013-07-06T03:54:15
|
C
|
UTF-8
|
R
| false
| false
| 242
|
r
|
AICc.R
|
## corrected AIC calculation
AICc <- function(object, k=2){
ll <- logLik(object)
d <- attributes(ll)$df
n <- attributes(ll)$nobs
ic <- -2*ll+ k*d*n/(n-d-1)
ic[d+1 > n] <- Inf
attributes(ic)[c('df','nobs','class')] <- NULL
ic
}
|
305c3c4b304dc9f54848f3099ee79d9ae7b47071
|
91e0ea814dd7f6cb60fd2fa343320acb99de9294
|
/2011/puntos_ideales_2011.R
|
00fb28b00fcce35a976e756826d613a3ea78eacc
|
[] |
no_license
|
fedecarles/votaciones
|
61f4560d82cba5b5eec9d20d1964f7e18f75bd6a
|
c4e3fc499d1a771810d80e52c960bc634c82f7a7
|
refs/heads/master
| 2021-01-20T21:29:50.638642
| 2012-01-09T00:07:28
| 2012-01-09T00:07:28
| 3,132,903
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,807
|
r
|
puntos_ideales_2011.R
|
# File-Name: puntos_ideales_2011.R
# Date: 08.01.12
# Author: Federico Carlés
# Email: fedecarles@gmail.com
# Data: 129.csv, Honorable Cámara de Diputados de la Nación
# Packages Used: pscl (Simon Jackman), ggplot2 (Hadley Wickham)
# Los cálculos sobre la matriz de votaciones están hechos en base al método
# desarrollado por Keith Poole en "SPATIAL MODELS OF PARLIAMENTARY VOTING" (2005)
library(pscl)
library(ggplot2)
#setwd() Especificar el working directory.
####### CARGA Y PREPARACIÓN DE DATOS ##############################
per.129<-read.delim("Documentos/VN/129.csv", header=F, sep=",")
# Borramos a aquellos legisladores que no hayan estado en al menos el 70%
# de la votaciones.
df.129 <-per.129[apply(per.129, 1, function(v) sum(!is.na(v)) >= 38 ), ]
df.129<-df.129[-which(rowSums(df.129[4:44]=="2")>=38),]
#df.129<-replace(df.129, df.129== 2,0)
#df.129<-replace(df.129,df.129==9,0)
#df.129[is.na(df.129)]<- 99
nombres<-df.129[,1]
legData<-data.frame(df.129[,2],length(df.129[,2]),1)
colnames(legData)<-"party"
legis<-df.129$V1
partido<-df.129$V2
df.129<-df.129[,4:58]
# Función de afiliación. De Simon Jackman. Calcula la proporción en que dos
# elementos de la matriz coinciden entre si.
agreement <- function(x){
n <- dim(x)[1]
k <- dim(x)[2]
a <- matrix(NA,n,n)
for(i in 1:n){
for(j in 1:n){
a[i,j] <- sum(x[i,]==x[j,],na.rm=TRUE)/sum(!is.na(x[i,]) & !is.na(x[j,]))
}
}
a[is.nan(a)] <- 0
a
}
Sa<-agreement(df.129)
Sd <- (1-Sa)^2 # Convierte la matriz de acuerdo a una matriz de distancia.
# Función de centrado de la matriz. A cada valor se le resta la media de
# las filas y la media de las columnas. Luego se le suma la media de la matriz
# y se divide por -2.
doubleCenter <- function(x){
n <- dim(x)[1]
k <- dim(x)[2]
rowMeans <- matrix(apply(x,1,mean,na.rm=TRUE),n,k,byrow=TRUE)
colMeans <- matrix(apply(x,2,mean,na.rm=TRUE),n,k,byrow=FALSE)
matrixMean <- matrix(mean(x,na.rm=TRUE),n,k)
(x - rowMeans - colMeans + matrixMean)/-2
}
Sd <- doubleCenter(Sd)
Se <- eigen(Sd) # Extrae los vectores y valores propios de la matriz centrada.
Slambda <- Se$values
Sx <- Se$vector[,1] * sqrt(Slambda[1])
names(Sx)<-partido
####### Cálculo de los promedios por Bloque ######################
medias<- aggregate (Sx, by=list(names(Sx)),FUN=mean )
med<- as.numeric(medias$x)
names(med)<-medias$Group.1
png("Documentos/promedios2011.png", width=1280, height=900, res=150, unit="px")
qplot( medias$x, y = reorder(medias$Group.1, medias$x), data = medias,
xlab="<< Oposición - Oficialismo >>", ylab=NULL,
main="HCDN - Período 129")
dev.off()
###### Gráficos por legisladores ################################
Sx.df<-as.data.frame(Sx)
#Sx.df$cent<-scale(Sx.df$Sx, scale=T, center=T)
Sx.df$legis<-legis
Sx.df$partido<- partido
#Sx.df<-Sx.df[order(-Sx.df$cent),]
###### Oficialismo ##############################################
Sx.ofi<-Sx.df[order(-Sx.df$Sx),]
ofi<-Sx.ofi[1:10,]
ofi<-ofi[order(ofi$Sx),]
ofi<-ofi[with(ofi,order(Sx)), ]
ofi$legis <- ordered(ofi$legis, levels=levels(ofi$legis)[unclass(ofi$legis)])
png("Documentos/ofi2011.png", width=1280, height=900, res=150, unit="px")
qplot(ofi$Sx,ofi$legis,main="Los 10 legisladores mas oficialistas 2011",
xlab=" Oficialismo >>>",ylab=NULL)
dev.off()
###### Oposición #################################################
Sx.opo<-Sx.df[order(-Sx.df$Sx),]
opo<-Sx.opo[239:248,]
opo<-opo[order(opo$Sx),]
opo<-opo[with(opo,order(Sx)), ]
opo$legis <- ordered(opo$legis, levels=levels(opo$legis)[unclass(opo$legis)])
png("Documentos/opo2011.png", width=1280, height=900, res=150, unit="px")
qplot(opo$Sx,opo$legis, main="Los 10 legisladores mas opositores 2011",
xlab="<<< Oposición",ylab=NULL)
dev.off()
|
5f86f0f5c29b3a9cd936a28bd4e93f622e53ebab
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/lmomco/examples/quacau.Rd.R
|
4172b183431448c6b8fe949c19954ea2e74be2f5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 255
|
r
|
quacau.Rd.R
|
library(lmomco)
### Name: quacau
### Title: Quantile Function of the Cauchy Distribution
### Aliases: quacau
### Keywords: distribution quantile function Distribution: Cauchy
### ** Examples
para <- c(12,12)
quacau(.5,vec2par(para,type='cau'))
|
934025518829b7c1772da74dad8dbc28702bcbe6
|
51ef9fa1b2212c659152fac242bda47e7bf15d6a
|
/man/get_last_n_exchangerate_tables.Rd
|
29926b2e69863ab963a52221f6cf8de70601152a
|
[] |
no_license
|
cran/rnbp
|
dc5771835c012e6872cc1a7128ad017234db0dad
|
7ccc244007541379fc729d5ab869bd329ef06280
|
refs/heads/master
| 2021-06-25T03:25:57.463674
| 2021-06-07T06:30:02
| 2021-06-07T06:30:02
| 199,285,520
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,062
|
rd
|
get_last_n_exchangerate_tables.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/endpoint_tables.R
\name{get_last_n_exchangerate_tables}
\alias{get_last_n_exchangerate_tables}
\title{Retrieves the last n exchange rate tables.}
\usage{
get_last_n_exchangerate_tables(table, n)
}
\arguments{
\item{table}{specifies which table should be fetched.}
\item{n}{number of exchange rate tables to retrieve.}
}
\value{
nbp_api_response object containing the last n
exchange rate tables.
}
\description{
Retrieves the last n exchange rate tables.
}
\examples{
\donttest{
tryCatch({
## Fetch the last 3 A exchange rate tables
response <- get_last_n_exchangerate_tables("A", 3)
## Preview response content
response$content
},
error = function(e) message(e)
)
}
}
\seealso{
\url{https://api.nbp.pl/#kursyWalut}
Other tables:
\code{\link{get_current_exchangerate_table}()},
\code{\link{get_exchangerate_table_from}()},
\code{\link{get_exchangerate_tables_from_interval}()},
\code{\link{get_todays_exchangerate_table}()}
}
\concept{tables}
|
53b4bfe7e6ad7e7a31a6d6cd1a2749c52ef2354b
|
c77069c2dc6dbf3f9449a44e06d70b540a1912b5
|
/R/remove_site.R
|
4e09eaa5fe3b527392c1aaebb5f9b35f692ed216
|
[] |
no_license
|
cran/phenology
|
62b323a9231c3701568de58c57a804e043abe6a2
|
991d2c35dcbcf1fcff23cbcc0c2f82b74a868dfb
|
refs/heads/master
| 2023-04-15T03:37:51.464388
| 2023-04-01T09:10:02
| 2023-04-01T09:10:02
| 17,698,504
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,512
|
r
|
remove_site.R
|
#' remove_site removes beach information from a set of parameters.
#' @title Removes site information from a set of parameters.
#' @author Marc Girondot
#' @return Return a set of modified parameters
#' @param parameters Set of parameters
#' @param help If TRUE, an help is displayed
#' @description This function is used to remove the information of the site
#' from a set of parameters. It can be used to other timeseries after.
#' @family Phenology model
#' @examples
#' library(phenology)
#' # Read a file with data
#' data(Gratiot)
#' # Generate a formatted list nammed data_Gratiot
#' data_Gratiot<-add_phenology(Gratiot, name="Complete",
#' reference=as.Date("2001-01-01"), format="%d/%m/%Y")
#' # Generate initial points for the optimisation
#' parg<-par_init(data_Gratiot, fixed.parameters=NULL)
#' # Run the optimisation
#' \dontrun{
#' result_Gratiot<-fit_phenology(data=data_Gratiot,
#' fitted.parameters=parg, fixed.parameters=NULL)
#' }
#' data(result_Gratiot)
#' # Extract parameters form result
#' parg<-extract_result(result_Gratiot)
#' # Remove site information
#' parg1<-remove_site(parg)
#' @export
remove_site <-
function(parameters=NULL, help=FALSE) {
if(help) {
cat("This function is used to remove the information of the site\n")
cat("from a set of parameters. It can be used to other timeseries after.\n")
} else {
if (!is.null(parameters)) {
for(i in 1:length(parameters)) {
names(parameters)[i]<-strsplit(names(parameters[i]), "_")[[1]][1]
}
return(parameters)
}
}
}
|
df849911c8c7fcc7fa63627c665ff3c2bb5874bb
|
8bfd2b99cff65dff6a1b08723622fc7b6cc2d2ff
|
/man/read_empatica_events.Rd
|
335b8c8097763857dec8bc5e8db747153d222f4a
|
[] |
no_license
|
bwrc/empatica-r
|
59eef1e037a743bf89e0bea713b39366e6ae93e7
|
e2b0d42468830009c6972453bafcc0a1d7496845
|
refs/heads/master
| 2020-04-15T15:23:47.741959
| 2017-07-12T19:08:04
| 2017-07-12T19:08:04
| 43,059,899
| 5
| 4
| null | 2017-07-12T19:08:05
| 2015-09-24T10:17:51
|
R
|
UTF-8
|
R
| false
| false
| 434
|
rd
|
read_empatica_events.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/utilities.R
\name{read_empatica_events}
\alias{read_empatica_events}
\title{Read Empatica events (button presses)}
\usage{
read_empatica_events(f)
}
\arguments{
\item{f}{The name of the file containing the events (\code{tags.csv}).}
}
\value{
A data frame with the events
}
\description{
Read Empatica events (button presses)
}
\keyword{internal}
|
9de00d01c57647f7356ca799f371926f9529010a
|
b35f701f42ab3ad56ecbebe9e08a454efd276c7d
|
/R/proportional_hazards_data.R
|
004ce0ee570b22f781feffb6bd954c51d75fa308
|
[
"MIT"
] |
permissive
|
ClinicoPath/censored
|
460e760d97763f3a0cab4246a4e1208d85791821
|
848be00c4ce826c22f29eb9aa444649671cdb69a
|
refs/heads/master
| 2023-04-29T05:55:21.976333
| 2021-05-17T13:24:31
| 2021-05-17T13:24:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,057
|
r
|
proportional_hazards_data.R
|
# These functions define the proportional hazards models.
# They are executed when this package is loaded via `.onLoad()` and modify the
# parsnip package's model environment.
# These functions are tested indirectly when the models are used. Since this
# function is executed on package startup, you can't execute them to test since
# they are already in the parsnip model database. We'll exclude them from
# coverage stats for this reason.
# nocov
make_proportional_hazards_survival <- function() {
parsnip::set_model_engine("proportional_hazards", mode = "censored regression", eng = "survival")
parsnip::set_dependency("proportional_hazards", eng = "survival", pkg = "survival")
parsnip::set_dependency("proportional_hazards", eng = "survival", pkg = "riskRegression")
parsnip::set_dependency("proportional_hazards", eng = "survival", pkg = "censored")
set_model_arg(
model = "proportional_hazards",
eng = "glmnet",
parsnip = "penalty",
original = "lambda",
func = list(pkg = "dials", fun = "penalty"),
has_submodel = TRUE
)
set_model_arg(
model = "proportional_hazards",
eng = "glmnet",
parsnip = "mixture",
original = "alpha",
func = list(pkg = "dials", fun = "mixture"),
has_submodel = FALSE
)
parsnip::set_fit(
model = "proportional_hazards",
eng = "survival",
mode = "censored regression",
value = list(
interface = "formula",
protect = c("formula", "data"),
func = c(pkg = "survival", fun = "coxph"),
defaults = list(x = TRUE, model = TRUE)
)
)
parsnip::set_encoding(
model = "proportional_hazards",
eng = "survival",
mode = "censored regression",
options = list(
predictor_indicators = "none",
compute_intercept = FALSE,
remove_intercept = FALSE,
allow_sparse_x = FALSE
)
)
parsnip::set_pred(
model = "proportional_hazards",
eng = "survival",
mode = "censored regression",
type = "time",
value = list(
pre = cph_survival_pre,
post = function(x, object) {
unname(summary(x)$table[, "*rmean"])
},
func = c(fun = "survfit"),
args =
list(
formula = quote(object$fit),
newdata = quote(new_data),
na.action = quote(stats::na.exclude)
)
)
)
parsnip::set_pred(
model = "proportional_hazards",
eng = "survival",
mode = "censored regression",
type = "survival",
value = list(
pre = cph_survival_pre,
post = NULL,
func = c(pkg = "censored", fun = "cph_survival_prob"),
args =
list(
x = quote(object$fit),
new_data = quote(new_data),
.times = rlang::expr(.time)
)
)
)
parsnip::set_pred(
model = "proportional_hazards",
eng = "survival",
mode = "censored regression",
type = "linear_pred",
value = list(
pre = NULL,
post = function(x, object) {
# For consistency with other models, we want the lp to increase with
# time. For this, we change the sign
-unname(x)
},
func = c(fun = "predict"),
args =
list(
object = quote(object$fit),
newdata = quote(new_data),
na.action = quote(stats::na.exclude)
)
)
)
}
make_proportional_hazards_glmnet <- function() {
parsnip::set_model_engine("proportional_hazards", mode = "censored regression", eng = "glmnet")
parsnip::set_dependency("proportional_hazards", eng = "glmnet", pkg = "glmnet")
parsnip::set_dependency("proportional_hazards", eng = "glmnet", pkg = "censored")
parsnip::set_fit(
model = "proportional_hazards",
eng = "glmnet",
mode = "censored regression",
value = list(
interface = "matrix",
protect = c("x", "y", "weights"),
func = c(pkg = "censored", fun = "glmnet_fit_wrapper"),
defaults = list()
)
)
parsnip::set_encoding(
model = "proportional_hazards",
eng = "glmnet",
mode = "censored regression",
options = list(
predictor_indicators = "traditional",
compute_intercept = TRUE,
remove_intercept = TRUE,
allow_sparse_x = TRUE
)
)
set_model_arg(
model = "proportional_hazards",
eng = "glmnet",
parsnip = "penalty",
original = "lambda",
func = list(pkg = "dials", fun = "penalty"),
has_submodel = TRUE
)
set_model_arg(
model = "proportional_hazards",
eng = "glmnet",
parsnip = "mixture",
original = "alpha",
func = list(pkg = "dials", fun = "mixture"),
has_submodel = FALSE
)
parsnip::set_pred(
model = "proportional_hazards",
eng = "glmnet",
mode = "censored regression",
type = "linear_pred",
value = list(
pre = NULL,
post = organize_glmnet_pred,
func = c(fun = "predict"),
args =
list(
object = expr(object$fit),
newx = expr(as.matrix(new_data)),
type = "link",
s = expr(object$spec$args$penalty)
)
)
)
parsnip::set_pred(
model = "proportional_hazards",
eng = "glmnet",
mode = "censored regression",
type = "survival",
value = list(
pre = NULL,
post = NULL,
func = c(pkg = "censored", fun = "coxnet_survival_prob"),
args =
list(
x = expr(object$fit),
new_data = expr(new_data),
.times = expr(.time),
s = expr(object$spec$args$penalty),
training_data = expr(object$training_data)
)
)
)
}
# nocov end
#' Wrapper for glmnet for censored
#'
#' Not to be used directly by users
#'
#' @inheritParams glmnet::glmnet
#' @param ... additional parameters passed to glmnet::glmnet.
#' @export
#' @keywords internal
glmnet_fit_wrapper <- function(x, y, alpha = 1, lambda = NULL, ...) {
fit <- glmnet::glmnet(x, y, family = "cox",
alpha = alpha, lambda = lambda, ...)
res <- list(fit = fit,
x = x,
y = y
)
class(res) <- "coxnet"
res
}
|
324b73eca3ef8ca7103e94ba9e794c0828244f82
|
12bd3d5a0377fb2c709b99b71f0f43bfedace4c2
|
/R/ReadInterleavedNexus.R
|
c8c22027b56cdb742fe185d5591548dbe59e229d
|
[] |
no_license
|
bbanbury/phrynomics
|
58666270c415c46cdf1e2b7faab34f865fbf3a35
|
42c393473d0627d5c2b95989f0f6036dc5038c70
|
refs/heads/master
| 2023-05-25T05:13:17.346748
| 2023-05-16T00:15:38
| 2023-05-16T00:15:38
| 14,935,999
| 25
| 7
| null | 2023-05-16T00:15:39
| 2013-12-04T21:13:46
|
R
|
UTF-8
|
R
| false
| false
| 1,481
|
r
|
ReadInterleavedNexus.R
|
#' Read Interleaved Nexus File
#'
#' Read Interleaved Nexus File
#'
#' This function reads in SNP datasets that are interleaved and converts them to data frame. These can then be written as standard nexus or phylip formatted files.
#'
#' @aliases ReadInterleavedNexus
#' @param file A file name specified by either a variable or a double-quoted string (ie, file=""). Also accepts R objects that are data frames or matrices and converts to the class "snp".
#' @export
#' @return Returns a data frame with rownames as species and column(s) as loci
#' @seealso \link{WriteSNP}
ReadInterleavedNexus <- function(file){
  dat <- scan(file, what = "character", sep = "\n")
  # Drop any "\t;" matrix-terminator lines.
  # BUG FIX: the original tested if(length(grep(...) > 1)) -- the "> 1" was
  # inside length(), so the condition only worked by accident.
  semicolonLines <- grep("\t;", dat)
  if (length(semicolonLines) > 0)
    dat <- dat[-semicolonLines]
  # Keep only the lines between the "matrix" keyword and the closing "end".
  whereDataStarts <- grep("matrix", dat, ignore.case = TRUE) + 1
  whereDataEnds <- grep("end", dat)[which(grep("end", dat) > whereDataStarts)][1] - 1
  dat <- dat[whereDataStarts:whereDataEnds]
  # Each data line is "<taxon><whitespace><sequence chunk>"; split and keep
  # the non-empty tokens as a two-column (taxon, chunk) matrix.
  datlist <- strsplit(dat, split = "[ +]", perl = TRUE)
  dattable <- matrix(nrow = length(datlist), ncol = 2)
  for (i in seq_along(datlist)) {
    dattable[i, ] <- c(datlist[[i]][which(datlist[[i]] != "")])
  }
  # Concatenate, per taxon, its interleaved chunks in file order.
  newdattable <- matrix(nrow = length(unique(dattable[, 1])), ncol = 1)
  rownames(newdattable) <- unique(dattable[, 1])
  for (i in seq_len(dim(newdattable)[1])) {
    # BUG FIX: the original compared against the *entire* rownames vector
    # (silently recycled), which concatenated every taxon's chunks into
    # every row whenever the file was truly interleaved.
    taxonRows <- which(dattable[, 1] == rownames(newdattable)[i])
    newdattable[i, 1] <- paste(dattable[taxonRows, 2], collapse = "")
  }
  return(as.data.frame(newdattable))
}
|
fde7955c91afd1cd0e158a7711bf08b1231c5c5a
|
5c09b66c0bd8fb0f7b3bb93d9a039810a0702e47
|
/R/MachineLearning/XI - FrequenceItens.r
|
3f6412f7dbd38b9f9fa653f6467787b6127b9dfe
|
[] |
no_license
|
leandrocotrim/curso_R_PY
|
a4ccb1020c7aa33dd4a38a0b1d3fe41e9f44028e
|
5a9844e9dbd4f765837ea25dee489866ad51bbd1
|
refs/heads/master
| 2020-06-15T00:15:40.035284
| 2019-08-16T11:08:24
| 2019-08-16T11:08:24
| 195,161,930
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 425
|
r
|
XI - FrequenceItens.r
|
# Frequent-itemset mining on market-basket data with arules' ECLAT algorithm.
library(arules)
# Read transactions: one basket per line, items separated by commas.
transacoes = read.transactions(
  '..\\projects\\curso_R_PY\\R\\MachineLearning\\transacoes2.txt',
  format='basket', sep = ',')
dim(transacoes)
summary(transacoes)
# Better visualization of the raw transactions
inspect(transacoes)
image(transacoes)
# Mine frequent itemsets (minimum support 0.1, itemsets of up to 15 items)
regras = eclat(transacoes, parameter = list(supp = 0.1, maxlen = 15))
inspect(regras)
library(arulesViz)
plot(regras, method = 'graph', control= list(type="items"))
|
d237a7a2ef4ac059d319b76e72bac848e904b0d9
|
44cb5ed0fd4f72e67bc467bda7ab5ae918838595
|
/R/immune/run_base.R
|
b6c5ca0f9df85c7fcbf809ffe8386ebb36ca1342
|
[
"MIT"
] |
permissive
|
Kcjohnson/SCGP
|
2d5e020a5444edc0e4dceb1df147d81562d93769
|
e757b3b750ce8ccf15085cb4bc60f2dfd4d9a285
|
refs/heads/master
| 2020-09-06T00:33:17.964512
| 2019-12-11T15:18:11
| 2019-12-11T15:18:11
| 220,259,596
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 934
|
r
|
run_base.R
|
#!/usr/bin/env Rscript
# Pull gene TPM values from the GLASS database, build a samples x genes
# expression matrix, and run the BASE deconvolution algorithm on it.
library(DBI)
library(odbc)
library(reshape2)

con <- DBI::dbConnect(odbc::odbc(), "GLASSv3")
q <- "SELECT * FROM analysis.gene_tpm"
tpm <- dbGetQuery(con, q)

args <- commandArgs(trailingOnly = TRUE)
# args[1] = input file (currently unused -- NOTE(review): confirm whether it
# was meant to replace the hard-coded DB query above), args[2] = signature
# matrix, args[3] = optional output path.
# test if there is at least one argument: if not, return an error
if (length(args) < 2) {
  stop("Please provide an input and a mapping file", call. = FALSE)
} else if (length(args) == 2) {
  # default output file
  args[3] <- "out.txt"
}
sig_matrix <- args[2]
output <- args[3]

# Convert tpm long format into expression matrix (rows = aliquots,
# columns = genes).
# BUG FIX: value.var must be the *name* of the value column, not the data
# object itself.  NOTE(review): confirm the value column is called "tpm".
tpm_matrix <- dcast(tpm, aliquot_barcode ~ gene_symbol, value.var = "tpm")
# Move the barcode IDs into rownames so every remaining column is numeric
# (log10 on a data.frame with a character column would error).
rownames(tpm_matrix) <- tpm_matrix$aliquot_barcode
tpm_matrix$aliquot_barcode <- NULL
tpm_matrix <- log10(tpm_matrix + 1)

# Filter out genes that are not expressed in at least 3 samples.
# BUG FIX: genes are columns here, so count over margin 2 and subset
# columns (the original counted over rows, i.e. samples).
tag <- apply(tpm_matrix > 0, 2, sum)
tpm_matrix <- tpm_matrix[, tag >= 3]

source("/R/immune/base.R")
# BUG FIX: removed a stray ")" that made the script unparseable, spelled
# out TRUE/FALSE, and write to `output` (the original referenced an
# undefined `myoutf`).
res <- base(tpm_matrix, sig_matrix, perm = 1000, median.norm = TRUE)
write.table(res, output, sep = "\t", row.names = TRUE, quote = FALSE)
|
119f86547864997d28f4651a5c9487dc7e15176b
|
a17cf22be2304c96d267fc1b68db7b7279c4a293
|
/R/mergeTables.R
|
a9010b172741bedff8ec24fafc3d734dae691069
|
[] |
no_license
|
robertdouglasmorrison/DuffyTools
|
25fea20c17b4025e204f6adf56c29b5c0bcdf58f
|
35a16dfc3894f6bc69525f60647594c3028eaf93
|
refs/heads/master
| 2023-06-23T10:09:25.713117
| 2023-06-15T18:09:21
| 2023-06-15T18:09:21
| 156,292,164
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,296
|
r
|
mergeTables.R
|
# mergeTables.R - combine two 1-D tables that may or may not have common categories
#
# Returns a "table" whose categories are the sorted union of the two inputs'
# names and whose counts are the elementwise sums; an NA category, if present,
# is dropped.
`mergeTables` <- function( t1, t2) {
  # FIX: use the public order()/match() API instead of .Internal(), which is
  # not a supported interface and can change signature between R versions.
  nam1 <- names(t1)
  nam2 <- names(t2)
  allNames <- unique.default(c(nam1, nam2))
  # Same ordering as the original .Internal call: increasing, NA last.
  allNames <- allNames[order(allNames, na.last = TRUE, decreasing = FALSE)]
  len <- length(allNames)
  allCounts <- rep(0, times = len)
  # Accumulate each table's counts at the slot of its own category.
  where1 <- match(nam1, allNames)
  allCounts[where1] <- allCounts[where1] + t1
  where2 <- match(nam2, allNames)
  allCounts[where2] <- allCounts[where2] + t2
  names(allCounts) <- allNames
  # An NA category sorts last; drop it by truncating the vector.
  if (any(is.na(allNames))) length(allCounts) <- length(allCounts) - 1
  class(allCounts) <- "table"
  allCounts
}
# Combine two 1-D tables whose category names are integers, summing the
# counts of categories shared by both tables.
`mergeIntegerTables` <- function( t1, t2) {
  # Sorted union of the integer-valued categories.
  keys <- sort.int(as.integer(unique.default(c(names(t1), names(t2)))))
  merged <- rep(0, times = length(keys))
  # Add each table's counts at the slots of its own categories
  # (nomatch = 0 silently drops categories that failed integer coercion).
  slots1 <- base::match(as.integer(names(t1)), keys, nomatch = 0)
  merged[slots1] <- merged[slots1] + t1
  slots2 <- base::match(as.integer(names(t2)), keys, nomatch = 0)
  merged[slots2] <- merged[slots2] + t2
  # Dress the plain vector up as a 1-D table object.
  dim(merged) <- length(keys)
  dimnames(merged) <- list(keys)
  class(merged) <- "table"
  return(merged)
}
|
c9dbf9f7f74b8515df341bc1ebd350e99355a859
|
e590b198ae4387935d51f452835a4e629eca9b3f
|
/R/brownieREML.R
|
6f4e357e8d5ceb5c314a3da7ff913c03c9ba5d4c
|
[] |
no_license
|
mrhelmus/phytools
|
526c035cf18c83c62875d119c7d36a497d7af04b
|
a8ed2a28248996ac949f31800f3398428370e5aa
|
refs/heads/master
| 2021-01-15T20:43:37.310385
| 2014-06-28T00:00:00
| 2014-06-28T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,489
|
r
|
brownieREML.R
|
# This function is a simplified REML version of brownie.lite()
# written by Liam J. Revell 2011, 2013
# Fits a single-rate and a multi-rate (one per mapped state) Brownian-motion
# model to trait x on a stochastically mapped tree, scoring each model by the
# REML likelihood of the phylogenetically independent contrasts.
brownieREML<-function(tree,x,maxit=2000){
	# bookkeeping
	if(!is.binary.tree(tree)) tree<-multi2di(tree)
	x<-x[tree$tip.label] # order in tip.label order
	n<-length(x) # number of species
	p<-ncol(tree$mapped.edge) # number of states
	# Negative REML log-likelihood of the single-rate model: rescale every
	# branch by the one rate, then score the contrasts.
	lik1<-function(sig1,tree,x){
		tt<-scaleByMap(tree,setNames(rep(sig1,p),colnames(tree$mapped.edge)))
		picX<-pic(x,tt,scaled=FALSE,var.contrasts=TRUE)
		logL<-sum(dnorm(picX[,1],sd=sqrt(picX[,2]),log=TRUE))
		return(-logL)
	}
	sig1<-mean(pic(x,tree)^2)
	logL1<--lik1(sig1,tree,x)
	# Negative REML log-likelihood of the multiple-rate model.
	# FIX: spelled out TRUE/FALSE (the original used the reassignable T/F
	# shorthand here, inconsistently with lik1 above).
	lik2<-function(sig2,tree,x){
		tt<-scaleByMap(tree,sig2)
		picX<-pic(x,tt,scaled=FALSE,var.contrasts=TRUE)
		logL<-sum(dnorm(picX[,1],sd=sqrt(picX[,2]),log=TRUE))
		return(-logL)
	}
	# Optimize the p rates from random starts, bounded away from zero.
	YY<-optim(setNames(rep(1,p)*runif(n=p),colnames(tree$mapped.edge)),lik2,tree=tree,x=x,method="L-BFGS-B",lower=rep(1e-8,p))
	sig2<-YY$par
	logL2<--YY$value
	convergence=(YY$convergence==0)
	return(list(sig2.single=sig1,logL1=logL1,sig2.multiple=sig2,logL2=logL2,convergence=convergence))
}
# This function scales a mapped tree by sig2
# written by Liam J. Revell 2011
scaleByMap<-function(mtree,sig2){
	# Each branch's new length is the rate-weighted sum of the time it
	# spent in each mapped state: mapped.edge[, states] %*% rates.
	newLengths <- mtree$mapped.edge[, names(sig2)] %*% sig2
	scaled <- list(Nnode = mtree$Nnode,
	               edge = mtree$edge,
	               tip.label = mtree$tip.label,
	               edge.length = newLengths[, 1])
	class(scaled) <- "phylo"
	return(scaled)
}
|
4df711cb9ea7995065a6c70f83ce682fa66addaf
|
27814ee35a8d43a54f9c1105a0c0d38e0f6ec16b
|
/R/grenander.R
|
62c4d38def04b7f0ab960e844b4e7a9ef89d7b3d
|
[] |
no_license
|
cran/fdrtool
|
31243ed0476e56daf7cc1484fedc89c0341bccf3
|
9c0c761e6a73e3fdc166d7ea8cc70894bd3fa8bd
|
refs/heads/master
| 2021-11-25T01:33:09.804545
| 2021-11-13T19:30:11
| 2021-11-13T19:30:11
| 17,696,021
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,932
|
r
|
grenander.R
|
### grenander.R (2007-06-13)
###
### Grenander Density Estimator
###
### Copyright 2006-2007 Korbinian Strimmer
###
###
### This file is part of the `fdrtool' library for R and related languages.
### It is made available under the terms of the GNU General Public
### License, version 3, or at your option, any later version,
### incorporated herein by reference.
###
### This program is distributed in the hope that it will be
### useful, but WITHOUT ANY WARRANTY; without even the implied
### warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
### PURPOSE. See the GNU General Public License for more
### details.
###
### You should have received a copy of the GNU General Public
### License along with this program; if not, write to the Free
### Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
### MA 02111-1307, USA
# Grenander estimator: the monotone density implied by the least concave
# majorant (decreasing) or greatest convex minorant (increasing) of an ECDF.
grenander <- function(F, type=c("decreasing", "increasing"))
{
  if (!inherits(F, "ecdf")) stop("ecdf object required as input!")
  type <- match.arg(type)

  ecdf.x <- environment(F)$x
  ecdf.y <- environment(F)$y

  if (type == "decreasing") {
    # least concave majorant of the ECDF
    hull <- gcmlcm(ecdf.x, ecdf.y, type = "lcm")
  } else {
    # greatest convex minorant of the ECDF, anchored at 0 on the left
    m <- length(ecdf.y)
    hull <- gcmlcm(ecdf.x, c(0, ecdf.y[-m]), type = "gcm")
  }

  # The density is piecewise constant between knots; repeat the last slope
  # so f.knots has the same length as x.knots.
  f.knots <- c(hull$slope.knots,
               hull$slope.knots[length(hull$slope.knots)])

  g <- list(F = F,
            x.knots = hull$x.knots,
            F.knots = hull$y.knots,
            f.knots = f.knots)
  class(g) <- "grenander"
  return(g)
}
# Plot method for "grenander" objects: the step-function density on the left
# and the ECDF with its fitted hull overlaid on the right.
plot.grenander <- function(x, ...)
{
  # A decreasing estimate has a larger first slope than second.
  if (x$f.knots[1] > x$f.knots[2])
    main = "Grenander Decreasing Density"
  else
    main = "Grenander Increasing Density"

  # BUG FIX: restore the caller's graphical parameters on exit (including
  # on error) instead of clobbering mfrow for the rest of the session.
  opar <- par(mfrow = c(1, 2))
  on.exit(par(opar), add = TRUE)

  plot(x$x.knots, x$f.knots, type = "s", xlab = "x", ylab = "fn(x)",
       main = main, col = 4, lwd = 2, ...)
  plot(x$F, do.points = FALSE)
  lines(x$x.knots, x$F.knots, type = "l", col = 4, lwd = 2)
}
|
3ae1ebe0607d1fa8a058ac1a6c27c5f57a2b22ee
|
669667464586efb1d2ce211ba5d9e4a5eb875e48
|
/analysis/DSTL07analysis.R
|
5951982f076fad0a703fc4b202f2f38c66a8e403
|
[] |
no_license
|
ceredmunds/DSTL07
|
7e1fd3312370774b6c9e23e039ac0d610e4f3135
|
4746227658de27be2100c6374e2b8504606a6e71
|
refs/heads/main
| 2023-05-08T05:32:29.834669
| 2021-06-03T09:43:37
| 2021-06-03T09:43:37
| 302,604,957
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,778
|
r
|
DSTL07analysis.R
|
# Preprocessing of DSTL07 experiment data
# C. E. R. Edmunds, Queen Mary, University of London.
# 18-3-2021
# Setup --------------------------------------------------------------------------------------------
# NOTE(review): rm(list=ls()) inside a script and require() for mandatory
# dependencies are discouraged -- require() returns FALSE instead of erroring
# when a package is missing; prefer library() and a fresh R session.
rm(list=ls())
require(data.table); require(ggplot2); require(viridis); require(emmeans); require(BayesFactor);
require(car)
# Load the long-format experiment data and drop two participants excluded on
# data-quality grounds (reasons noted inline).
data <- fread('../data/DSTL07longData.csv')
data <- data[participant_id!=193,] # Participant with number of trials to criterion of 200
data <- data[participant_id!=3,] # Participant with double number of test trials
# Learning graphs ----------------------------------------------------------------------------------
lData <- data[experiment_phase=="learning",]
lData[, Accuracy:= ifelse(correct, 1, 0)]
# Getting summary statistics of category learning
lSummary <- lData[, list(trialsToCriterion=.N,
meanAccuracy=mean(Accuracy),
meanRT=mean(rt)),
by=.(participant_id, displayCondition, socialCondition)]
lGraphData <- lSummary[, list(meanTrials=mean(trialsToCriterion),
sdTrials=sd(trialsToCriterion),
meanAccuracy=mean(meanAccuracy),
sdAccuracy=sd(meanAccuracy),
meanRT=mean(meanRT),
sdRT=sd(meanRT),
N=.N),
by=.(displayCondition, socialCondition)]
lGraphData[, `:=`(nTrialsError=sqrt(2)*qnorm(0.975)*sdTrials/sqrt(N),
accuracyError=sqrt(2)*qnorm(0.975)*sdAccuracy/sqrt(N),
rtError=sqrt(2)*qnorm(0.975)*sdRT/sqrt(N))]
# Summarising number of trials to criterion
ggplot(lGraphData, aes(x=displayCondition, y=meanTrials, fill=socialCondition)) +
geom_bar(position="dodge", stat="identity") +
geom_errorbar(aes(ymin=meanTrials-nTrialsError, ymax=meanTrials+nTrialsError), width=.1,
position=position_dodge(.9)) +
scale_fill_viridis(discrete = T) +
labs(x="Display condition", fill="Social condition", y="Mean number of trials to criterion") +
theme_bw()
ggsave("../techReport/images/DSTL07trialsCriterion.pdf", units="in", width=5, height=3)
# Summarising reaction times
ggplot(lGraphData, aes(x=displayCondition, y=meanRT, fill=socialCondition)) +
geom_bar(position="dodge", stat="identity") +
geom_errorbar(aes(ymin=meanRT-rtError, ymax=meanRT+rtError), width=.1,
position=position_dodge(.9)) +
scale_fill_viridis(discrete = T) +
labs(x="Display condition", fill="Social condition", y="Mean reaction time (ms)") +
theme_bw()
ggsave("../techReport/images/DSTL07learningRT.pdf", units="in", width=5, height=3)
# Learning phase: Trials to criterion --------------------------------------------------------------
# Tag outliers
boxplot <- boxplot(trialsToCriterion~displayCondition*socialCondition, lSummary)
quartiles <- lSummary[, list(LQ = quantile(trialsToCriterion, probs=0.25, na.rm=F),
UQ = quantile(trialsToCriterion, probs=0.75, na.rm=F)),
by=.(displayCondition, socialCondition)]
quartiles[, IQR:=UQ-LQ]
quartiles[, `:=`(upperBoundary=UQ+1.5*IQR,
lowerBoundary=LQ-1.5*IQR)]
lSummary <- merge(lSummary, quartiles[, .(displayCondition, socialCondition, upperBoundary)]) # Lower boundary all negative so ignore
lSummary[, nTrialsOutlier:= ifelse(trialsToCriterion>upperBoundary, 1, 0)]
lSummary[nTrialsOutlier==1,]
# NHST: to test assumptions
ntrials.aov <- aov(trialsToCriterion ~ displayCondition*socialCondition,
data=lSummary[nTrialsOutlier==0,])
summary(ntrials.aov)
# Check homogeneity of variance
plot(ntrials.aov, 1)
leveneTest(trialsToCriterion ~ displayCondition*socialCondition, data=lSummary[nTrialsOutlier==0,])
# Significant: homogeneity of variance violated
# Check normality
plot(ntrials.aov, 2)
aov_residuals <- residuals(object=ntrials.aov)
shapiro.test(x=aov_residuals)
# Significant: therefore violations of normality
# NHST: to report
ntrials.aov <- aov(log(trialsToCriterion) ~ displayCondition*socialCondition,
data=lSummary[nTrialsOutlier==0,])
summary(ntrials.aov)
# Get means
emmeans(ntrials.aov, ~socialCondition)
emmeans(ntrials.aov, ~displayCondition*socialCondition)
# Bayesian
lSummary[, displayCondition:= as.factor(displayCondition)]
lSummary[, socialCondition:= as.factor(socialCondition)]
lSummary[, logTrialsToCriterion:= log(trialsToCriterion)]
ntrials.bf <- anovaBF(logTrialsToCriterion ~ displayCondition*socialCondition, data=lSummary[nTrialsOutlier==0,])
summary(ntrials.bf)
ntrials.bf[4]/ntrials.bf[3]
# Learning phase: Reaction times -------------------------------------------------------------------
# Tag outliers
boxplot <- boxplot(meanRT~displayCondition*socialCondition, lSummary)
quartiles <- lSummary[, list(LQ = quantile(meanRT, probs=0.25, na.rm=F),
UQ = quantile(meanRT, probs=0.75, na.rm=F)),
by=.(displayCondition, socialCondition)]
quartiles[, IQR:=UQ-LQ]
quartiles[, `:=`(upperBoundaryRT=UQ+1.5*IQR,
lowerBoundaryRT=LQ-1.5*IQR)]
lSummary <- merge(lSummary, quartiles[, .(displayCondition, socialCondition, upperBoundaryRT)]) # Lower boundary all negative so ignore
lSummary[, rtOutlier:= ifelse(meanRT>upperBoundaryRT, 1, 0)]
lSummary[rtOutlier==1,]
# NHST: to test assumptions
rt.aov <- aov(meanRT ~ displayCondition*socialCondition, data=lSummary[rtOutlier==0,])
summary(rt.aov)
# Check homogeneity of variance
plot(rt.aov, 1)
leveneTest(meanRT ~ displayCondition*socialCondition, data=lSummary[rtOutlier==0])
# Significant: violation of homogeneity of variance
# Check normality
plot(rt.aov, 2)
aov_residuals <- residuals(object=rt.aov)
shapiro.test(x=aov_residuals)
# Significant: therefore violations of normality
# NHST: to report
rt.aov <- aov(log(meanRT) ~ displayCondition*socialCondition, data=lSummary[rtOutlier==0,])
summary(rt.aov)
emmeans(rt.aov, ~displayCondition)
emmeans(rt.aov, ~socialCondition)
# Bayesian
lSummary[, logMeanRT:=log(meanRT)]
rt.bf <- anovaBF(logMeanRT ~ displayCondition*socialCondition, lSummary)
summary(rt.bf)
rt.bf[4]/rt.bf[3]
# Test phase with all attributes -------------------------------------------------------------------
require(abd)
# Get and format data
tData <- data[experiment_phase=="test",]
# Set up output
ppts <- unique(data$participant_id)
tOutput <- data.table(expand.grid(participant_id=ppts, experiment_phase="test1", dimension=c(1:5, "woDim1", "wDim1"),
statistic=c("Xsquared", "pValue", "oddsRatio"), value=as.numeric(NA)))
tOutput <- merge(tOutput, tData[, .SD[1], by=.(participant_id)][,.(participant_id, displayCondition, socialCondition)])
tOutput[, biased:=0]
for (p in 1:length(ppts)) {
ppt <- ppts[p]
pptData <- tData[participant_id==ppt, ]
for (d in 1:5) {
respTable <- table(pptData[[paste("dimension", d, sep="")]], pptData$abstract_response_label)
chi <- chisq.test(respTable)
tOutput[participant_id==ppt & dimension==d & statistic=="Xsquared", value:= chi$statistic]
tOutput[participant_id==ppt & dimension==d & statistic=="pValue", value:= chi$p.value]
if(dim(respTable)[1]==2 & dim(respTable)[2]==2) {
tOutput[participant_id==ppt & dimension==d & statistic=="oddsRatio", value:= oddsRatio(respTable)]
} else {
tOutput[participant_id==ppt & dimension==d & statistic=="oddsRatio", biased:= 1]
}
}
respTable <- table(pptData$woDim1, pptData$abstract_response_label)
chi <- chisq.test(respTable)
tOutput[participant_id==ppt & dimension=="woDim1" & statistic=="Xsquared", value:= chi$statistic]
tOutput[participant_id==ppt & dimension=="woDim1" & statistic=="pValue", value:= chi$p.value]
if(dim(respTable)[1]==2 & dim(respTable)[2]==2) {
tOutput[participant_id==ppt & dimension=="woDim1" & statistic=="oddsRatio", value:= oddsRatio(respTable)]
} else {
tOutput[participant_id==ppt & dimension=="woDim1" & statistic=="oddsRatio", biased:= 1]
}
respTable <- table(pptData$wDim1, pptData$abstract_response_label)
chi <- chisq.test(respTable)
tOutput[participant_id==ppt & dimension=="wDim1" & statistic=="Xsquared", value:= chi$statistic]
tOutput[participant_id==ppt & dimension=="wDim1" & statistic=="pValue", value:= chi$p.value]
if(dim(respTable)[1]==2 & dim(respTable)[2]==2) {
tOutput[participant_id==ppt & dimension=="wDim1" & statistic=="oddsRatio", value:= oddsRatio(respTable)]
} else {
tOutput[participant_id==ppt & dimension=="wDim1" & statistic=="oddsRatio", biased:= 1]
}
}
# Second test phase
# Get and format data
pData <- data[experiment_phase=="test_partial",]
# Set up output
ppts <- unique(data$participant_id)
pOutput <- data.table(expand.grid(participant_id=ppts, experiment_phase="test2", dimension=c(1:5, "woDim1", "wDim1"),
statistic=c("Xsquared", "pValue", "oddsRatio"), value=as.numeric(NA)))
pOutput <- merge(pOutput, pData[, .SD[1], by=.(participant_id)][,.(participant_id, displayCondition, socialCondition)])
pOutput[, biased:=0]
for (p in 1:length(ppts)) {
ppt <- ppts[p]
pptData <- pData[participant_id==ppt, ]
for (d in 1:5) {
respTable <- table(pptData[[paste("dimension", d, sep="")]], pptData$abstract_response_label)
chi <- chisq.test(respTable)
pOutput[participant_id==ppt & dimension==d & statistic=="Xsquared", value:= chi$statistic]
pOutput[participant_id==ppt & dimension==d & statistic=="pValue", value:= chi$p.value]
# Work out dimensions
if(dim(respTable)[1]==2 & dim(respTable)[2]==2) {
pOutput[participant_id==ppt & dimension==d & statistic=="oddsRatio", value:= oddsRatio(respTable)]
} else {
pOutput[participant_id==ppt & dimension==d & statistic=="Xsquared", biased:= 1]
pOutput[participant_id==ppt & dimension==d & statistic=="pValue", biased:= 1]
pOutput[participant_id==ppt & dimension==d & statistic=="oddsRatio", biased:= 1]
}
}
respTable <- table(pptData$woDim1, pptData$abstract_response_label)
chi <- chisq.test(respTable)
pOutput[participant_id==ppt & dimension=="woDim1" & statistic=="Xsquared", value:= chi$statistic]
pOutput[participant_id==ppt & dimension=="woDim1" & statistic=="pValue", value:= chi$p.value]
if(dim(respTable)[1]==2 & dim(respTable)[2]==2) {
pOutput[participant_id==ppt & dimension=="woDim1" & statistic=="oddsRatio", value:= oddsRatio(respTable)]
} else {
pOutput[participant_id==ppt & dimension=="woDim1" & statistic=="oddsRatio", biased:= 1]
}
respTable <- table(pptData$wDim1, pptData$abstract_response_label)
chi <- chisq.test(respTable)
pOutput[participant_id==ppt & dimension=="wDim1" & statistic=="Xsquared", value:= chi$statistic]
pOutput[participant_id==ppt & dimension=="wDim1" & statistic=="pValue", value:= chi$p.value]
if(dim(respTable)[1]==2 & dim(respTable)[2]==2) {
pOutput[participant_id==ppt & dimension=="wDim1" & statistic=="oddsRatio", value:= oddsRatio(respTable)]
} else {
pOutput[participant_id==ppt & dimension=="wDim1" & statistic=="oddsRatio", biased:= 1]
}
}
output <- rbind(tOutput, pOutput)
# Look at number of participants with biased responding
output[biased==1, ] # One participant in each condition
biasedParticipants <- output[participant_id %in% c(50, 87, 131, 172),]
biasedParticipants[experiment_phase=="test1" & statistic=="Xsquared", .SD[which.max(value)],
by=.(participant_id, experiment_phase)]
# Looking at their verbal reports
data[participant_id %in% c(50, 87, 131, 172) & experiment_phase == "verbal_report_textbox_phase2", ]
data[participant_id %in% c(50, 87, 131, 172) & experiment_phase == "verbal_report_textbox_phase3", ]
# Remove biased participants
tOutput <- tOutput[biased==0, ] # Remove those participants
pOutput <- pOutput[biased==0, ] # Remove those participants
# Determining winning dimension for every participant in first test
winningDimension <- tOutput[statistic=="Xsquared" & dimension!="woDim1",.SD[which.max(value)], by=.(participant_id, experiment_phase)]
test1N <- winningDimension[, list(N=.N), by=.(dimension, displayCondition, socialCondition)]
test1table <- dcast(test1N, displayCondition + socialCondition ~ dimension, value.var="N")
xtable(test1table)
winningDimension <- pOutput[statistic=="Xsquared" & dimension!="wDim1",.SD[which.max(value)], by=.(participant_id, experiment_phase)]
test2N <- winningDimension[experiment_phase=="test2", list(N=.N), by=.(dimension, displayCondition, socialCondition)]
test2table <- dcast(test2N, displayCondition + socialCondition ~ dimension, value.var="N")
xtable(test2table)
winningDimension <- tOutput[statistic=="Xsquared" & dimension!="woDim1",.SD[which.max(value)], by=.(participant_id, experiment_phase)]
pptsDim2 <- winningDimension[dimension!=2 & experiment_phase=="test1", participant_id]
winningDimension <- pOutput[statistic=="Xsquared" & dimension!="wDim1",.SD[which.max(value)], by=.(participant_id, experiment_phase)]
test2N <- winningDimension[experiment_phase=="test2" & participant_id %in% pptsDim2,
list(N=.N), by=.(dimension, displayCondition, socialCondition)]
test2table <- dcast(test2N, displayCondition + socialCondition ~ dimension, value.var="N")
xtable(test2table)
# Confidence ratings: Graph ------------------------------------------------------------------------
cData <- data[experiment_phase=="confidence_rating_test"|experiment_phase=="confidence_rating_partial",]
# Edit data table
cData[, block:=rep(1:5, each=8, times=424)] # Add blocks
cData[, response:=as.numeric(response)] # Make response numeric
# Get summary data
cSummary <- cData[, list(meanResponse = mean(response, na.rm=T)),
by=.(participant_id, experiment_phase, socialCondition, displayCondition, block)]
# Plot graph
cGraphData <- cSummary[, list(confidenceResponse = mean(meanResponse, na.rm=T),
sdConfidence = sd(meanResponse, na.rm=T),
N=.N),
by=.(experiment_phase, socialCondition, displayCondition, block)]
cGraphData[, confidenceError:= sqrt(2)*qnorm(0.975)*sdConfidence/sqrt(N)]
cGraphData[, condition:= paste(socialCondition, displayCondition, sep="-")]
cGraphData[, block:=as.factor(block)]
ggplot(cGraphData, aes(x=block, y=confidenceResponse, color=condition)) +
facet_grid(~experiment_phase) +
geom_bar(aes(y=confidenceResponse, fill=condition), stat="identity", position="dodge") +
geom_errorbar(aes(ymin=confidenceResponse-confidenceError, ymax=confidenceResponse+confidenceError),
width=0.3, position=position_dodge(0.9)) +
scale_colour_viridis(discrete = T) +
scale_fill_viridis(discrete = T) +
theme_bw()
ggsave("../techReport/images/confidenceRatingGraph.pdf", units="in", width=8, height=4)
# Good start: not quite sure what I want to communicate with this graph.
# Confidence: Analysis -----------------------------------------------------------------------------
# ANOVA to test assumptions
conf.aov <- aov(meanResponse ~ experiment_phase*socialCondition*displayCondition, data=cSummary)
summary(conf.aov)
# Check homogeneity of variance
plot(conf.aov, 1)
leveneTest(meanResponse ~ experiment_phase*socialCondition*displayCondition, data=cSummary)
# Significant: homogeneity of variance violated
# Check normality
plot(conf.aov, 2)
aov_residuals <- residuals(object=conf.aov)
shapiro.test(x=aov_residuals)
# Significant: therefore violations of normality
hist(aov_residuals)
hist(cSummary$meanResponse)
# NHST: to report
conf.aov <- aov(meanResponse ~ experiment_phase*socialCondition*displayCondition, data=cSummary)
summary(conf.aov)
emmeans(conf.aov, ~experiment_phase)
emmeans(conf.aov, ~displayCondition)
emmeans(conf.aov, ~socialCondition)
# Bayesian
cSummary[, displayCondition:= as.factor(displayCondition)]
cSummary[, socialCondition:= as.factor(socialCondition)]
cSummary[, experiment_phase:= as.factor(experiment_phase)]
conf.bf <- anovaBF(meanResponse ~ experiment_phase*socialCondition*displayCondition, data=cSummary, whichModels="bottom")
summary(conf.bf)
# Graphs of test phase ------------------------------------------------------------------------------
tData <- data[experiment_phase=="test",]
tSummary <- tData[as.numeric(stimulusID)<=20, list(meanAccuracyTest=mean(accuracy),
meanRTtest=mean(rt)),
by=.(participant_id, displayCondition, socialCondition)]
pData <- data[experiment_phase=="test_partial",]
pSummary <- pData[as.numeric(stimulusID)<=20, list(meanAccuracyPartial=mean(accuracy),
meanRTpartial=mean(rt)),
by=.(participant_id, displayCondition, socialCondition)]
# Combine data
summary <- cbind(tSummary, pSummary[, .(meanAccuracyPartial, meanRTpartial)])
summary[, rtDiff:= meanRTtest - meanRTpartial]
summary[, meanDiff:= meanAccuracyTest - meanAccuracyPartial]
test.aov <- aov(meanDiff ~ displayCondition*socialCondition, data=summary)
summary(test.aov)
# Test phase ---------------------------------------------------------------------------------------
tData <- data[experiment_phase=="test" | experiment_phase=="test_partial",]
tSummary <- tData[as.numeric(stimulusID)<=20, list(meanAccuracy=mean(accuracy),
meanRT=mean(rt)),
by=.(participant_id, displayCondition, socialCondition, experiment_phase)]
tGraphData <- tSummary[, list(Accuracy=mean(meanAccuracy),
sdAccuracy=sd(meanAccuracy),
RT=mean(meanRT),
sdRT=sd(meanRT),
N=.N),
by=.(displayCondition, socialCondition, experiment_phase)]
tGraphData[, `:=`(accuracyError=sqrt(2)*qnorm(0.975)*sdAccuracy/sqrt(N),
rtError=sqrt(2)*qnorm(0.975)*sdRT/sqrt(N))]
ggplot(tGraphData, aes(x=displayCondition, y=Accuracy, fill=socialCondition)) +
geom_bar(position="dodge", stat="identity") +
geom_errorbar(aes(ymin=Accuracy-accuracyError, ymax=Accuracy+accuracyError), width=0.2, position=position_dodge(0.9)) +
facet_grid(~experiment_phase) +
scale_fill_viridis(discrete = T, alpha=0.8) +
theme_bw()
ggsave("../techReport/images/DSTL07testSummaryAccuracy.pdf", units="in", width=6, height=4)
ggplot(tGraphData, aes(x=displayCondition, y=RT, fill=socialCondition)) +
geom_bar(position="dodge", stat="identity") +
geom_errorbar(aes(ymin=RT-rtError, ymax=RT+rtError), width=0.2, position=position_dodge(0.9)) +
facet_grid(~experiment_phase) +
scale_fill_viridis(discrete = T, alpha=0.8) +
theme_bw()
ggsave("../techReport/images/DSTL07testSummaryRT.pdf", units="in", width=6, height=4)
# Accuracy
test.aov <- aov(meanAccuracy ~ displayCondition*socialCondition*experiment_phase +
Error(participant_id/experiment_phase), data=tSummary)
summary(test.aov)
tSummary$experiment_phase <- factor(tSummary$experiment_phase)
tSummary$displayCondition <- factor(tSummary$displayCondition)
tSummary$socialCondition <- factor(tSummary$socialCondition)
tSummary$participant_id <- factor(tSummary$participant_id)
test.bf <- anovaBF(meanAccuracy ~ displayCondition*socialCondition*experiment_phase + participant_id,
data=tSummary, whichModels="bottom", whichRandom="participant_id")
summary(test.bf)
# Reaction time
test.aov.rt <- aov(meanRT ~ displayCondition*socialCondition*experiment_phase +
Error(participant_id/experiment_phase), data=tSummary)
summary(test.aov.rt)
test.bf.rt <- anovaBF(meanRT ~ displayCondition*socialCondition*experiment_phase + participant_id,
data=tSummary, whichModels="bottom", whichRandom="participant_id")
summary(test.bf.rt)
# Verbal reports -----------------------------------------------------------------------------------
vrData <- data[experiment_phase %in% c("verbal_report_ranking_phase2",
"verbal_report_ranking_phase3"),
.(participant_id, displayCondition, socialCondition, dimensionOrder, displayOrder,
experiment_phase, responses)]
extractRank <- function(str, rank) {
  # Pull the `rank`-th comma-separated field out of `str`, then sum the
  # digits that field contains; an empty/zero field is reported as NA.
  fields <- strsplit(str, ",")[[1]]
  digits <- strsplit(fields[rank], "")[[1]]
  total <- sum(as.numeric(digits), na.rm = TRUE)
  if (total == 0) NA else total
}
vrData[, Craft:=extractRank(responses, 1), by=.(participant_id, experiment_phase)]
vrData[, Role:=extractRank(responses, 2), by=.(participant_id, experiment_phase)]
vrData[, Status:=extractRank(responses, 3), by=.(participant_id, experiment_phase)]
vrData[, Speed:=extractRank(responses, 4), by=.(participant_id, experiment_phase)]
vrData[, Direction:=extractRank(responses, 5), by=.(participant_id, experiment_phase)]
vrData[, c("dimension1", "dimension2", "dimension3", "dimension4", "dimension5"):=as.numeric(NA)]
for (n in 1:nrow(vrData)) {
d <- vrData[n,]
dimOrder <- d$dimensionOrder
dimensionOrderVector <- strsplit(dimOrder, ",")[[1]]
for (listPlace in 1:5) {
dimension <- dimensionOrderVector[listPlace]
vrData[[paste0("dimension", listPlace)]][[n]] <- d[[dimension]]
}
}
table(vrData$experiment_phase, vrData$dimension1)
rankData <- melt(vrData[,.(participant_id, displayCondition, socialCondition, experiment_phase,
dimension1,dimension2, dimension3, dimension4, dimension5)],
id.vars=c("participant_id", "experiment_phase", "displayCondition", "socialCondition"),
measure.vars=c("dimension1", "dimension2", "dimension3", "dimension4",
"dimension5"))
rankData$variable <- as.numeric(rankData$variable)
rankData[,condition:=paste0(displayCondition, socialCondition)]
# Phase 2
cor <- cor.test(rankData[experiment_phase=="verbal_report_ranking_phase2",variable],
rankData[experiment_phase=="verbal_report_ranking_phase2",value], method="spearman",
exact=FALSE)
cor
cor <- cor.test(rankData[experiment_phase=="verbal_report_ranking_phase2" & condition=="integratedoperator",variable],
rankData[experiment_phase=="verbal_report_ranking_phase2" & condition=="integratedoperator",value], method="spearman",
exact=FALSE)
cor
cor <- cor.test(rankData[experiment_phase=="verbal_report_ranking_phase2" & condition=="integratedsuperior",variable],
rankData[experiment_phase=="verbal_report_ranking_phase2" & condition=="integratedsuperior",value], method="spearman",
exact=FALSE)
cor
cor <- cor.test(rankData[experiment_phase=="verbal_report_ranking_phase2" & condition=="separatedoperator",variable],
rankData[experiment_phase=="verbal_report_ranking_phase2" & condition=="separatedoperator",value], method="spearman",
exact=FALSE)
cor
cor <- cor.test(rankData[experiment_phase=="verbal_report_ranking_phase2" & condition=="separatedsuperior",variable],
rankData[experiment_phase=="verbal_report_ranking_phase2" & condition=="separatedsuperior",value], method="spearman",
exact=FALSE)
cor
# Phase 3: Spearman correlations between the assigned rank position and the
# reported dimension value, overall and per condition.
cor1 <- cor.test(rankData[experiment_phase=="verbal_report_ranking_phase3",variable],
                rankData[experiment_phase=="verbal_report_ranking_phase3",value], method="spearman",
                exact=FALSE)
cor1
cor <- cor.test(rankData[experiment_phase=="verbal_report_ranking_phase3" & condition=="integratedoperator",variable],
                rankData[experiment_phase=="verbal_report_ranking_phase3" & condition=="integratedoperator",value], method="spearman",
                exact=FALSE)
cor
cor <- cor.test(rankData[experiment_phase=="verbal_report_ranking_phase3" & condition=="integratedsuperior",variable],
                rankData[experiment_phase=="verbal_report_ranking_phase3" & condition=="integratedsuperior",value], method="spearman",
                exact=FALSE)
cor
cor <- cor.test(rankData[experiment_phase=="verbal_report_ranking_phase3" & condition=="separatedoperator",variable],
                rankData[experiment_phase=="verbal_report_ranking_phase3" & condition=="separatedoperator",value], method="spearman",
                exact=FALSE)
cor
# BUG FIX: copy-paste error -- the separatedsuperior test under the
# "Phase 3" heading previously correlated the *phase 2* rankings.
cor <- cor.test(rankData[experiment_phase=="verbal_report_ranking_phase3" & condition=="separatedsuperior",variable],
                rankData[experiment_phase=="verbal_report_ranking_phase3" & condition=="separatedsuperior",value], method="spearman",
                exact=FALSE)
cor
# Comparing rank data
|
ef76cd6d1078d0d398a52acb0f820a4b48795917
|
42b248125cb6d6ce5211bd9232fc9c99be7a4159
|
/scripts/load.R
|
f2aaca85724c20a011772eabe4fb08751c14f026
|
[] |
no_license
|
STAT547-UBC-2019-20/group05
|
633275a4579cecfd3d500167e6e10739b33e92ff
|
aa76741a6bf15f6956cc15f2ba039f8dc653c38e
|
refs/heads/master
| 2021-01-16T12:28:02.682524
| 2020-04-07T20:02:06
| 2020-04-07T20:02:06
| 243,120,535
| 0
| 5
| null | 2020-04-05T21:56:22
| 2020-02-25T22:55:46
|
R
|
UTF-8
|
R
| false
| false
| 921
|
r
|
load.R
|
"This script downloads an online data file (via URL) and exports this data file into csv in the data folder.
Usage: scripts/load.R <url_to_read>" -> doc
suppressMessages(library(tidyverse))
suppressMessages(library(here))
suppressMessages(library(docopt))
suppressMessages(library(RCurl))
suppressMessages(library(readxl))
opt <- docopt(doc)
# Create the data folder only if it is missing: an unconditional
# dir.create() emits a warning on every re-run once the folder exists.
if (!dir.exists("data")) {
  dir.create("data")
}
# Download the Nature PhD survey workbook, read it, and re-export it as CSV
# under data/ for faster subsequent loading.
#
# url_to_read: source URL of the xlsx file (defaults to the figshare link).
main <- function(url_to_read = "https://ndownloader.figshare.com/files/18543320?private_link=74a5ea79d76ad66a8af8"){
  # Download file; mode = 'wb' keeps the binary xlsx intact on Windows
  download.file(url = url_to_read,
                destfile = (here::here("data", "Nature_PhD_Survey.xlsx")),
                mode = 'wb')
  # Read file
  survey_raw <- readxl::read_xlsx(here::here("data", "Nature_PhD_Survey.xlsx"))
  # Save as CSV for easier loading
  write_csv(survey_raw, path = (here::here("data", "survey_raw.csv")))
  # Print message
  print("This script works!")
}
main(opt$url_to_read)
|
63bdea11cf0c7d565252781487864275d3adf5ac
|
af8b1cfa36e31284367560dac2800456de9bb284
|
/R/positioning.R
|
994945a30a29d016879829f6196b886a2f227dc0
|
[
"MIT"
] |
permissive
|
LudvigOlsen/rearrr
|
f07cdf8fe92647335fb5a26ffc1416162543c59a
|
40d150b440ae06507873fad20a28345c08d48cf3
|
refs/heads/master
| 2023-04-19T04:24:49.834419
| 2023-03-01T10:48:10
| 2023-03-01T10:48:10
| 259,158,437
| 24
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,845
|
r
|
positioning.R
|
# __________________ #< 9839bd1092dbb2df7eddcf5af55ff3f2 ># __________________
# Positioning ####
## .................. #< 517be4e0a4cdf05713d79ec1bc5e9e50 ># ..................
## Position max wrapper ####
#' @title Positions the highest values with values decreasing around it
#' @description
#' \Sexpr[results=rd, stage=render]{lifecycle::badge("experimental")}
#'
#' The highest value is positioned at the given index/quantile with the other
#' values decreasing around it.
#'
#' \strong{Example}:
#'
#' The column values:
#'
#' \code{c(1, 2, 3, 4, }\strong{\code{5}}\code{)}
#'
#' and \code{position = 2}
#'
#' are \strong{ordered as}:
#'
#' \code{c(3,} \strong{\code{5}}\code{, 4, 2, 1)}
#' @author Ludvig Renbo Olsen, \email{r-pkgs@@ludvigolsen.dk}
#' @export
#' @family rearrange functions
#' @inheritParams positioning_rearranger_
#' @return
#' The sorted \code{data.frame} (\code{tibble}) / \code{vector}.
#' @examples
#' # Attach packages
#' library(rearrr)
#' library(dplyr)
#'
#' # Set seed
#' set.seed(1)
#'
#' # Create a data frame
#' df <- data.frame(
#' "index" = 1:10,
#' "A" = sample(1:10),
#' "B" = runif(10),
#' "C" = LETTERS[1:10],
#' "G" = c(
#' 1, 1, 1, 2, 2,
#' 2, 3, 3, 3, 3
#' ),
#' stringsAsFactors = FALSE
#' )
#'
#' # Position the highest index (row number)
#' position_max(df, position = 3)$index
#' position_max(df, position = 8)$index
#'
#' # Position the maximum value in each of the columns
#' position_max(df, col = "A", position = 3)$A
#' position_max(df, col = "B", position = 3)$B
#' position_max(df, col = "C", position = 3)$C
#'
#' # Randomize which elements are left and right of the position
#' position_max(df, col = "A", position = 3, shuffle_sides = TRUE)$A
#'
#' # Grouped by G
#' df %>%
#' dplyr::select(G, A) %>% # For clarity
#' dplyr::group_by(G) %>%
#' position_max(col = "A", position = 2)
#'
#' # Plot the rearranged values
#' plot(x = 1:10, y = position_max(df, col = "B", position = 3)$B)
#' plot(x = 1:10, y = position_max(df, col = "B", position = 3, shuffle_sides = TRUE)$B)
position_max <- function(data,
col = NULL,
position = NULL,
shuffle_sides = FALSE) {
positioning_rearranger_(
data = data,
col = col,
position = position,
shuffle_sides = shuffle_sides,
what = "max"
)
}
## .................. #< 9c59547263f79174341cdf66a8365a51 ># ..................
## Position min wrapper ####
#' @title Positions the lowest value with values increasing around it
#' @description
#' \Sexpr[results=rd, stage=render]{lifecycle::badge("experimental")}
#'
#' The lowest value is positioned at the given index/quantile with the other
#' values increasing around it.
#'
#' \strong{Example}:
#'
#' The column values:
#'
#' \code{c(}\strong{\code{1}}\code{, 2, 3, 4, 5)}
#'
#' and \code{position = 2}
#'
#' are \strong{ordered as}:
#'
#' \code{c(3,} \strong{\code{1}}\code{, 2, 4, 5)}
#' @author Ludvig Renbo Olsen, \email{r-pkgs@@ludvigolsen.dk}
#' @export
#' @family rearrange functions
#' @inheritParams positioning_rearranger_
#' @return
#' The sorted \code{data.frame} (\code{tibble}) / \code{vector}.
#' @examples
#' \donttest{
#' # Attach packages
#' library(rearrr)
#' library(dplyr)
#'
#' # Set seed
#' set.seed(1)
#'
#' # Create a data frame
#' df <- data.frame(
#' "index" = 1:10,
#' "A" = sample(1:10),
#' "B" = runif(10),
#' "C" = LETTERS[1:10],
#' "G" = c(
#' 1, 1, 1, 2, 2,
#' 2, 3, 3, 3, 3
#' ),
#' stringsAsFactors = FALSE
#' )
#'
#' # Position the smallest index (row number)
#' position_min(df, position = 3)$index
#' position_min(df, position = 8)$index
#'
#' # Position the minimum value in each of the columns
#' position_min(df, col = "A", position = 3)$A
#' position_min(df, col = "B", position = 3)$B
#' position_min(df, col = "C", position = 3)$C
#'
#' # Randomize which elements are left and right of the position
#' position_min(df, col = "A", position = 3, shuffle_sides = TRUE)$A
#'
#' # Grouped by G
#' df %>%
#' dplyr::select(G, A) %>% # For clarity
#' dplyr::group_by(G) %>%
#' position_min(col = "A", position = 2)
#'
#' # Plot the rearranged values
#' plot(x = 1:10, y = position_min(df, col = "B", position = 3)$B)
#' plot(x = 1:10, y = position_min(df, col = "B", position = 3, shuffle_sides = TRUE)$B)
#' }
position_min <- function(data,
col = NULL,
position = NULL,
shuffle_sides = FALSE) {
positioning_rearranger_(
data = data,
col = col,
position = position,
shuffle_sides = shuffle_sides,
what = "min"
)
}
|
929dcfd550a59efc4487e817f46382a722a18fb1
|
bdf6c66cb5577c9eb3f3e6cc25e85743210dc6a6
|
/Plot4.R
|
f9360f887d2573adb3420abce09c218bc075b200
|
[] |
no_license
|
rohanpatil88/ExData_Plotting1
|
9820bee42eef3a15780a2f20e689456d8930ba9f
|
4472cffc191381fb23ac64b526f92016924f594d
|
refs/heads/master
| 2021-01-18T05:20:43.169184
| 2014-08-10T01:37:13
| 2014-08-10T01:37:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,627
|
r
|
Plot4.R
|
# Plot4.R -- builds a 2x2 panel figure of household power consumption for
# 2007-02-01 and 2007-02-02, then copies it to plot4.png (480x480 px).
# NOTE(review): the input path is hard-coded to D:/ -- adjust per machine.
household_power_consumption <- read.csv("D:/household_power_consumption.txt", sep=";") #Loading data from txt file
household_power_consumption[household_power_consumption == "?"] <- NA #Replacing the ? with NA
household_power_consumption <- na.omit(household_power_consumption) #Removing the NA values
household_power_consumption$Global_active_power <- as.numeric(as.character(household_power_consumption$Global_active_power)) #converting factor to numeric
household_power_consumption$Date <- as.Date(household_power_consumption$Date,format ='%d/%m/%Y') #Converting Factor to date
# Restrict to the two days of interest
power_set <- household_power_consumption[household_power_consumption$Date==("2007-02-01") |household_power_consumption$Date==("2007-02-02"),] #creating a subset with given conditions
power_set$Date <- paste(power_set$Date,power_set$Time,sep = " ") #Concatenation of date and time
power_set$Date <- strptime(power_set$Date,"%Y-%m-%d %H:%M:%S")#Formating Date (POSIXlt datetime)
power_set$Sub_metering_1 <- as.numeric(as.character(power_set$Sub_metering_1))#Converting factor into numeric
power_set$Sub_metering_2 <- as.numeric(as.character(power_set$Sub_metering_2))#Converting factor into numeric
power_set$Sub_metering_3 <- as.numeric(as.character(power_set$Sub_metering_3))#Converting factor into numeric
par(mar = c(4.1,4.1,2.2,2.2)) #editing margins
par(mfrow = c(2,2)) #setting partitions: four panels filled row-wise
# Panel 1: global active power over time (type="n" draws an empty frame,
# lines() then adds the series)
with(power_set,plot(power_set$Date,Global_active_power,xlab="",ylab="Global Active Power",type="n"))
with(power_set,lines(power_set$Date,power_set$Global_active_power))
# Panel 2: voltage over time
power_set$Voltage <- as.numeric(as.character(power_set$Voltage))#converting factor into numeric
with(power_set,plot(power_set$Date,Voltage,xlab="datetime",ylab="Voltage",type="n"))
with(power_set,lines(power_set$Date,power_set$Voltage))
# Panel 3: the three sub-metering series overlaid with a legend
with(power_set,plot(power_set$Date,power_set$Sub_metering_1,xlab="",ylab="Energy Sub metering",type="n"))
with(power_set,lines(power_set$Date,power_set$Sub_metering_1,col = "black"))
with(power_set,lines(power_set$Date,power_set$Sub_metering_2,col = "Red"))
with(power_set,lines(power_set$Date,power_set$Sub_metering_3,col = "Blue"))
legend("topright", col = c("black", "red", "blue"), lty= "solid", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),cex = 0.3 ,pt.cex = 5,lwd = 1,inset = 0.05,bty = "n")
# Panel 4: global reactive power over time
power_set$Global_reactive_power <- as.numeric(as.character(power_set$Global_reactive_power))
with(power_set,plot(power_set$Date,Global_reactive_power,xlab="datetime",ylab="Global_reactive_power",type="n"))
with(power_set,lines(power_set$Date,power_set$Global_reactive_power))
# Copy the on-screen figure to a PNG and close the device
dev.copy(png,"plot4.png",width=480,height=480,units='px')#creating png file
dev.off()
|
915c69b767af1e7c8b1caf1a09dfc1e049c36d41
|
259fe6446e0f059be228f95745db1aa54ad5ce31
|
/man/layer_to_dense_DeepTRIAGE.Rd
|
a9cb26e106da97b71f8c7963620b08941759ca4d
|
[] |
no_license
|
tpq/caress
|
9fd1c306e8f6bb23f88203f6e6329a72d4689aaa
|
04386b3ab61ef9036e91ab1bbd6e42a1265b5ea9
|
refs/heads/master
| 2021-06-24T08:16:31.155396
| 2021-03-03T03:34:27
| 2021-03-03T03:34:27
| 202,971,472
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,375
|
rd
|
layer_to_dense_DeepTRIAGE.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/2-layers-DeepTRIAGE.R
\name{layer_to_dense_DeepTRIAGE}
\alias{layer_to_dense_DeepTRIAGE}
\title{Apply a DeepTRIAGE Layer}
\usage{
layer_to_dense_DeepTRIAGE(
object,
result_dim,
embed_dim = result_dim * 4,
random_embedding = FALSE,
hidden_dim = 32,
hidden_activation = "tanh",
hidden_dropout = 0.5,
name = NULL
)
}
\arguments{
\item{object}{A \code{keras} model.}
\item{result_dim}{An integer. The size of the final layer.}
\item{embed_dim}{An integer. The size of the final embedding matrix will
equal the input dimension times the embedding dimension.}
\item{random_embedding}{A boolean. Toggles whether to freeze the embedding
matrix with random values. Otherwise, the embedding matrix is trainable.}
\item{hidden_dim}{An integer. The size of the hidden layers.}
\item{hidden_activation}{A string. The activation for the hidden layers.}
\item{hidden_dropout}{A numeric. The dropout for the hidden layers.}
\item{name}{A string. The prefix label for all layers.}
}
\description{
This function applies a variant of the DeepTRIAGE attention
mechanism to the incoming layer (see DOI:10.1101/533406).
This implementation differs slightly from the publication
in that all layers have the same activation function and
the random embedding weights are optionally learnable.
}
|
431746cb188b242d72b999b61ab2451f5fa5ceae
|
7c39da976f28af016e5b1f847e68473c659ea05d
|
/R/uniprot.R
|
f62aebcde49cb7938b6dfeb6ad2073763c9fc5c6
|
[] |
no_license
|
cancer-genomics/trellis
|
b389d5e03959f8c6a4ee7f187f7749048e586e03
|
5d90b1c903c09386e239c01c10c0613bbd89bc5f
|
refs/heads/master
| 2023-02-24T05:59:44.877181
| 2023-01-09T20:38:36
| 2023-01-09T20:38:36
| 59,804,763
| 3
| 1
| null | 2023-01-11T05:22:52
| 2016-05-27T04:45:14
|
R
|
UTF-8
|
R
| false
| false
| 24,573
|
r
|
uniprot.R
|
# Collapse overlapping uniprot feature ranges for a single gene.
# Overlapping ranges are reduced; within each reduced interval, features
# whose short description is the literal string "NULL" are dropped unless
# that would empty the interval.  Any remaining "NULL" short description is
# replaced by the long description, line-wrapped to 'strwrap.width' chars.
# Returns a GRanges of the surviving features.
reduceProtein <- function(gup, hugo, strwrap.width){
  gup <- gup[gup$hugo==hugo]
  short.desc <- as.character(gup$shrt_desc)
  gup$short.desc <- short.desc
  # with.revmap records which original ranges fell into each reduced range
  red <- reduce(gup, with.revmap=TRUE)
  revmap <- red$revmap
  gup2 <- relist(gup[unlist(revmap)], revmap)
  ## if short description is null, remove
  gup3 <- endoapply(gup2, function(g){
    g2 <- g[g$short.desc != "NULL"]
    if(length(g2) == 0){
      g2 <- g
    }
    g2
  })
  gup3 <- unlist(gup3)
  isnull <- gup3$short.desc=="NULL"
  # wrap long free-text descriptions so they fit in plot labels
  strwrap.vec <- function(x){
    unlist(lapply(lapply(x, strwrap, width=strwrap.width), paste, collapse="\n"))
  }
  if(any(isnull)){
    gup3$short.desc[isnull] <- strwrap.vec(gup3$description[isnull])
  }
  gup3
}
# Per-gene cleanup of overlapping uniprot features: run reduceProtein() on
# every distinct HUGO symbol in 'gup', then flatten the per-gene results
# back into a single GRanges.
removeOverlapsWithNullShortDesc <- function(gup, strwrap.width){
  per.gene <- lapply(unique(gup$hugo),
                     function(g) reduceProtein(gup, g, strwrap.width))
  unlist(GRangesList(per.gene))
}
#' Extract features from uniprot database
#'
#' @param up uniprot features
#' @param fusions table of fusions with gene.5prime and gene.3prime columns
#' @param strwrap.width integer indicating how to wrap text for highly descriptive features in uniprot
#' @return a data.frame of uniprot features restricted to the fusion genes,
#'   with overlapping/NULL-description features cleaned up
#' @export
#' @examples
#' ## See fusions vignette
uniprotFeatures <- function(up, fusions, strwrap.width=30){
  # Stable row id used later to de-duplicate after rbind()
  up$feature.id <- seq_len(nrow(up))
  ## select features to plot
  feature_keys <- unique(up$feature_key)
  feature_keys <- feature_keys[-grep("residue", feature_keys)]
  feature_keys <- feature_keys[-grep("Repeat", feature_keys)]
  # NOTE(review): position-based selection -- this assumes a fixed ordering
  # of unique(up$feature_key) and will silently pick different keys if the
  # input table changes; verify against the intended key list.
  feature_keys <- feature_keys[c(1:3, 5, 7, 10, 11, 14, 16, 18, 20, 21, 22, 23, 24, 26, 27, 28, 30)]
  up2 <- up[up$feature_key %in% feature_keys, ]
  ## add back features that might have something to do with fusions
  up.fusion <- up[grep("fusion", up$description), ]
  up3 <- rbind(up2, up.fusion)
  up3 <- up3[!duplicated(up3$feature.id), ]
  up3 <- up3[order(up3$feature.id, decreasing=FALSE), ]
  ## some features overlap
  gup <- GRanges(up3$chrom, IRanges(start=up3$start, end=up3$stop),
                 description=up3$description,
                 shrt_desc=up3$shrt_desc,
                 hugo=up3$hugo,
                 aa_len=up3$aa_len)
  # keep only features belonging to either fusion partner
  gup <- gup[gup$hugo %in% fusions$gene.5prime | gup$hugo %in% fusions$gene.3prime]
  gup2 <- removeOverlapsWithNullShortDesc(gup, strwrap.width)
  up2 <- as.data.frame(gup2)
  # NOTE(review): self-assignment below is a no-op; kept for byte-identity
  up2$end <- up2$end
  up2
}
# Logical vector, TRUE where both fusion partners (5' and 3') appear in the
# uniprot feature table's hugo column.
bothGenesInUniprot <- function(x, uniprot){
  (x$gene.5prime %in% uniprot$hugo) & (x$gene.3prime %in% uniprot$hugo)
}
# Named logical vector flagging which fusion partner(s) are present in the
# uniprot table.  NOTE: the "gene1"/"gene2" names assume a single-row 'x'.
whichGeneInUniprot <- function(x, uniprot){
  hits <- c(x$gene.5prime %in% uniprot$hugo,
            x$gene.3prime %in% uniprot$hugo)
  setNames(hits, c("gene1", "gene2"))
}
# Pull the transcript ranges named in 'fusions' and return them named and
# ordered by gene symbol: 5' partner first, 3' partner second.  Any
# "(promoter)" suffix on a transcript id is stripped before matching.
selectTx <- function(transcripts, fusions){
  gene.symbols <- c(fusions$gene.5prime, fusions$gene.3prime)
  tx.ids <- gsub("\\(promoter\\)", "",
                 c(fusions$tx.5prime, fusions$tx.3prime))
  hits <- transcripts[transcripts$tx_name %in% tx.ids]
  names(hits) <- hits$gene_name
  hits[gene.symbols]
}
# Transcript identifiers of the 5' and 3' partners, with any "(promoter)"
# suffix stripped.
genes <- function(x) {
  tx.ids <- c(x$tx.5prime, x$tx.3prime)
  gsub("\\(promoter\\)", "", tx.ids)
}
## copied from svovarian
# Fetch the exon sets of the two fusion transcripts and build one
# transcript-span GRanges per partner (min start to max end, original
# strand).  Returns a list named by gene symbol: the 5' partner's exons,
# the 3' partner's exons, and the two-element 'transcripts' span object
# (with a 'genes' metadata column).
exonsByTx <- function(ex, fusions){
  tx <- genes(fusions)
  tx1 <- tx[1]
  tx2 <- tx[2]
  ex1 <- ex[[tx1]]
  ex2 <- ex[[tx2]]
  # Collapse each exon set to its full genomic span
  tx1 <- GRanges(seqnames(ex1)[1],
                 IRanges(min(start(ex1)),
                         max(end(ex1))),
                 strand=strand(ex1)[1])
  tx2 <- GRanges(seqnames(ex2)[1],
                 IRanges(min(start(ex2)),
                         max(end(ex2))),
                 strand=strand(ex2)[1])
  tx <- c(tx1, tx2)
  tx$genes <- c(fusions$gene.5prime[1], fusions$gene.3prime[1])
  result <- list(exons1=ex1, exons2=ex2, transcripts=tx)
  names(result) <- c(tx$genes, "transcripts")
  result
}
# Unpack the improperly-paired reads of a rearrangement object into a single
# GRanges of individual mates, labelled with the gene ('transcript') each
# read overlaps (within 5 kb, strand ignored), its mate role (R1/R2) and a
# shared pair.id linking the two mates.
meltReadPairs2 <- function(rdat, transcripts){
  gpairs <- improper(rdat)
  r1 <- as(first(gpairs), "GRanges")
  r2 <- as(GenomicAlignments::last(gpairs), "GRanges")
  names(r1) <- names(r2) <- NULL
  genes <- transcripts$genes
  # Assign each mate to the first overlapping transcript span (maxgap 5 kb)
  r1$transcript <- genes[findOverlaps(r1, transcripts,
                                      select="first",
                                      ignore.strand=TRUE,
                                      maxgap=5000)]
  r2$transcript <- genes[findOverlaps(r2, transcripts,
                                      select="first",
                                      ignore.strand=TRUE,
                                      maxgap=5000)]
  r1$read <- "R1"
  r2$read <- "R2"
  # Same pair.id on both mates so they can be re-linked after c()
  r1$pair.id <- seq_len(length(r1))
  r2$pair.id <- r1$pair.id
  reads <- c(r1, r2)
  reads
}
# Pick a single junction coordinate from a vector of split-read positions.
# If one position accounts for >75% of the reads, use it; otherwise, if the
# positions are tightly clustered (spread < 5 bp), fall back to the median;
# otherwise no consensus junction exists and we stop with an error.
.select_junction <- function(x){
  tab <- table(x)
  prop <- max(tab)/sum(tab)
  if(prop > 0.75){
    jxn <- as.integer(names(tab)[which.max(tab)])
  } else{
    if(diff(range(x)) < 5) {
      ## BUG FIX: original read `jxn <- median(jxn)`, referencing the
      ## not-yet-defined 'jxn' and raising "object 'jxn' not found".
      jxn <- median(x)
    } else {
      stop("no junction found")
    }
  }
  jxn
}
# Determine the base-pair fusion junction in each partner gene from split
# reads, and trim improperly-paired reads that overhang the junction.
# Convention: in the rearranged (tumor) genome, gene1 keeps reference
# coordinates; the end of the gene1 split reads (5'->3') abuts the start of
# the gene2 split reads.  Which genomic coordinate ('start' vs 'end')
# represents that boundary depends on each gene's strand.
# Returns list(junction = named 2-vector of junction positions,
#              rearranged.reads = trimmed improper reads + split reads).
basepairJunction <- function(rreads, roi){
  split.reads <- rreads[rreads$is.split]
  gene1.reads <- split.reads[split.reads$transcript==names(roi)[1]]
  gene2.reads <- split.reads[split.reads$transcript==names(roi)[2]]
  # Each split read must contribute one alignment per gene, in lockstep
  stopifnot(identical(gene1.reads$pair.id, gene2.reads$pair.id))
  strands <- as.character(strand(roi))
  strand1 <- strands[1]
  strand2 <- strands[2]
  gene1.reads$strand_refgen <- strands[1]
  gene2.reads$strand_refgen <- strands[2]
  ## For tumor genome, assume coordinates of gene1 are same as in reference
  red.gene2 <- reduce(gene2.reads)
  improper.reads <- rreads[!rreads$is.split]
  gene1.improper <- improper.reads[improper.reads$transcript==names(roi)[1]]
  gene2.improper <- improper.reads[improper.reads$transcript==names(roi)[2]]
  gene1.improper$strand_refgen <- strands[1]
  gene2.improper$strand_refgen <- strands[2]
  ## genome and gene2 has altered coordinates
  ##
  ## Always true: the end of the gene1 split reads (5' -> 3') abuts the
  ## start of the gene2 split reads
  ## -- what changes is how we determine the start and end of the reads
  if(strand1=="-"){
    ##
    ## the end of the gene1 split reads (5' -> 3') abuts the start of
    ## the gene2 split reads
    ##
    ## Since on minus strand, 'start' is the end of the gene1 split read
    bp.gene1 <- .select_junction(start(gene1.reads))
    ## trim improper reads that overhang the sequence junction
    starts <- pmax(bp.gene1, start(gene1.improper))
    start(gene1.improper) <- pmin(starts, end(gene1.improper))
  } else {
    bp.gene1 <- .select_junction(end(gene1.reads))
    ends <- pmin(bp.gene1[1], end(gene1.improper))
    end(gene1.improper) <- pmax(ends, start(gene1.improper))
  }
  if(strand2=="-"){
    ## Since on minus strand, 'end' is the start of the gene 2 split read
    bp.gene2 <- .select_junction(end(gene2.reads))
    ends <- pmin(bp.gene2[1], end(gene2.improper))
    end(gene2.improper) <- pmax(ends, start(gene2.improper))
  } else{
    bp.gene2 <- .select_junction(start(red.gene2))
    starts <- pmax(bp.gene2[1], start(gene2.improper))
    start(gene2.improper) <- pmin(starts, end(gene2.improper))
  }
  basepair.junction <- setNames(c(bp.gene1, bp.gene2), names(roi))
  rreads <- c(gene1.improper, gene2.improper, gene1.reads, gene2.reads)
  list(junction=basepair.junction,
       rearranged.reads=rreads)
}
# copied from svovarian
# Combine split reads (r.reads) with aberrantly-spaced read pairs (reads)
# into one GRanges with a harmonized metadata-column set: transcript, read,
# pair.id, is.split, qname.  Split reads get pair.ids numbered after the
# existing maximum so the two sources do not collide.
harmonizeReadMcols <- function(r.reads, reads){
  r.reads$pair.id <- r.reads$qname
  # Re-encode qnames as integer pair ids, offset past the improper-pair ids
  r.reads$pair.id <- as.integer(factor(r.reads$pair.id,
                                       levels=unique(r.reads$pair.id))) +
    max(reads$pair.id)
  ## combine the split reads with the aberrantly spaced reads
  reads2 <- granges(reads)
  ##reads2$rpid <- reads$rpid
  reads2$transcript <- reads$transcript
  reads2$read <- reads$read
  reads2$pair.id <- reads$pair.id
  reads2$is.split <- FALSE
  reads2$qname <- NA
  r.reads2 <- granges(r.reads)
  ##r.reads2$rpid <- r.reads$qname
  r.reads2$transcript <- r.reads$transcript
  r.reads2$read <- ""
  r.reads2$pair.id <- r.reads$pair.id
  r.reads2$is.split <- TRUE
  r.reads2$qname <- r.reads$qname
  reads3 <- c(reads2, r.reads2)
  reads3
}
# Build per-gene exon track data.frames for plotting, marking each exon as
# clipped (TRUE) when it lies entirely beyond the base-pair junction
# recorded in roi$bp.jxn ("chrom:pos" strings).  Which side counts as
# "beyond" depends on the gene's strand and its role (5' vs 3' partner).
# NOTE(review): the 'reads' argument is unused in this function.
exonTracks2 <- function(exons, reads, roi){
  genes <- names(roi)
  gene1 <- genes[1]
  gene2 <- genes[2]
  exons1 <- exons[[gene1]]
  exons2 <- exons[[gene2]]
  exons1$gene <- gene1
  exons2$gene <- gene2
  strands <- as.character(strand(roi))
  strand1 <- strands[1]
  strand2 <- strands[2]
  exons1$is_clipped <- NA
  exons2$is_clipped <- NA
  # Parse the junction position out of the "chrom:pos" string
  jxn1 <- as.integer(strsplit(roi$bp.jxn[1], ":")[[1]][[2]])
  jxn2 <- as.integer(strsplit(roi$bp.jxn[2], ":")[[1]][[2]])
  if(strand1=="-"){
    exons1$is_clipped <- ifelse(end(exons1) < jxn1, TRUE, FALSE)
  } else{
    exons1$is_clipped <- ifelse(start(exons1) > jxn1, TRUE, FALSE)
  }
  if(strand2=="-"){
    exons2$is_clipped <- ifelse(start(exons2) > jxn2, TRUE, FALSE)
  } else{
    exons2$is_clipped <- ifelse(end(exons2) < jxn2, TRUE, FALSE)
  }
  exons[[gene1]] <- exons1
  exons[[gene2]] <- exons2
  exons <- c(exons[[gene1]], exons[[gene2]])
  exons.df <- as(exons, "data.frame")
  ##mexons <- meltExons(exons)
  ##exons.df <- as(mexons, "data.frame")
  ##exons.df2 <- rbind(exons.df, clipped.df)
  # Split the combined frame back into one track per gene
  e1 <- exons.df[exons.df$gene==genes[1], ]
  e2 <- exons.df[exons.df$gene==genes[2], ]
  exon_tracks <- list(gene1=e1, gene2=e2)
  names(exon_tracks) <- genes
  exon_tracks
}
# Assemble plotting data for one fusion: clustered rearranged reads (merged
# within 100 bp, strand ignored), the two exon tracks (gene1's track padded
# by 200 bp on each side, both given midpoint columns for labels), and the
# "gene1-gene2" fusion name.
# NOTE(review): 'x2' and 'xend' are computed but never used; 'id.rds' feeds
# only the commented-out readRDS() call.
.list_fusion_data <- function(fusion, exon_tracks, rearranged.reads){
  rreads <- rearranged.reads
  gene1 <- fusion$gene1
  gene2 <- fusion$gene2
  fusion_nm <- paste0(gene1, "-", gene2)
  id <- fusion$id
  id.rds <- paste0(id, ".rds")
  ##fusions <- readRDS(file.path("structuralvar/data/fusions/0fusions", id.rds))
  ##fusions <- fusions[grep(gene1, fusions$gene1), ]
  # Merge nearby reads into read clusters (one per transcript expected)
  rreads <- reduce(rreads, ignore.strand=TRUE, min.gapwidth=100)
  names(rreads) <- unique(rearranged.reads$transcript)
  e1 <- exon_tracks[[1]]
  e2 <- exon_tracks[[2]]
  x2 <- max(e2$end)
  e2$midx <- apply(cbind(e2$start, e2$end), 1, mean)
  x <- min(e1$start)
  xend <- x+10e3
  # Pad gene1 exons slightly so they render visibly
  e1$start <- e1$start-200
  e1$end <- e1$end+200
  e1$midx <- apply(cbind(e1$start, e1$end), 1, mean)
  exon_tracks[[1]] <- e1
  exon_tracks[[2]] <- e2
  e1$gene <- gene1
  e2$gene <- gene2
  names(exon_tracks)[1:2] <- c(gene1, gene2)
  list(rearranged.reads=rreads, exons=exon_tracks, fusion=fusion_nm)
}
# Fuse the two exon tracks into one coordinate system: clipped exons are
# dropped, gene1 keeps reference coordinates, and gene2's exons are shifted
# so its junction lands on gene1's junction.  basepair.jxn holds
# "chrom:pos" strings; only the position part is used.
# NOTE(review): returns splitReads(data.list[["rlist"]]) but
# .list_fusion_data() does not populate an "rlist" element -- confirm the
# caller supplies it.
fuseExonTracks <- function(data.list, basepair.jxn, roi){
  ##genes <- strsplit(data.list[["fusion"]], "-")[[1]]
  genes <- names(roi)
  gene1 <- genes[1]
  gene2 <- genes[2]
  tx1 <- data.list$exons[[gene1]]
  tx2 <- data.list$exons[[gene2]]
  tx1 <- tx1[!tx1$is_clipped, ]
  tx2 <- tx2[!tx2$is_clipped, ]
  strands <- as.character(strand(roi))
  ##
  ## assume gene1 is fixed (reference genome coordinates are correct)
  ##
  ## We must correct the coordinates for gene2
  basepair.jxn <- sapply(strsplit(basepair.jxn, ":"), "[", 2)
  basepair.jxn <- as.integer(basepair.jxn)
  # Shift gene2 so that its junction coincides with gene1's junction
  starts <- tx2$start - basepair.jxn[2] + basepair.jxn[1]
  ends <- tx2$end - basepair.jxn[2] + basepair.jxn[1]
  tx2$start <- starts
  tx2$end <- ends
  ##
  ## tx1 is length-0 if promoter
  ##
  if(nrow(tx1) > 0){
    tx1$sequence_junction <- basepair.jxn[1]
  }
  tx2$sequence_junction <- basepair.jxn[2]
  #tx2$midx <- rowMeans(cbind(tx2$start, tx2$end))
  fused <- rbind(tx1, tx2)
  list(exons=fused, rearranged.reads=splitReads(data.list[["rlist"]]))
}
# Variant of fuseExonTracks() for "+-" strand fusions: gene2's coordinates
# are reflected around its junction (abs of the offset) before being
# re-anchored on gene1's junction.  Here basepair.jxn is expected to be a
# numeric vector (not "chrom:pos" strings as in fuseExonTracks) -- note the
# asymmetry between the two functions.
fuseExonTracksPM <- function(data.list, basepair.jxn, roi){
  ##genes <- strsplit(data.list[["fusion"]], "-")[[1]]
  genes <- names(roi)
  gene1 <- 1
  gene2 <- 2
  tx1 <- data.list$exons[[gene1]]
  tx2 <- data.list$exons[[gene2]]
  tx1 <- tx1[!tx1$is_clipped, ]
  tx2 <- tx2[!tx2$is_clipped, ]
  strands <- as.character(strand(roi))
  ##
  ## assume gene1 is fixed (reference genome coordinates are correct)
  ##
  ## We must correct the coordinates for gene2
  # abs() flips gene2 across its junction (opposite-strand partner)
  starts <- abs(tx2$start - basepair.jxn[gene2]) + basepair.jxn[gene1]
  ends <- abs(tx2$end - basepair.jxn[gene2]) + basepair.jxn[gene1]
  tx2$start <- starts
  tx2$end <- ends
  tx1$sequence_junction <- basepair.jxn[gene1]
  tx2$sequence_junction <- basepair.jxn[gene2]
  tx2$midx <- rowMeans(cbind(tx2$start, tx2$end))
  fused <- rbind(tx1, tx2)
  list(exons=fused, rearranged.reads=data.list$rearranged.reads)
}
# Subset the uniprot feature table to one gene and add the feature midpoint
# (in amino-acid coordinates) for label placement.
proteinFeatures <- function(up, gene){
  feats <- up[up$hugo == gene, ]
  feats$midx <- (feats$start + feats$end) / 2
  feats
}
# Plot styling for a protein track.  The 5' partner (is.first = TRUE) is
# blue-themed, the 3' partner green-themed; clipped regions are grey for
# both.  'description.size' controls the text size of feature labels.
proteinParams <- function(gene, is.first, description.size=1){
  base <- list(protein = gene, is.first = is.first)
  if (is.first) {
    colors <- list(background.color = "lightblue",
                   domain.color = "steelblue",
                   clipped.color = "gray60")
  } else {
    colors <- list(background.color = "beige",
                   domain.color = "darkolivegreen",
                   clipped.color = "gray60")
  }
  p <- c(base, colors)
  p$description.size <- description.size
  p
}
# Compute the amino-acid junction positions of the fusion protein.
# The 5' break is the length of the 5' partner's clipped (retained) protein;
# the 3' break is the number of amino acids clipped off the start of the 3'
# partner (clipped coding bases / 3).  Returns an integer vector named
# "5'" and "3'".
# NOTE(review): 'rear', 'fused.protein' and 'ref.protein' are computed or
# passed but not used in the returned value -- possibly kept for side
# effects or leftovers; confirm.
aaJunction <- function(rear, roi, cds.roi, bs.genome){
  tx.names <- setNames(roi$gene_name, roi$tx_name)
  cds.full <- fullTranscripts(cds.roi)
  names(cds.full) <- tx.names[names(cds.full)]
  cds.clipped <- clip(cds.roi)
  fused.tx <- fuse(cds.clipped)
  fused.protein <- tumorProtein(bs.genome, fused.tx)
  ref.protein <- referenceProtein(bs.genome, cds.full, tx.names)
  # Normalize the clipped CDS into a two-element GRangesList (5', 3')
  if(length(left(cds.clipped)) > 0){
    cds.clipped <- GRangesList(list(left(cds.clipped)[[1]], right(cds.clipped)[[1]]))
  } else{
    cds.clipped <- GRangesList(list(unlist(left(cds.clipped)),
                                    right(cds.clipped)[[1]]))
  }
  names(cds.clipped) <- tx.names[names(tx.names)]
  clipped.5prime <- referenceProtein(bs.genome, cds.clipped, tx.names[1])
  AA.break.5prime <- length(clipped.5prime[[1]])
  genes <- as.character(tx.names)
  # Bases removed from the 3' partner, converted to amino acids
  nclipped.bases <- sum(width(cds.full[[genes[2]]])) -
    sum(width(cds.clipped[[genes[2]]]))
  nclipped.aa <- nclipped.bases/3
  AA.break.3prime <- nclipped.aa
  breaks <- c(AA.break.5prime, AA.break.3prime)
  breaks <- as.integer(round(breaks, 0))
  names(breaks) <- c("5'", "3'")
  breaks
}
# Annotate protein features with the clipped interval implied by the
# amino-acid fusion junction 'aa.jxn'.  For the 5' partner
# (params$is.first) everything after the junction is clipped; for the 3'
# partner everything before it.  Adds is.clipped, clip.start, clip.end and
# aa.jxn columns to 'p.dat'.
clippedProtein <- function(p.dat, params, aa.jxn){
  if (params$is.first) {
    p.dat$is.clipped <- p.dat$start > aa.jxn
    clip.start <- aa.jxn
    clip.end <- p.dat$aa_len[1]
  } else {
    p.dat$is.clipped <- p.dat$start < aa.jxn
    clip.start <- 0
    clip.end <- aa.jxn
  }
  p.dat$clip.start <- clip.start
  p.dat$clip.end <- clip.end
  p.dat$aa.jxn <- aa.jxn
  p.dat
}
# Merge the two clipped protein tracks (from clippedProtein()) into fused
# coordinates: protein 1 spans 1..aa.jxn unchanged; protein 2 is shifted so
# its junction starts at aa.jxn.  Domains straddling a junction are
# truncated at it; domains entirely on the clipped side are dropped.
# Returns list(coords = one row of fused coordinates per protein,
#              domains = adjusted domain rows with a shared aa.jxn column).
fuseProteinTracks <- function(data.list){
  ## no strand issues with proteins -- already taken care of
  # NOTE(review): 'fusion' below is assigned but never used
  fusion <- rbind(data.list[[1]], data.list[[2]])
  ## add row that will the size of the clipped protein
  p1.domains <- data.list[[1]]
  ## coordinates of first protein 1 - aa.jxn
  p1 <- p1.domains[1, ]
  p1$start <- 1
  p1$end <- p1$aa.jxn
  ##p1$is.clipped <- FALSE
  ##p1$aa_len <- p1$end
  p1 <- data.frame(seqnames=p1$seqnames, start=p1$start, end=p1$end,
                   hugo=p1$hugo)
  aa.jxn <- p1$end
  # Truncate any protein-1 domain that straddles the junction
  junction.in.domain <- p1.domains$start < aa.jxn & aa.jxn < p1.domains$end
  if(any(junction.in.domain)){
    index <- which(junction.in.domain)
    # NOTE(review): redundant double assignment, kept for byte-identity
    p1.domains$end[index] <- p1.domains$end[index] <- aa.jxn
    p1.domains$midx <- rowMeans(cbind(p1.domains$start, p1.domains$end))
  }
  p1.domains <- p1.domains[p1.domains$end <= aa.jxn, ]
  ## p2 coordinates: aa.jxn -> aa_len
  p2 <- data.list[[2]][1, ]
  aa.jxn2 <- p2$aa.jxn[1]
  p2$is.clipped <- FALSE
  p2$start <- aa.jxn
  p2$end <- p2$aa_len - aa.jxn2 + aa.jxn
  ##p2$aa_len <- p2$end
  p2 <- data.frame(seqnames=p2$seqnames, start=p2$start, end=p2$end,
                   hugo=p2$hugo)
  p2.domains <- data.list[[2]]
  ## does new junction occur within a domain
  junction.in.domain <- p2.domains$start < aa.jxn2 & aa.jxn2 < p2.domains$end
  if(any(junction.in.domain)){
    index <- which(junction.in.domain)
    p2.domains$end[index] <- p2.domains$end[index] <- aa.jxn2
  }
  p2.domains <- p2.domains[p2.domains$start >= aa.jxn2, , drop=FALSE]
  if(nrow(p2.domains) > 0){
    ## adjust start and end coordinates
    p2.domains$start <- p2.domains$start - aa.jxn2 + aa.jxn
    p2.domains$end <- p2.domains$end - aa.jxn2 + aa.jxn
    p2.domains$end <- p2.domains$end
    ##p2.domains$aa_len <- p2$aa_len
    p2.domains$aa.jxn <- aa.jxn
    p2.domains$midx <- rowMeans(cbind(p2.domains$start, p2.domains$end))
  }
  coords <- rbind(p1[1, ], p2[1, ])
  domains <- rbind(p1.domains, p2.domains)
  domains$aa.jxn <- rep(aa.jxn, nrow(domains))
  list(coords=coords, domains=domains)
}
# Plot styling for the two partner genes.  By convention the rearranged
# genome places gene 2 on the same strand as gene 1, so both parameter sets
# carry gene 1's strand as 'rearrangedStrand'.
geneParams <- function(roi){
  gene.names <- names(roi)
  strands <- as.character(strand(roi))
  gene1.params <- list(gene.name = gene.names[1],
                       background.color = "lightblue",
                       exon.color = "steelblue",
                       clipped.color = "gray60",
                       is.first = TRUE,
                       strand = strands[1],
                       rearrangedStrand = strands[1])
  gene2.params <- list(gene.name = gene.names[2],
                       background.color = "beige",
                       exon.color = "darkolivegreen",
                       clipped.color = "gray60",
                       is.first = FALSE,
                       strand = strands[2],
                       rearrangedStrand = strands[1])
  list(gene1.params, gene2.params)
}
## #' Collect transcript and protein-level data supporting a fusion for into a list object for subsequent plotting
## #'
## #'
## #' @param build length-one character vector providing genome build
## #' @param data.list a list object created by \code{listFusionData}
## #' @export
## #' @return a named list
## fusionData <- function(data.list, build=c("hg19", "hg18")){
## x <- data.list[["fusions"]]
## rlist <- data.list[["rlist"]]
## uniprot <- data.list[["uniprot"]]
##
## transcripts <- loadTx(build)
## roi <- selectTx(transcripts, x)
## chroms <- as.character(seqnames(roi))
##
## rid <- x$rearrangement.id
## if(length(rid) > 1) stop("no rearrangements")
## r <- rlist[[rid]]
##
## txdb <- loadTxDb(match.arg(build))
## cds.all <- suppressWarnings(cdsBy(txdb, "tx",
## use.names=TRUE))
## exons.txdb <- suppressWarnings(exonsBy(txdb, "tx", use.names=TRUE))
##
## genes <- c(x$gene1, x$gene2)
## exons <- exonsByTx(exons.txdb, x)
##
## reads <- meltReadPairs2(r, exons$transcripts)
## readclusters <- exons$transcripts
##
## build <- "hg19"
## bs.pkg <- paste0("BSgenome.Hsapiens.UCSC.", build)
## bs.genome <- getBSgenome(bs.pkg)
##
## reads <- meltReadPairs2(r, exons$transcripts)
## readclusters <- exons$transcripts
##
## r.reads <- sr[[rid]]
## ix <- findOverlaps(r.reads, readclusters, select="first", maxgap=5000)
## r.reads$transcript <- readclusters$genes[ix]
## rreads <- harmonizeReadMcols(r.reads, reads)
## bp.jxn <- basepairJunction(rreads, roi)
## if(is.null(bp.jxn)){
## reverse.roi <- TRUE
## bp.jxn <- basepairJunction(rreads, roi[2:1])
## } else reverse.roi <- FALSE
##
## rreads <- bp.jxn[["rearranged.reads"]]
## bp.jxn <- bp.jxn[["junction"]]
## roi$bp.jxn <- bp.jxn[names(roi)]
## exon.tracks <- exonTracks2(exons, rreads, roi)
## fusion.dat <- .list_fusion_data(x, exon.tracks, rreads)
##
## strands <- as.character(strand(roi))
## strands <- paste(strands, collapse="")
## fused.transcripts <- fuseExonTracks(fusion.dat, bp.jxn, roi)
## if(strands=="+-"){
## fused.transcripts <- fuseExonTracksPM(fusion.dat, bp.jxn, roi)
## }
##
## p1 <- proteinFeatures(uniprot, genes[1])
## p2 <- proteinFeatures(uniprot, genes[2])
## p1.params <- proteinParams(genes[1], TRUE, description.size=2)
## p2.params <- proteinParams(genes[2], FALSE, description.size=2)
## g.params <- geneParams(roi)
##
## cds.roi <- getCDS(r, roi, cds.all)
## aa.jxns <- aaJunction(r, roi, cds.roi, bs.genome)
## roi$aa.jxn <- aa.jxns
##
## p1.clipped <- clippedProtein(p1, p1.params, aa.jxns["5'"])
## p2.clipped <- clippedProtein(p2, p2.params, aa.jxns["3'"])
## clip.list <- list(p1.clipped, p2.clipped)
## names(clip.list) <- genes
## p.fusions <- fuseProteinTracks(clip.list)
##
## rreads.df <- as.data.frame(rreads)
## list(chroms=chroms,
## roi=roi,
## fusion.dat=fusion.dat,
## g.params=g.params,
## fused.transcripts=fused.transcripts,
## rreads=rreads.df,
## reverse.roi=reverse.roi,
## protein1=p1,
## protein2=p2,
## protein1.clipped=p1.clipped,
## protein2.clipped=p2.clipped,
## protein1.params=p1.params,
## protein2.params=p2.params,
## protein.fusion=p.fusions)
## }
## fusionData2 <- function(data.list, build=c("hg19", "hg18")){
## x <- data.list[["fusions"]]
## rlist <- data.list[["rlist"]]
## uniprot <- data.list[["uniprot"]]
##
## transcripts <- loadTx(build)
## roi <- selectTx(transcripts, x)
## chroms <- as.character(seqnames(roi))
##
## rid <- x$rearrangement.id
## if(length(rid) > 1) stop("no rearrangements")
## r <- rlist[[rid]]
##
## txdb <- loadTxDb(match.arg(build))
## cds.all <- suppressWarnings(cdsBy(txdb, "tx",
## use.names=TRUE))
## exons.txdb <- suppressWarnings(exonsBy(txdb, "tx", use.names=TRUE))
##
## genes <- c(x$gene1, x$gene2)
## exons <- exonsByTx(exons.txdb, x)
##
## reads <- meltReadPairs2(r, exons$transcripts)
## readclusters <- exons$transcripts
##
## build <- "hg19"
## bs.pkg <- paste0("BSgenome.Hsapiens.UCSC.", build)
## bs.genome <- getBSgenome(bs.pkg)
##
## reads <- meltReadPairs2(r, exons$transcripts)
## readclusters <- exons$transcripts
##
## r.reads <- sr[[rid]]
## ix <- findOverlaps(r.reads, readclusters, select="first", maxgap=5000)
## r.reads$transcript <- readclusters$genes[ix]
## rreads <- harmonizeReadMcols(r.reads, reads)
## bp.jxn <- basepairJunction(rreads, roi)
## if(is.null(bp.jxn)){
## reverse.roi <- TRUE
## bp.jxn <- basepairJunction(rreads, roi[2:1])
## } else reverse.roi <- FALSE
##
## rreads <- bp.jxn[["rearranged.reads"]]
## bp.jxn <- bp.jxn[["junction"]]
## roi$bp.jxn <- bp.jxn[names(roi)]
## exon.tracks <- exonTracks2(exons, rreads, roi)
## fusion.dat <- .list_fusion_data(x, exon.tracks, rreads)
##
## strands <- as.character(strand(roi))
## strands <- paste(strands, collapse="")
## fused.transcripts <- fuseExonTracks(fusion.dat, bp.jxn, roi)
## if(strands=="+-"){
## fused.transcripts <- fuseExonTracksPM(fusion.dat, bp.jxn, roi)
## }
##
## p1 <- proteinFeatures(uniprot, genes[1])
## p2 <- proteinFeatures(uniprot, genes[2])
## p1.params <- proteinParams(genes[1], TRUE, description.size=2)
## p2.params <- proteinParams(genes[2], FALSE, description.size=2)
## g.params <- geneParams(roi)
##
## cds.roi <- getCDS(r, roi, cds.all)
## aa.jxns <- aaJunction(r, roi, cds.roi, bs.genome)
## roi$aa.jxn <- aa.jxns
##
## p1.clipped <- clippedProtein(p1, p1.params, aa.jxns["5'"])
## p2.clipped <- clippedProtein(p2, p2.params, aa.jxns["3'"])
## clip.list <- list(p1.clipped, p2.clipped)
## names(clip.list) <- genes
## p.fusions <- fuseProteinTracks(clip.list)
##
## rreads.df <- as.data.frame(rreads)
## list(chroms=chroms,
## roi=roi,
## fusion.dat=fusion.dat,
## g.params=g.params,
## fused.transcripts=fused.transcripts,
## rreads=rreads.df,
## reverse.roi=reverse.roi,
## protein1=p1,
## protein2=p2,
## protein1.clipped=p1.clipped,
## protein2.clipped=p2.clipped,
## protein1.params=p1.params,
## protein2.params=p2.params,
## protein.fusion=p.fusions)
## }
#' Collect fusion-related data into a single list
#'
#' @param fusions an object returned by \code{fusionList}
#' @param rlist a \code{RearrangementList}
#' @return a named list
#' @export
listFusionData <- function(rlist, fusions){
  # Load the uniprot annotation table shipped with the svfusions package and
  # derive per-gene protein features for the candidate fusions.
  extdata <- system.file("extdata", package="svfusions")
  up <- readRDS(file.path(extdata, "uniprot.rds"))
  up2 <- uniprotFeatures(up, fusions, strwrap.width=20)
  # Keep only fusions for which *both* partner genes have uniprot entries.
  both.genes <- bothGenesInUniprot(fusions, up2)
  if(!any(both.genes)){
    # No usable fusion: report which individual genes were found and return
    # that lookup instead of the usual List (callers must handle this case).
    in.uniprot <- whichGeneInUniprot(fusions, up2)
    msg <- "Both genes are not in uniprot."
    message(msg)
    return(in.uniprot)
  }
  x <- fusions[both.genes, ]
  # Bundle everything downstream plotting/summarising code needs.
  List(rlist=rlist,
       fusions=x,
       uniprot=up2,
       split_reads=splitReads(rlist))
}
|
3a045c1e386f7aee0e47921c10cbad7bb95976a5
|
e9eefc7f75f79219f12952a5a0753db7125e54c0
|
/man/print.femeta.Rd
|
90177cf3c600abaee9a4729f961b5e64f07bb685
|
[] |
no_license
|
KenLi93/FEMetaBin
|
0eac83e4f5866a7490e68c61f4928e1bfb69e22f
|
f79ab41f9c0e8853cbb42894d7b56110051b69f8
|
refs/heads/master
| 2020-08-14T06:57:23.038430
| 2019-10-16T17:14:37
| 2019-10-16T17:14:37
| 215,117,738
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 600
|
rd
|
print.femeta.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/femeta.R
\name{print.femeta}
\alias{print.femeta}
\title{Printing results of fixed-effects meta-analysis}
\usage{
\method{print}{femeta}(x, digits = getOption("digits"), prefix = "\\t",
...)
}
\arguments{
\item{x}{object of class "femeta"}
\item{digits}{number of significant digits to be used}
\item{prefix}{string, passed to strwrap for displaying the method component of the \code{femeta} object.}
\item{...}{other arguments passed to the method.}
}
\description{
Printing results of fixed-effects meta-analysis
}
|
25f52454dd2cea1f8fe2b26317f720991335c864
|
703bef959e3644411c7e7c9acb9953cd57652d5c
|
/ex_1_5.R
|
710855e8f15d9d69269d438188acfadb249cb9b8
|
[
"MIT"
] |
permissive
|
crazymidnight/learn-r
|
8dd9342bbf1ae4bb3621c1ee62c932f96316f044
|
f4ecd2493fa7f80e070eb3840aa7cf99b8594f6e
|
refs/heads/master
| 2020-04-23T13:48:22.208063
| 2019-06-05T19:22:43
| 2019-06-05T19:22:43
| 171,210,026
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 354
|
r
|
ex_1_5.R
|
# Exercise 1.5: matrix construction, subsetting and element assignment.

# A: build a 4x2 matrix, filling values row by row (byrow=TRUE).
resA <- matrix(c(4.3, 3.1, 8.2, 8.2, 3.2, 0.9, 1.6, 6.5), nrow=4, ncol=2, byrow=TRUE)
print(resA)
# B: drop the third row; dim() confirms the resulting 3x2 shape.
resB <- resA[-3,]
print(dim(resB))
# C: sort the second column in place (row pairing with column 1 is lost).
resC <- resA
resC[,2] <- sort(resC[,2])
print(resC)
# D: drop row 4 and column 1. Selecting a single column collapses the
# result to a plain vector, so matrix() is used to restore matrix shape.
resD <- resC[-4, -1]
print(resD)
print(matrix(resD))
# E: keep only the last two rows.
resE <- resC[3:4,]
print(resE)
# F: assign into the 2x2 submatrix formed by rows c(4, 1) and columns
# c(2, 1); the length-2 RHS (-0.5 * diagonal of resE) is recycled
# column-wise across the four selected cells.
resC[c(4, 1), c(2, 1)] <- -0.5 * diag(resE)
print(resC)
|
80c1f420a840bad7da0d8a72c2ed30f0d2e5191d
|
627119064049fb9cf070d73e3766cceac02ab514
|
/R/ltza.R
|
0bb09779b4b178f3f41b31c13236b5600ecf894d
|
[] |
no_license
|
cran/HKprocess
|
7061c227c0593010f2abde4db3fea1fd008aed31
|
b7aea6c4c497d4b822c11a460390029ab09537e7
|
refs/heads/master
| 2022-11-14T08:24:10.955085
| 2022-10-26T21:17:59
| 2022-10-26T21:17:59
| 49,682,780
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 434
|
r
|
ltza.R
|
ltza <- function(r, x) {
  # Thin wrapper around the compiled `ltza` routine in the HKprocess package.
  # NOTE(review): judging by the output names, `r` appears to parameterize a
  # matrix R and `x` is the data vector -- confirm against the C source.
  nx <- as.integer(length(x))
  # Machine epsilon, passed to the C routine as a numerical tolerance.
  EPS <- .Machine$double.eps
  # Call the ltza.c from the C library
  out <- .C("ltza",as.double(r),nx,as.double(x),nx,EPS,
            tr = array(0,dim = c(1,4)),fault = as.integer(1),PACKAGE = "HKprocess")
  # The routine fills `tr` with four statistics; attach descriptive names.
  # NOTE(review): `out$fault` is never inspected here -- presumably non-zero
  # signals a failure in the C code; verify whether it should be checked.
  return(setNames(as.vector(out$tr),c("t(x) * solve(R) * x",
                                      "t(en) * solve(R) * x","t(en) * solve(R) * en",
                                      "natural logarithm of determinant")))
}
|
e5bfcadfa8d83d0e29a7fd158257804c96114d0f
|
d7dd84b329e0489dd72cc6da8c760e61a5bb1953
|
/fmac-predict/.Rprofile
|
c713add8d84b8e6767fd8c08c637901044831ac0
|
[
"Apache-2.0"
] |
permissive
|
seanahmad/fin-pyth-examples
|
3a4752f8d0e3ea63748d7f9c25c62c442934b8be
|
414f7661ca1f6d8022872f38179ce3aff08534f3
|
refs/heads/master
| 2023-07-18T04:02:38.387745
| 2019-06-22T14:04:14
| 2019-06-22T14:04:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 54
|
rprofile
|
.Rprofile
|
# Project-local .Rprofile: pin the Python interpreter used by reticulate
# before any package code runs. NOTE(review): assumes a virtualenv exists at
# ~/python on every machine running this project -- confirm per deployment.
Sys.setenv(RETICULATE_PYTHON = "~/python/bin/python")
|
23c6f1a786f000ca733b0d491e030901b10c9ca8
|
b6bd266b6b10290665231f1cc9bc892b51cf6716
|
/man/pm25_2019.Rd
|
da1f6f78f2f9e4cea55313dc228605fc2d90e272
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
tereom/estcomp
|
9a95e9a0be674d1f029801d3818a8aee8cf3f718
|
817f7e20ab82bffd064db4ccd68f5303a72844e5
|
refs/heads/master
| 2020-06-30T15:26:14.627799
| 2019-11-05T16:17:34
| 2019-11-05T16:17:34
| 200,871,105
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 692
|
rd
|
pm25_2019.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pm25_2019.R
\docType{data}
\name{pm25_2019}
\alias{pm25_2019}
\title{PM2.5 pollutant measurements in Mexico City}
\format{A data frame with 5088 rows and 26 columns:
\describe{
\item{date}{Date when the measurement was taken}
\item{hour}{Hour at which the measurement was taken, 1 to 24}
\item{AJM, AJU,...,XAL}{Initials indicating the monitoring station}
}}
\usage{
pm25_2019
}
\description{
A data set containing the measurements of PM2.5, particles smaller than 2.5
micrometers measured in micrograms per cubic meter, across 70 stations
within Mexico City for the first months of 2019.
}
\keyword{datasets}
|
a6405c56e88fb3209a1cd296c78c52579fa7f299
|
e68e99f52f3869c60d6488f0492905af4165aa64
|
/R/nn.R
|
1084704388d825071d9bb0e192ae91ba025a59f7
|
[
"MIT"
] |
permissive
|
mlverse/torch
|
a6a47e1defe44b9c041bc66504125ad6ee9c6db3
|
f957d601c0295d31df96f8be7732b95917371acd
|
refs/heads/main
| 2023-09-01T00:06:13.550381
| 2023-08-30T17:44:46
| 2023-08-30T17:44:46
| 232,347,878
| 448
| 86
|
NOASSERTION
| 2023-09-11T15:22:22
| 2020-01-07T14:56:32
|
C++
|
UTF-8
|
R
| false
| false
| 25,817
|
r
|
nn.R
|
#' @include utils-data.R
NULL
get_inherited_classes <- function(inherit) {
  # Class labels recorded on the parent generator's `.classes` public field.
  classes <- inherit$public_fields$.classes
  # Drop the framework classes that the nn_module constructor re-appends,
  # leaving only user-meaningful class names.
  framework <- c("nn_Module", "R6ClassGenerator", "nn_module_generator", "nn_module")
  classes <- classes[!classes %in% framework]
  # De-duplicate, keeping the *last* occurrence of each repeated name.
  classes[!duplicated(classes, fromLast = TRUE)]
}
# Base R6 class backing every `nn_module`. It keeps three registries in its
# private environment -- parameters_, buffers_ and modules_ -- and provides
# the recursive traversal machinery (train/eval, .apply, state_dict,
# printing) shared by all modules.
#
# Fixes relative to the previous revision:
#   * `to()` error message no longer contains the stray quotes/newline left
#     over from a Python-style implicit string concatenation.
#   * Removed a dead `param$detach` statement in `.save_to_state_dict`
#     (it extracted the method without calling it; detaching is done by
#     `keepvars_or_detach()`).
nn_Module <- R6::R6Class(
  classname = "nn_Module",
  lock_objects = FALSE,
  public = list(
    # Whether the module is in training mode; toggled via train()/eval().
    training = TRUE,
    initialize = function() {},
    forward = function(...) {
      not_implemented_error("Forward method is not implemented")
    },
    # Alias kept for pytorch API parity.
    add_module = function(name, module) {
      self$register_module(name, module)
    },
    register_module = function(name, module) {
      # Numeric names (e.g. nn_sequential indices) are stored as strings.
      if (is.numeric(name)) {
        name <- as.character(name)
      }
      private$modules_[[name]] <- module
    },
    register_parameter = function(name, param) {
      private$parameters_[[name]] <- param
    },
    register_buffer = function(name, tensor, persistent = TRUE) {
      # Non-persistent buffers are tracked by name so that state_dict()
      # serialization can skip them.
      private$buffers_[[name]] <- tensor
      if (persistent) {
        private$non_persistent_buffers_ <- private$non_persistent_buffers_[
          private$non_persistent_buffers_ != name
        ]
      } else {
        private$non_persistent_buffers_ <- unique(c(
          private$non_persistent_buffers_,
          name
        ))
      }
    },
    # Recursively set the training flag on this module and all submodules.
    train = function(mode = TRUE) {
      self$training <- mode
      lapply(private$modules_, function(m) m$train(mode))
      invisible(create_nn_module_callable(self))
    },
    eval = function() {
      self$train(FALSE)
    },
    # Apply `fn` to every parameter, its gradient, and every buffer,
    # recursing into submodules first. Backbone of device/dtype moves.
    .apply = function(fn) {
      for (module in private$modules_) {
        module$.apply(fn)
      }
      for (param_name in names(private$parameters_)) {
        param <- private$parameters_[[param_name]]
        if (!is.null(param)) {
          # Tensors stored in modules are graph leaves, and we don't want to
          # track autograd history of `param_applied`, so we have to use
          # `with torch.no_grad():`
          with_no_grad({
            param_applied <- fn(param)
          })
          private$parameters_[[param_name]] <-
            nn_parameter(param_applied, param$requires_grad)
        }
        if (!is.null(param) && !is_undefined_tensor(param$grad)) {
          # Transform the gradient alongside the parameter, preserving its
          # requires_grad flag.
          with_no_grad({
            grad_applied <- fn(param$grad)
          })
          grad_applied$requires_grad_(param$grad$requires_grad)
          private$parameters_[[param_name]]$set_grad_(grad_applied)
        }
      }
      for (buf_name in names(private$buffers_)) {
        buf <- private$buffers_[[buf_name]]
        if (!is.null(buf)) {
          private$buffers_[[buf_name]] <- fn(buf)
        }
      }
      invisible(create_nn_module_callable(self))
    },
    # NOTE(review): `device` is currently ignored -- tensors are moved to
    # the default CUDA device. Confirm before relying on multi-GPU moves.
    cuda = function(device = NULL) {
      self$.apply(function(x) x$cuda())
    },
    cpu = function() {
      self$.apply(function(x) x$cpu())
    },
    # Move/convert all tensors in the module. Only floating point dtype
    # conversions are allowed; non-floating tensors keep their dtype and
    # only change device/layout.
    to = function(dtype = NULL, device = NULL, tensor = NULL, non_blocking = FALSE, copy = FALSE,
                  memory_format = torch_preserve_format()) {
      if (!is.null(dtype)) {
        if (!dtype$is_floating_point) {
          value_error("nn.Module.to only accepts floating point dtypes, but got desired dtype {dtype}")
        }
      }
      self$.apply(function(x) {
        if (x$is_floating_point()) {
          x$to(dtype, device, tensor, non_blocking, copy, memory_format)
        } else {
          x$to(
            device = device, non_blocking = non_blocking, copy = copy,
            memory_format = memory_format
          )
        }
      })
    },
    print = function() {
      print_nn_module(self, private)
    },
    # Collect this module's own parameters and persistent buffers into a
    # named list; names are prefixed with `prefix` (dot-separated path).
    .save_to_state_dict = function(prefix, keepvars) {
      out <- list()
      for (param_name in names(private$parameters_)) {
        param <- private$parameters_[[param_name]]
        if (!is.null(param)) {
          # keepvars_or_detach() returns a detached copy unless keepvars.
          out[[paste0(prefix, param_name)]] <- keepvars_or_detach(param, keepvars)
        }
      }
      for (buf_name in names(private$buffers_)) {
        buf <- private$buffers_[[buf_name]]
        if (!is.null(buf) && !(buf_name %in% private$non_persistent_buffers_)) {
          out[[paste0(prefix, buf_name)]] <- keepvars_or_detach(buf, keepvars)
        }
      }
      out
    },
    # Full (recursive) state dict: this module's tensors plus those of every
    # submodule, with dot-separated name prefixes.
    state_dict = function(prefix = "", keepvars = FALSE) {
      out <- list()
      out <- c(out, self$.save_to_state_dict(prefix, keepvars))
      for (module_name in names(private$modules_)) {
        module <- private$modules_[[module_name]]
        if (!is.null(module)) {
          out <- c(out, module$state_dict(
            prefix = paste0(prefix, module_name, "."),
            keepvars = keepvars
          ))
        }
      }
      out
    },
    # Restore this module's own tensors from `state_dict` (non-recursive);
    # errors if any expected key is missing.
    .load_from_state_dict = function(state_dict, prefix) {
      persistent_buffers <- private$buffers_[!names(private$buffers_) %in% private$non_persistent_buffers_]
      local_name_params <- c(private$parameters_, persistent_buffers)
      local_state <- local_name_params[!sapply(local_name_params, is.null)]
      for (name in names(local_state)) {
        key <- paste0(prefix, name)
        if (key %in% names(state_dict)) {
          input_param <- state_dict[[key]]
          param <- local_state[[name]]
          if (!self$..refer_to_state_dict..) {
            # Default path: copy values into the existing tensors.
            with_no_grad({
              param$copy_(input_param)
            })
          } else {
            # setting requires grad is ignored if param is not a valid pointer
            # be careful!
            if (!is_null_external_pointer(param)) {
              input_param$requires_grad_(param$requires_grad)
            }
            if (name %in% names(persistent_buffers)) {
              private$buffers_[[name]] <- input_param
            } else {
              private$parameters_[[name]] <- input_param
            }
          }
        } else {
          value_error("Could not find {key} in the state_dict.")
        }
      }
    },
    load_state_dict = function(state_dict, ..., .refer_to_state_dict = FALSE) {
      # by default the state dict parameter values are copied into the parameters
      # of the modules. with `.refer_to_state_dict` you can make the parameters
      # refer to the tensors in the state dict. USE WITH CAUTION as it's easy to
      # mess up and link the parametrs of two models that way. This is useful when
      # you want to initialize the model with the state dict values and will dispose
      # of the state dict rightly after.
      load <- function(module, state_dict, prefix = "") {
        module$..refer_to_state_dict.. <- .refer_to_state_dict
        module$.load_from_state_dict(state_dict, prefix)
        for (nm in names(module$.__enclos_env__$private$modules_)) {
          child <- module$.__enclos_env__$private$modules_[[nm]]
          if (!is.null(child)) {
            load(child, state_dict, prefix = paste0(prefix, nm, "."))
          }
        }
      }
      load(self, state_dict)
      invisible(create_nn_module_callable(self))
    },
    # Detach and zero the gradient of every parameter (recursively, via the
    # `parameters` active binding).
    zero_grad = function() {
      for (p in self$parameters) {
        if (!is_undefined_tensor(p$grad)) {
          p$grad$detach_()
          p$grad$zero_()
        }
      }
    },
    # Apply `fn` to each submodule (depth-first), then to self.
    apply = function(fn) {
      for (module in private$modules_) {
        module$apply(fn)
      }
      fn(self)
      invisible(create_nn_module_callable(self))
    },
    named_parameters = function(recursive = TRUE) {
      if (recursive) {
        self$parameters
      } else {
        private$parameters_
      }
    },
    named_buffers = function(recursive = TRUE) {
      if (recursive) {
        self$buffers
      } else {
        private$buffers_
      }
    },
    # Rewire submodules/parameters/buffers to replacements looked up by
    # object address; used by the deep-clone machinery.
    .replace_values_from_table = function(table) {
      for (i in seq_along(private$modules_)) {
        module <- private$modules_[[i]]
        private$modules_[[i]] <- table[[rlang::obj_address(module)]] %||% module
      }
      lapply(private$modules_, function(x) x$.replace_values_from_table(table))
      for (i in seq_along(private$parameters_)) {
        par <- private$parameters_[[i]]
        # par or buf might not be available in `table` if, for some reason they
        # have already been replaced. This happens for example, when a module
        # has the same layer twice. this also applies for modules, they might be duplicated
        private$parameters_[[i]] <- table[[xptr_address(par)]] %||% par
      }
      for (i in seq_along(private$buffers_)) {
        buf <- private$buffers_[[i]]
        private$buffers_[[i]] <- table[[xptr_address(buf)]] %||% buf
      }
    }
  ),
  private = list(
    parameters_ = list(),
    buffers_ = list(),
    modules_ = list(),
    # Names of buffers excluded from state_dict serialization.
    non_persistent_buffers_ = character()
  ),
  active = list(
    # Read-only, recursive view of all parameters (deduplicated).
    parameters = function(value, recursive = TRUE) {
      if (!missing(value)) {
        runtime_error(
          "It's not possible to modify the parameters list.\n",
          " You can modify the parameter in-place or use",
          " `module$parameter_name <- new_value`"
        )
      }
      pars <- lapply(private$modules_, function(x) x$parameters)
      pars <- append(pars, self$named_parameters(recursive = FALSE))
      pars <- unlist(pars, recursive = TRUE, use.names = TRUE)
      pars <- pars[!duplicated(pars)] # unique doesn't preserve the names
      pars
    },
    # Read-only, recursive view of all buffers (deduplicated).
    buffers = function(value) {
      if (!missing(value)) {
        runtime_error(
          "It's not possible to modify the buffers list.\n",
          " You can modify the parameter in-place or use",
          " `module$parameter_name <- new_value`"
        )
      }
      bufs <- lapply(private$modules_, function(x) x$buffers)
      bufs <- append(bufs, self$named_buffers(recursive = FALSE))
      bufs <- unlist(bufs, recursive = TRUE, use.names = TRUE)
      bufs <- bufs[!duplicated(bufs)] # unique doesn't preserve the names
      bufs
    },
    # Read-only, recursive view of self + all submodules (deduplicated by
    # underlying R6 instance).
    modules = function(value) {
      if (!missing(value)) {
        runtime_error(
          "It's not possible to modify the modules list.\n",
          " You can modify the modules in-place"
        )
      }
      modules <- lapply(private$modules_, function(x) x$modules)
      # the self instance is an nn_Module, not nn_module
      modules <- append(create_nn_module_callable(self), modules)
      modules <- unlist(modules)
      # to check if modules are iddentical we need to compare the
      # R6 instances.
      module_instances <- lapply(modules, function(x) attr(x, "module"))
      modules <- modules[!duplicated(module_instances)]
      modules
    },
    # Direct (non-recursive) submodules.
    children = function(value) {
      if (!missing(value)) {
        runtime_error(
          "It's not possible to modify the children list.\n",
          " You can modify the modules in-place"
        )
      }
      private$modules_
    }
  )
)
#' Creates an `nn_parameter`
#'
#' Indicates to nn_module that `x` is a parameter
#'
#' @param x the tensor that you want to indicate as parameter
#' @param requires_grad whether this parameter should have
#' `requires_grad = TRUE`
#'
#' @export
nn_parameter <- function(x, requires_grad = TRUE) {
  # Only torch tensors can act as module parameters.
  if (!is_torch_tensor(x)) stop("`x` must be a tensor.")
  # Toggle gradient tracking in place, then tag the tensor so nn_module
  # registers it as a parameter when it is assigned to `self$`.
  x$requires_grad_(requires_grad)
  class(x) <- append(class(x), "nn_parameter")
  x
}
#' Checks if an object is a nn_parameter
#'
#' @param x the object to check
#'
#' @export
is_nn_parameter <- function(x) {
  # TRUE when `x` carries the "nn_parameter" class tag set by nn_parameter().
  inherits(x, what = "nn_parameter")
}
#' Creates a nn_buffer
#'
#' Indicates that a tensor is a buffer in a nn_module
#'
#' @param x the tensor that will be converted to nn_buffer
#' @param persistent whether the buffer should be persistent or not.
#'
#' @export
nn_buffer <- function(x, persistent = TRUE) {
  # Tag `x` so nn_module registers it as a buffer on assignment to `self$`.
  # `persistent` records whether the buffer belongs in the state_dict.
  class(x) <- append(class(x), "nn_buffer")
  attr(x, "persistent") <- persistent
  x
}
#' Checks if the object is a nn_buffer
#'
#' @param x object to check
#'
#' @export
is_nn_buffer <- function(x) {
  # TRUE when `x` carries the "nn_buffer" class tag set by nn_buffer().
  inherits(x, what = "nn_buffer")
}
#' Checks if the object is an nn_module
#'
#' @param x object to check
#'
#' @export
is_nn_module <- function(x) {
  # Module *instances* carry "nn_module"; module *generators* additionally
  # carry "nn_module_generator". Only instances count here.
  cls <- class(x)
  ("nn_module" %in% cls) && !("nn_module_generator" %in% cls)
}
#' Base class for all neural network modules.
#'
#' Your models should also subclass this class.
#'
#' Modules can also contain other Modules, allowing to nest them in a tree
#' structure. You can assign the submodules as regular attributes.
#'
#' You are expected to implement the `initialize` and the `forward` to create a
#' new `nn_module`.
#'
#' @section Initialize:
#'
#' The initialize function will be called whenever a new instance of the `nn_module`
#' is created. We use the initialize functions to define submodules and parameters
#' of the module. For example:
#'
#' ```
#' initialize = function(input_size, output_size) {
#' self$conv1 <- nn_conv2d(input_size, output_size, 5)
#' self$conv2 <- nn_conv2d(output_size, output_size, 5)
#' }
#' ```
#'
#' The initialize function can have any number of parameters. All objects
#' assigned to `self$` will be available for other methods that you implement.
#' Tensors wrapped with [nn_parameter()] or [nn_buffer()] and submodules are
#' automatically tracked when assigned to `self$`.
#'
#' The initialize function is optional if the module you are defining doesn't
#' have weights, submodules or buffers.
#'
#' @section Forward:
#'
#' The forward method is called whenever an instance of `nn_module` is called.
#' This is usually used to implement the computation that the module does with
#' the weights ad submodules defined in the `initialize` function.
#'
#' For example:
#'
#' ```
#' forward = function(input) {
#' input <- self$conv1(input)
#' input <- nnf_relu(input)
#' input <- self$conv2(input)
#' input <- nnf_relu(input)
#' input
#' }
#' ```
#'
#' The `forward` function can use the `self$training` attribute to make different
#' computations depending wether the model is training or not, for example if you
#' were implementing the dropout module.
#'
#' @param classname an optional name for the module
#' @param inherit an optional module to inherit from
#' @param ... methods implementation
#' @param private passed to [R6::R6Class()].
#' @param active passed to [R6::R6Class()].
#' @param parent_env passed to [R6::R6Class()].
#'
#' @examples
#' model <- nn_module(
#' initialize = function() {
#' self$conv1 <- nn_conv2d(1, 20, 5)
#' self$conv2 <- nn_conv2d(20, 20, 5)
#' },
#' forward = function(input) {
#' input <- self$conv1(input)
#' input <- nnf_relu(input)
#' input <- self$conv2(input)
#' input <- nnf_relu(input)
#' input
#' }
#' )
#' @export
nn_module <- function(classname = NULL, inherit = nn_Module, ...,
                      private = NULL, active = NULL,
                      parent_env = parent.frame()) {
  # Callers may pass another nn_module (the callable wrapper); unwrap it to
  # the underlying R6 generator stored in its "module" attribute.
  if (inherits(inherit, "nn_module")) {
    inherit <- attr(inherit, "module")
  }
  # The generator's parent environment must be able to resolve `inherit`.
  e <- new.env(parent = parent_env)
  e$inherit <- inherit
  # Accumulate the class chain: user classname, inherited user classes,
  # then the framework tag "nn_module".
  inherit_class <- get_inherited_classes(inherit)
  classes <- c(classname, inherit_class, "nn_module")
  Module <- R6::R6Class(
    classname = classname,
    inherit = inherit,
    lock_objects = FALSE,
    public = list(
      .classes = classes,
      ...
    ),
    private = private,
    active = active,
    parent_env = e
  )
  # Build a constructor whose formals mirror the module's `initialize` so
  # users get normal argument matching, then wrap each new instance in a
  # callable (see create_nn_module_callable).
  init <- get_init(Module)
  fun <- rlang::new_function(
    args = rlang::fn_fmls(init),
    body = rlang::expr({
      instance <- Module$new(!!!rlang::fn_fmls_syms(init))
      create_nn_module_callable(instance)
    })
  )
  # The returned object is a *generator*: it carries both tags so that
  # is_nn_module() can tell it apart from instances.
  attr(fun, "class") <- c(classes, "nn_module_generator")
  attr(fun, "module") <- Module
  fun
}
create_nn_module_callable <- function(instance) {
  # Wrap the R6 instance in its own `forward` function so the module can be
  # called like a plain function; the instance rides along in the "module"
  # attribute and the class chain is copied onto the wrapper.
  f <- instance$forward
  attr(f, "class") <- instance$.classes
  attr(f, "module") <- instance
  # Temporarily unlock `clone` so it can be replaced by a variant that also
  # duplicates parameter/buffer tensors on deep clones.
  rlang::env_binding_unlock(instance, "clone")
  on.exit({lockBinding("clone", instance)}, add = TRUE)
  clone <- instance$clone
  instance$clone <- function(deep = FALSE, ..., replace_values = TRUE) {
    if (deep && replace_values) {
      # Table keyed by tensor pointer address -> detached clone, used to
      # rewire the cloned module's registries to fresh storage.
      state_dict <- append(instance$parameters, instance$buffers)
      names(state_dict) <- sapply(state_dict, xptr_address)
      state_dict <- state_dict[!duplicated(names(state_dict))]
      state_dict <- lapply(state_dict, function(x) x$detach()$clone())
      # also need to append a clone of the modules to this list.
      # child modules can be duplicated - and have the same name
      # child modules are also deep cloned, but we don't need to replace
      # their values when cloning because we only have to do it once.
      children <- instance$children
      names(children) <- sapply(children, rlang::obj_address)
      children <- children[!duplicated(names(children))]
      children <- lapply(children, function(x) x$clone(deep = deep, replace_values = FALSE))
      state_dict <- append(state_dict, children)
    }
    cloned_instance <- clone(deep = deep)
    if (deep && replace_values) {
      cloned_instance$.replace_values_from_table(state_dict)
    }
    cloned_instance
  }
  f
}
#' @export
`$.nn_module` <- function(x, name) {
  # Forward `$` access to the wrapped R6 instance stored in the "module"
  # attribute of the callable wrapper.
  instance <- attr(x, "module")
  do.call("$", args = list(instance, name))
}
#' @export
`[[.nn_module` <- function(x, name) {
  # Forward `[[` access to the wrapped R6 instance stored in the "module"
  # attribute of the callable wrapper.
  instance <- attr(x, "module")
  do.call("[[", args = list(instance, name))
}
#' @export
`$.nn_Module` <- function(x, name) {
  # Route `$` through `[[` so both access paths share one lookup path
  # (see `[[.nn_Module` for the full resolution order).
  x[[name]]
}
#' @export
`[[.nn_Module` <- function(x, y) {
  # Custom member resolution for module instances, in priority order:
  # enclosing env -> numeric submodule index -> parameters -> buffers ->
  # submodules -> private `find_method` hook -> default R6 field/method.
  if (y == ".__enclos_env__") {
    return(NextMethod())
  }
  # Numeric index: positional lookup among registered submodules.
  if (is.numeric(y)) {
    return(x[[".__enclos_env__"]][["private"]][["modules_"]][[y]])
  }
  pars <- x[[".__enclos_env__"]][["private"]][["parameters_"]]
  if (!is.null(pars)) {
    o <- pars[[y]]
    if (!is.null(o)) {
      return(o)
    }
  }
  bufs <- x[[".__enclos_env__"]][["private"]][["buffers_"]]
  if (!is.null(bufs)) {
    o <- bufs[[y]]
    if (!is.null(o)) {
      return(o)
    }
  }
  mods <- x[[".__enclos_env__"]][["private"]][["modules_"]]
  if (!is.null(mods)) {
    o <- mods[[y]]
    if (!is.null(o)) {
      return(o)
    }
  }
  # Optional per-class hook for dynamically resolved methods.
  find_method <- x[[".__enclos_env__"]][["private"]][["find_method"]]
  if (!is.null(find_method)) {
    o <- find_method(y)
    if (!is.null(o)) {
      return(o)
    }
  }
  # Fall back to ordinary R6 member access.
  NextMethod("[[", x)
}
#' @export
`[[<-.nn_Module` <- function(x, name, value) {
  # Assignment doubles as registration: values tagged by nn_parameter() /
  # nn_buffer(), or module instances, are routed into the corresponding
  # registry; anything else becomes a plain R6 member.
  if (inherits(value, "nn_parameter")) {
    x$register_parameter(name, value)
  } else if (inherits(value, "nn_buffer")) {
    x$register_buffer(name, value, attr(value, "persistent"))
  } else if (is_nn_module(value)) {
    x$add_module(name, value)
  } else {
    # NOTE(review): passing generic "$<-" from inside a `[[<-` method looks
    # like a copy-paste from the sibling `$<-` method -- confirm the
    # intended dispatch target.
    NextMethod("$<-", x)
  }
  invisible(x)
}
#' @export
`$<-.nn_Module` <- function(x, name, value) {
  # Delegate to `[[<-.nn_Module` so the parameter/buffer/module
  # registration logic lives in a single place.
  x[[name]] <- value
  invisible(x)
}
#' @export
`$<-.nn_module` <- function(x, name, value) {
  # Assign through to the wrapped R6 instance. R6 instances are
  # environments, so this mutates the underlying module in place even
  # though `attr(x, "module")` is re-attached to the local copy of `x`.
  attr(x, "module")[[name]] <- value
  invisible(x)
}
#' @export
`[[<-.nn_module` <- `$<-.nn_module`
#' @export
names.nn_module <- function(x, ...) {
  # Report the member names of the wrapped R6 instance rather than the
  # function wrapper itself. NOTE(review): this relies on NextMethod()
  # dispatching with the *re-bound* `x` -- confirm against ?NextMethod.
  x <- attr(x, "module")
  NextMethod("names", x)
}
#' @export
print.nn_module <- function(x, ...) {
  # Print the wrapped R6 instance (its own print method produces the
  # module summary) instead of the function wrapper.
  print(attr(x, "module"))
}
#' A sequential container
#'
#' A sequential container.
#' Modules will be added to it in the order they are passed in the constructor.
#' See examples.
#'
#' @param ... sequence of modules to be added
#'
#' @examples
#'
#' model <- nn_sequential(
#' nn_conv2d(1, 20, 5),
#' nn_relu(),
#' nn_conv2d(20, 64, 5),
#' nn_relu()
#' )
#' input <- torch_randn(32, 1, 28, 28)
#' output <- model(input)
#' @export
# Fix: the previous revision read `nn_sequential <- module <- nn_module(...)`,
# which additionally left a stray top-level binding named `module` in the
# package namespace. The redundant binding is removed; `nn_sequential`
# itself is unchanged.
nn_sequential <- nn_module(
  classname = "nn_sequential",
  initialize = function(...) {
    # Register each submodule under a zero-based index name ("0", "1", ...),
    # mirroring pytorch's nn.Sequential naming.
    modules <- rlang::list2(...)
    for (i in seq_along(modules)) {
      self$add_module(name = i - 1, module = modules[[i]])
    }
  },
  forward = function(input) {
    # Thread the input through the registered modules in insertion order.
    for (module in private$modules_) {
      input <- module(input)
    }
    input
  }
)
#' @export
length.nn_sequential <- function(x) {
  # Number of registered submodules, stored in the instance's private env.
  mods <- x$.__enclos_env__$private$modules_
  length(mods)
}
#' @export
`[[.nn_sequential` <- function(x, y) {
  # A scalar whole-number index selects the y-th registered submodule;
  # any other key (method/field names) falls through to the default or
  # inherited `[[` behavior.
  if (!rlang::is_scalar_integerish(y)) {
    return(NextMethod("[["))
  }
  x$.__enclos_env__$private$modules_[[y]]
}
#' @export
`[.nn_sequential` <- function(x, y) {
  # Slicing: build a fresh nn_sequential out of the submodules selected by
  # the index vector `y` (each resolved via `[[.nn_sequential`).
  nn_sequential(!!!lapply(y, function(i) x[[i]]))
}
#' Prune top layer(s) of a network
#'
#' Prune `head_size` last layers of a nn_module in order to
#' replace them by your own head, or in order to use the pruned module
#' as a sequential embedding module.
#' @param x nn_network to prune
#' @param head_size number of nn_layers to prune
#'
#' @return a nn_sequential network with the top nn_layer removed
#' @export
#'
#' @examples
#' if (torch_is_installed()) {
#' x <- nn_sequential(
#' nn_relu(),
#' nn_tanh(),
#' nn_relu6(),
#' nn_relu(),
#' nn_linear(2,10),
#' nn_batch_norm1d(10),
#' nn_tanh(),
#' nn_linear(10,3)
#' )
#' prune <- nn_prune_head(x, 3)
#' prune
#' }
nn_prune_head <- function(x, head_size) {
  # S3 generic; see nn_prune_head.nn_sequential and nn_prune_head.nn_module.
  UseMethod("nn_prune_head")
}
#' @export
nn_prune_head.nn_sequential <- function(x, head_size = 1L) {
  # Keep all but the last `head_size` submodules.
  n_keep <- length(x) - head_size
  # seq_len() (unlike the previous 1:(length(x) - head_size)) yields an
  # empty index when n_keep == 0, so pruning every layer returns an empty
  # nn_sequential instead of selecting bogus indices c(1, 0).
  nn_sequential(!!!x$children[seq_len(n_keep)])
}
#' @export
# The nn_module method is itself a module generator: constructing it with a
# source module `x` produces a sequential-style module holding all but the
# last `head_size` children of `x`, under their original names.
nn_prune_head.nn_module <- nn_module(
  classname = "nn_sequential",
  initialize = function(x, head_size = 1L) {
    # seq_len() handles the head_size == length(children) case safely
    # (empty selection), where the previous 1:(n - head_size) produced the
    # bogus index vector c(1, 0).
    keep <- seq_len(length(x$children) - head_size)
    modules <- rlang::list2(!!!x$children[keep])
    mod_names <- names(modules)
    for (i in seq_along(modules)) {
      self$add_module(name = mod_names[i], module = modules[[i]])
    }
  },
  forward = function(...) {
    # The first module receives the caller's arguments verbatim; subsequent
    # modules are chained on the single running `input` value.
    first_module <- TRUE
    for (module in private$modules_) {
      if (first_module) {
        input <- module(...)
      } else {
        input <- module(input)
      }
      first_module <- FALSE
    }
    input
  }
)
#' Holds submodules in a list.
#'
#' [nn_module_list] can be indexed like a regular R list, but
#' modules it contains are properly registered, and will be visible by all
#' `nn_module` methods.
#'
#' @param modules a list of modules to add
#' @seealso [nn_module_dict()]
#' @examples
#'
#' my_module <- nn_module(
#' initialize = function() {
#' self$linears <- nn_module_list(lapply(1:10, function(x) nn_linear(10, 10)))
#' },
#' forward = function(x) {
#' for (i in 1:length(self$linears)) {
#' x <- self$linears[[i]](x)
#' }
#' x
#' }
#' )
#' @export
nn_module_list <- nn_module(
  "nn_module_list",
  initialize = function(modules = list()) {
    # Register each module under a zero-based index name ("0", "1", ...).
    for (i in seq_along(modules)) {
      self$add_module(i - 1, modules[[i]])
    }
  },
  insert = function(index, module) {
    # Insert `module` at 1-based position `index`, then re-register the
    # whole list so the zero-based index names stay contiguous.
    modules <- append(private$modules_, list(module), after = index - 1)
    private$modules_ <- list()
    for (i in seq_along(modules)) {
      self$add_module(i - 1, modules[[i]])
    }
  },
  append = function(module) {
    # Add at the end; the current length doubles as the next zero-based name.
    i <- length(private$modules_)
    self$add_module(i, module)
  },
  extend = function(modules) {
    # Append every element of `modules`, preserving order.
    for (j in seq_along(modules)) {
      self$append(modules[[j]])
    }
  }
)
#' Container that allows named values
#'
#' @param dict A named list of submodules that will be saved in that module.
#' @examples
#' nn_module <- nn_module(
#' initialize = function() {
#' self$dict <- nn_module_dict(list(
#' l1 = nn_linear(10, 20),
#' l2 = nn_linear(20, 10)
#' ))
#' },
#' forward = function(x) {
#' x <- self$dict$l1(x)
#' self$dict$l2(x)
#' }
#' )
#' @seealso [nn_module_list()]
#' @export
nn_module_dict <- nn_module(
  initialize = function(dict) {
    # Every submodule must be named; assignment to `self[[nm]]` registers
    # each one so standard nn_module traversal (parameters, state_dict,
    # printing) finds them.
    if (!rlang::is_named(dict)) cli::cli_abort("All elements in {.arg dict} must be named.")
    for (nm in names(dict)) {
      self[[nm]] <- dict[[nm]]
    }
  },
  forward = function(...) {
    # A dict is a pure container; calling it directly is an error.
    # (Message fixed: it previously read "has {.fn forward} implementation",
    # dropping the word "no".)
    cli::cli_abort("{.fn nn_module_dict} has no {.fn forward} implementation.")
  }
)
#' @export
`[[.nn_module_list` <- function(x, y) {
  # A scalar whole-number index selects the y-th registered submodule;
  # any other key falls through to the default/inherited `[[` behavior.
  if (!rlang::is_scalar_integerish(y)) {
    return(NextMethod("[["))
  }
  x$.__enclos_env__$private$modules_[[y]]
}
#' @export
as.list.nn_module_list <- function(x, ...) {
  # Expose the registered submodules as a plain (named) list.
  priv <- x$.__enclos_env__$private
  priv$modules_
}
#' @export
length.nn_module_list <- function(x, ...) {
  # Number of registered submodules held in the instance's private env.
  mods <- x$.__enclos_env__$private$modules_
  length(mods)
}
comma <- function(x) {
  # Human-readable number formatting: thousands separators, never
  # scientific notation, no forced decimal places.
  format(x, big.mark = ",", scientific = FALSE, nsmall = 0)
}
print_nn_module <- function(self, private) {
  # Header with the total (recursive) parameter count, followed by one
  # section per non-empty registry: submodules, parameters, buffers.
  cli::cat_line(
    "An `nn_module` containing ",
    comma(get_parameter_count(self)),
    " parameters."
  )
  if (length(private$modules_) > 0) {
    cli::cat_line()
    cli::cat_rule("Modules")
    sapply(names(private$modules_), function(x) {
      cli_module_item(x, private$modules_[[x]])
    })
  }
  if (length(private$parameters_) > 0) {
    cli::cat_line()
    cli::cat_rule("Parameters")
    sapply(names(private$parameters_), function(x) {
      cli_tensor_item(x, private$parameters_[[x]])
    })
  }
  if (length(private$buffers_) > 0) {
    cli::cat_line()
    cli::cat_rule("Buffers")
    sapply(names(private$buffers_), function(x) {
      cli_tensor_item(x, private$buffers_[[x]])
    })
  }
}
cli_module_item <- function(name, module) {
  # One bullet per submodule: "name: <class> #<param count> parameters".
  cli::cat_bullet(paste0(
    name,
    ": <", class(module)[1], "> #",
    comma(get_parameter_count(module)),
    " parameters"
  ))
}
cli_tensor_item <- function(name, tensor) {
  # One bullet per parameter/buffer: "name: <tensor summary>".
  cli::cat_bullet(paste0(
    name,
    ": ",
    make_str_torch_tensor(tensor)
  ))
}
get_parameter_count <- function(self) {
  # Total number of scalar elements across all parameter tensors of the
  # module (recursive, via the `parameters` active binding).
  params <- self$parameters
  if (length(params) == 0) {
    return(0)
  }
  sum(sapply(params, function(p) prod(p$shape)))
}
keepvars_or_detach <- function(p, keepvars) {
  # With keepvars = TRUE, return the tensor untouched (still attached to
  # the autograd graph). Otherwise return a detached copy that preserves
  # the original requires_grad flag.
  if (keepvars) {
    return(p)
  }
  detached <- p$detach()
  detached$requires_grad_(p$requires_grad)
  detached
}
|
5b240e2a52d11f3c0f998392644451ff1a25a652
|
a82df8590bdcf2bb4e01e0b610a0765706dc0dbe
|
/R/HelloWorld.R
|
202a71e48068ec523fb2de2741a9664bf6eccdf8
|
[] |
no_license
|
bryankuo/lab
|
db1923ff1479a0dfe3d4fe6b41fb47f97fb19b37
|
521865b25f632887d05a951365e2497c4c22631b
|
refs/heads/master
| 2023-09-04T00:44:33.832607
| 2023-09-03T11:08:28
| 2023-09-03T11:08:28
| 64,229,571
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 45
|
r
|
HelloWorld.R
|
# Minimal introductory script: store a greeting and echo it to the console.
myString <- "Hello, World!"  # fixed typo: "Wolrd" -> "World"
print(myString)
|
b4db6d2f4156c2aa4cfb6119a761794e2250141f
|
80e4f2090549ef36daf4ea0d976f42fa596bf9ba
|
/Project/new_copula_garch.R
|
6fb1bc3f63da4426677a7499cc633b45b39a847c
|
[] |
no_license
|
alexgarland/Applied_Quant_Finance
|
4a5a411bd0d0fd10172b0961f7f3ad84b15d756e
|
80e4d25395e1e78f81da676840fbab1d9aab8f25
|
refs/heads/master
| 2020-05-20T05:59:55.018518
| 2016-07-03T19:40:07
| 2016-07-03T19:40:07
| 51,231,778
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,340
|
r
|
new_copula_garch.R
|
# Fit ARMA(5,5)-eGARCH(5,5) models with skewed-t innovations to the
# Fama-French HML (value) and momentum daily factors, then quantify tail
# dependence between the standardized shocks using t-copulas.
library(copula)
library(rugarch)
library(readr)
library(ggplot2)
# NOTE(review): setwd() and rm(list=ls()) in a script clobber the caller's
# session state and are discouraged; kept here to preserve behavior.
setwd("~/Applied_Quant_Finance/Project")
rm(list=ls())
# ARMA(5,5) mean + eGARCH(5,5) variance, skewed Student-t innovations.
spec = ugarchspec(mean.model=list(armaOrder=c(5,5)),
                  variance.model=list(model = "eGARCH", garchOrder=c(5,5)),
                  distribution.model = "sstd")
# Daily Fama-French factor files, joined on the Date column.
most_data <- read_csv("F-F_Research_Data_Factors_daily.CSV")
mom_data <- read_csv("F-F_Momentum_Factor_daily.CSV")
all_data <- merge(most_data, mom_data, by="Date")
# NOTE(review): hard-coded row cutoff -- presumably trims trailing
# non-daily/footer rows in the source CSVs; confirm against the raw files.
all_data <- all_data[1:23597,]
colnames(all_data)[6] <- "Mom"
# Equal-weighted 50/50 value+momentum combination strategy.
all_data$combined <- .5*all_data$HML + .5 * all_data$Mom
# Fit the GARCH spec to each series (hybrid solver for robustness).
fit_value <- ugarchfit(data=all_data$HML, spec=spec, solver='hybrid')
fit_mom <- ugarchfit(data=all_data$Mom, spec=spec, solver='hybrid')
fit_both <- ugarchfit(data=all_data$combined, spec=spec, solver='hybrid')
# Standardized residuals ("shocks") from each fit.
value_shock <- residuals(fit_value, standardize=T)
value_shock <- as.vector(value_shock)
mom_shock <- as.vector(residuals(fit_mom, standardize=T))
both_shock <- as.vector(residuals(fit_both, standardize=T))
# Pseudo-observations (rank transform to [0,1]) for copula fitting.
mom_both <- pobs(data.frame(mom = mom_shock, both = both_shock))
value_both <- pobs(data.frame(val = value_shock, both = both_shock))
mom_value <- pobs(data.frame(mom = mom_shock, val = value_shock))
# Bivariate t-copulas by maximum likelihood for each pairing.
fit_mb <- fitCopula(ellipCopula("t", dim=2), data=mom_both, method="ml")
fit_vb <- fitCopula(ellipCopula("t", dim=2), data=value_both, method="ml")
fit_vm <- fitCopula(ellipCopula("t", dim=2), data=mom_value, method="ml")
# Lower/upper tail-dependence coefficients implied by each fitted copula.
tailIndex(fit_mb@copula)
tailIndex(fit_vb@copula)
tailIndex(fit_vm@copula)
# For comparison: tail dependence of the *raw* returns (no GARCH filter).
mom_value_obs <- pobs(data.frame(mom = all_data$Mom, val = all_data$HML))
fit_vm_obs <- fitCopula(ellipCopula("t", dim=2), data=mom_value_obs, method="ml")
tailIndex(fit_vm_obs@copula)
# And of ARMA-only residuals (mean-filtered but not variance-filtered).
mom_arma <- arima(all_data$Mom, order=c(5,0,5))
val_arma <- arima(all_data$HML, order=c(5,0,5))
mom_val_res <- pobs(data.frame(mom = mom_arma$residuals, val = val_arma$residuals))
fit_vm_res <- fitCopula(ellipCopula("t", dim=2), data=mom_val_res, method="ml")
tailIndex(fit_vm_res@copula)
# Visual check of the rank-transformed shock dependence structure.
plot(mom_value, cex=.0001)
first_plot <- ggplot(data = as.data.frame(mom_value), aes(x = mom, y = val)) +
  geom_point(size = .001, color = "firebrick")
#Summary statistics
strats <- data.frame(all_data$HML, all_data$Mom, all_data$combined)
corr_mat <- cor(strats)
shocks <- data.frame(mom = mom_shock, val = value_shock)
corr_shocks <- cor(shocks)
|
7f8cbc129b2eee0d2f9e41d6c86ff68a5fe4c582
|
45aebfdd9d491ce87ed4121737f6a5d892bc7646
|
/man/dalr.Rd
|
a76f4987cdfe01060880e33af88683167e4fe573
|
[
"BSD-3-Clause"
] |
permissive
|
schiffner/locClass
|
3698168da43af5802e5391c3b416a3ca3eb90cbe
|
9b7444bc0556e3aafae6661b534727cd8c8818df
|
refs/heads/master
| 2021-01-19T05:21:57.704770
| 2016-08-21T19:25:12
| 2016-08-21T19:25:12
| 42,644,102
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 12,011
|
rd
|
dalr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dalr.R
\name{dalr}
\alias{dalr}
\alias{dalr.data.frame}
\alias{dalr.default}
\alias{dalr.formula}
\alias{dalr.matrix}
\title{Discriminant Adaptive Logistic Regression}
\usage{
dalr(X, ...)
\method{dalr}{formula}(formula, data, weights, ..., subset, na.action)
\method{dalr}{data.frame}(X, ...)
\method{dalr}{matrix}(X, Y, weights = rep(1, nrow(X)), intercept = TRUE,
..., subset, na.action)
\method{dalr}{default}(X, Y, thr = 0.5, wf = c("biweight", "cauchy",
"cosine", "epanechnikov", "exponential", "gaussian", "optcosine",
"rectangular", "triangular"), bw, k, nn.only = TRUE, itr = 3,
intercept = TRUE, weights = rep(1, nrow(X)), ...)
}
\arguments{
\item{X}{(Required if no \code{formula} is given as principal argument.) A \code{matrix} or \code{data.frame} or \code{Matrix} containing the explanatory variables.}
\item{formula}{A formula of the form \code{groups ~ x1 + x2 + \dots}, that is, the response
is the grouping \code{factor} and the right hand side specifies the (non-\code{factor})
discriminators. Details concerning model specification are given in the documentation of \code{\link{glm}}.}
\item{data}{A \code{data.frame} from which variables specified in \code{formula} are to be taken.}
\item{weights}{Initial observation weights (defaults to a vector of 1s).}
\item{subset}{An index vector specifying the cases to be used in the training sample. (NOTE: If given, this argument must be named.)}
\item{na.action}{The default is first, any \code{na.action} attribute of data, second a \code{na.action} setting of options, and third \code{na.fail} if that is unset.
The default is first, a \code{na.action} setting of options, and second \code{na.fail} if that is unset.}
\item{Y}{(Required if no \code{formula} is given as principal argument.) A \code{factor} specifying the class membership for each observation.}
\item{intercept}{Should the model contain an intercept? Passed to \code{\link{glm.fit}}, null.model.}
\item{thr}{The threshold value used to predict class membership, defaults to 0.5. See Details.}
\item{wf}{A window function which is used to calculate weights that are introduced into
the fitting process. Either a character string or a function, e.g. \code{wf = function(x) exp(-x)}.
For details see the documentation for \code{\link[=biweight]{wfs}}.}
\item{bw}{(Required only if \code{wf} is a string.) The bandwidth parameter of the window function. (See \code{\link[=biweight]{wfs}}.)}
\item{k}{(Required only if \code{wf} is a string.) The number of nearest neighbors of the decision boundary to be used in the fitting process. (See \code{\link[=biweight]{wfs}}.)}
\item{nn.only}{(Required only if \code{wf} is a string indicating a window function with infinite support and if \code{k} is specified.) Should
only the \code{k} nearest neighbors or all observations receive positive weights? (See \code{\link[=biweight]{wfs}}.)}
\item{itr}{Number of iterations for model fitting, defaults to 3. See also the Details section.}
\item{\dots}{Further arguments to \code{\link{glm}}. Currently \code{offset},
\code{control}, \code{model}, \code{x}, \code{y}, \code{contrasts}, \code{start}, \code{etastart},
\code{mustart} are supported.}
}
\value{
An object of class \code{"dalr"} inheriting from class \code{"glm"}, a list containing at least the following components:
Values of \code{glm}:
\item{coefficients}{A named vector of coefficients.}
\item{residuals}{The working residuals, that is the residuals in the final iteration of the IWLS fit.
Since cases with zero weights are omitted, their working residuals are \code{NA}.}
\item{fitted.values}{The fitted mean values, obtained by transforming the linear predictors by the inverse of the link function.}
\item{rank}{The numeric rank of the fitted linear model.}
\item{family}{The \code{\link{family}} object used.}
\item{linear.predictor}{The linear fit on link scale.}
\item{deviance}{Up to a constant, minus twice the maximized log-likelihood. Where sensible, the constant is chosen so that a saturated model has deviance zero.}
\item{aic}{A version of Akaike's An Information Criterion, minus twice the maximized log-likelihood plus twice the number of parameters,
computed by the aic component of the family. For binomial and poisson families the dispersion is fixed at one and the number of parameters
is the number of coefficients. For gaussian, Gamma and inverse gaussian families the dispersion is estimated from the residual deviance,
and the number of parameters is the number of coefficients plus one. For a gaussian family the MLE of the dispersion is used so this is a
valid value of AIC, but for Gamma and inverse gaussian families it is not. For families fitted by quasi-likelihood the value is NA.}
\item{null.deviance}{The deviance for the null model, comparable with deviance. The null model will include the offset, and an intercept if there is one in the model.
Note that this will be incorrect if the link function depends on the data other than through the fitted mean: specify a zero offset to force a correct calculation.}
\item{iter}{The number of iterations of IWLS used.}
\item{weights}{A list of length \code{itr + 1}. The working weights, that is the observation weights in the final iteration of the IWLS fit.}
\item{prior.weights}{A list of length \code{itr + 1}. The observation weights initially supplied, the first list element is a vector of 1s if none were specified.}
\item{df.residual}{The residual degrees of freedom.}
\item{df.null}{The residual degrees of freedom for the null model.}
\item{y}{If requested (the default) the y vector used. (It is a vector even for a binomial model.)}
\item{x}{If requested, the model matrix.}
\item{model}{If requested (the default), the model frame.}
\item{converged}{Logical. Was the IWLS algorithm judged to have converged?}
\item{boundary}{Logical. Is the fitted value on the boundary of the attainable values?}
\item{call}{The (matched) function call.}
\item{formula}{The formula supplied.}
\item{terms}{The \code{\link{terms}} object used.}
\item{data}{The data argument.}
\item{offset}{The offset vector used.}
\item{control}{The value of the control argument used.}
\item{method}{The name of the fitter function used, currently always \code{"glm.fit"}.}
\item{contrasts}{(Where relevant) the contrasts used.}
\item{xlevels}{(Where relevant) a record of the levels of the factors used in fitting.}
\item{na.action}{(Where relevant) information returned by \code{\link{model.frame}} on the special handling of NAs.}
Additionally, \code{dalr} returns
\item{lev}{The class labels (the levels of \code{grouping}).}
\item{thr}{The threshold used.}
\item{itr}{The number of iterations used.}
\item{wf}{The window function used. Always a function, even if the input was a string.}
\item{bw}{(Only if \code{wf} is a string or was generated by means of one of the functions documented in \code{\link[=biweight]{wfs}}.)
The bandwidth used, \code{NULL} if \code{bw} was not specified.}
\item{k}{(Only if \code{wf} is a string or was generated by means of one of the functions documented in \code{\link[=biweight]{wfs}}.)
The number of nearest neighbors used, \code{NULL} if \code{k} was not specified.}
\item{nn.only}{(Logical. Only if \code{wf} is a string or was generated by means of one of the functions documented in \code{\link[=biweight]{wfs}} and if \code{k} was
specified.) \code{TRUE} if only the \code{k} nearest neighbors recieve a positive weight, \code{FALSE} otherwise.}
\item{adaptive}{(Logical.) \code{TRUE} if the bandwidth of \code{wf} is adaptive to the local density of data points, \code{FALSE} if the bandwidth
is fixed.}
}
\description{
A local version of logistic regression for classification that puts increased emphasis
on a good model fit near the decision boundary.
}
\details{
Local logistic regression (Hand and Vinciotti, 2003) is a modification of the standard logistic regression approach
to discrimination. For discrimination a good fit of the model is required especially near the true decision
boundary. Therefore weights are introduced into the fitting process that reflect the proximity of training points
to the decision boundary.
Let the class levels be 0 and 1.
The distance of a training observation \eqn{x} to the decision boundary is measured by means of
the difference \eqn{P(1 | x) - thr} where \code{thr} is a threshold in \eqn{[0,1]}.
Since \eqn{P(1 | x)} is not known in advance an iterative
procedure is required. We start by fitting an unweighted logistic regression model to the data in order to obtain
initial estimates of \eqn{P(1 | x)}. These are used to calculate the observation weights.
Model fitting and calculation of weights is done several times in turn.
By default, the number of iterations is limited to 3.
The name of the window function (\code{wf}) can be specified as a character string.
In this case the window function is generated internally in \code{dalr}. Currently
supported are \code{"biweight"}, \code{"cauchy"}, \code{"cosine"}, \code{"epanechnikov"},
\code{"exponential"}, \code{"gaussian"}, \code{"optcosine"}, \code{"rectangular"} and
\code{"triangular"}.
Moreover, it is possible to generate the window functions mentioned above in advance
(see \code{\link[=biweight]{wfs}}) and pass them to \code{dalr}.
Any other function implementing a window function can also be used as \code{wf} argument.
This allows the user to try own window functions.
See help on \code{\link[=biweight]{wfs}} for details.
Internally, \code{\link{glm.fit}} with \code{family = binomial()} is used and the weights produced using
\code{wf} are passed to \code{\link{glm.fit}} via its \code{weights} argument.
If the predictor variables include factors, the formula interface must be used in order
to get a correct model matrix.
Warnings about non-integer #successes in a binomial glm are expected.
}
\examples{
# generate toy data set of Hand und Vinciotti (2003):
x1 <- x2 <- seq(0.1,1,0.05)
train <- expand.grid(x1 = x1, x2 = x2)
posterior <- train$x2/(train$x1 + train$x2)
y <- as.factor(sapply(posterior, function(x) sample(0:1, size = 1,
prob = c(1-x,x))))
train <- data.frame(train, y = y)
par(mfrow = c(1,3))
# contours of true class posterior probabilities:
plot(train$x1, train$x2, col = y, pch = 19, main = "true posteriors")
contour(x1, x2, matrix(posterior, length(x1)), add = TRUE)
# 0.3-contour line fit of logistic regression:
glob.fit <- glm(y ~ ., data = train, family = "binomial")
plot(train$x1, train$x2, col = y, pch = 19, main = "global fit")
contour(x1, x2, matrix(glob.fit$fitted.values, length(x1)),
levels = 0.3, add = TRUE)
# 0.3-contour line fit of local logistic regression:
loc.fit <- dalr(y ~ ., data = train, thr = 0.3, wf = "gaussian", bw = 0.2)
plot(train$x1, train$x2, col = y, pch = 19, main = "local fit")
contour(x1, x2, matrix(loc.fit$fitted.values, length(x1)),
levels = 0.3, add = TRUE)
# specify wf as a character string:
dalr(y ~ ., data = train , thr = 0.3, wf = "rectangular", k = 50)
# use window function generating function:
rect <- rectangular(100)
dalr(y ~ ., data = train, thr = 0.3, wf = rect)
# specify own window function:
dalr(y ~ ., data = train, thr = 0.3, wf = function(x) exp(-10*x^2))
# generate test data set:
x1 <- runif(200, min = 0, max = 1)
x2 <- runif(200, min = 0, max = 1)
test <- data.frame(x1 = x1, x2 = x2)
pred <- predict(loc.fit, test)
prob <- test$x2/(test$x1 + test$x2)
y <- as.factor(sapply(prob, function(x) sample(0:1, size = 1,
prob = c(1-x,x))))
mean(y != pred$class)
}
\references{
Hand, D. J., Vinciotti, V. (2003), Local versus global models for classification problems:
Fitting models where it matters, \emph{The American Statistician}, \bold{57(2)} 124--130.
}
\seealso{
\code{\link{predict.dalr}}, \code{\link{glm}}, \code{\link{predict.glm}}.
}
\keyword{classif}
\keyword{model}
\keyword{multivariate}
|
490edda9a5d5b80a12b78aa73add9c0e6f58d7f7
|
65c2638762ac591ae8251e3790b4e69b273d3cd0
|
/lab/lab3.R
|
bb07b4383518dec502bda74377b4d62f1b3458ee
|
[] |
no_license
|
QidongS/ph245
|
889252d77a79b30b131e1630615040eab5e82ecf
|
b40f2c74dbdfa34e4d6108771622d32f93f146b0
|
refs/heads/master
| 2020-09-03T09:02:26.550078
| 2019-12-05T03:01:03
| 2019-12-05T03:01:03
| 219,431,702
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 457
|
r
|
lab3.R
|
# Lab 3: matrix construction, subsetting and data-frame inspection.

# 10x10 matrix filled column-wise with 1..100.
matrix.100 <- matrix(data = 1:100, nrow = 10, ncol = 10, byrow = FALSE,
                     dimnames = NULL)
numbers <- c(4, 5, 9, 10, 12, 15, 16, 17)
# Same values laid out column-wise (mat1) vs. row-wise (mat2).
mat1 <- matrix(numbers, nrow = 4, ncol = 2, byrow = FALSE)
mat2 <- matrix(numbers, nrow = 4, ncol = 2, byrow = TRUE)
# Rows 1, 3 and 5 of the 10x10 matrix.
a <- matrix.100[c(1, 3, 5), ]
# Both columns of mat1 (a copy of the whole matrix).
mat3 <- mat1[, c(1:2)]
data <- read.csv("wcgs.csv")
print(data)
# First six rows.
print(data[c(1:6), ])
# BUG FIX: the original was print(data.frame[,c(3,4)]), which subsets the
# *function* data.frame and raises "object of type 'closure' is not
# subsettable"; columns 3 and 4 of the loaded data set were clearly meant.
print(data[, c(3, 4)])
print(matrix.100)
print(mat1)
print(mat2)
print(mat3)
print(a)
|
b6d361561f0532ee5e2073f57cdafbc2bbabc203
|
cf4be0c293910912aa0ac773c9301cdb1a74dbe5
|
/R/transactions.R
|
c897b238e9aac28c0bfa813124176b5ae15acbb2
|
[
"MIT"
] |
permissive
|
Karagul/investmentsim
|
ee946f76aafc2e567255c413a211106104918e5f
|
4b39d756e13a5629f6691a5dbe4c6eecaa72fd77
|
refs/heads/master
| 2021-01-08T16:16:41.510663
| 2019-09-11T09:43:34
| 2019-09-11T09:43:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 746
|
r
|
transactions.R
|
## Transactions
## ------------
##
## A transaction has type:
## Time -> Amount
#' A non-transaction
#'
#' A transaction that does nothing: whatever the date, the amount is zero.
#'
#' @param date a date
#' @return always 0
#' @export
no_transactions <- function(date) {
  0
}
#' Create a transaction path
#'
#' Creates a transaction path that applies each transaction on its given date.
#'
#' @param amounts a vector of dollar amounts; positive numbers are
#'   contributions and negative numbers are withdrawals
#' @param dates a vector of the dates on which each transaction occurs
#' @return a function mapping a date to the transaction amount scheduled on
#'   that date, or 0 when no transaction is scheduled
#' @export
make_transactions_on_dates <- function (amounts, dates) {
  function (t) {
    # Index of the first scheduled date equal to t (NA when none matches).
    pos <- Position(function(d) t == d, dates)
    # FIX: use a scalar if/else rather than ifelse() -- ifelse() on a scalar
    # is an anti-pattern (it strips attributes from the amount and obscures
    # the two-branch intent).
    if (is.na(pos)) {
      0
    } else {
      amounts[[pos]]
    }
  }
}
|
9c8a14b3f8554c2a9ca588afec04e0f648716fe2
|
1aa278020398e19d726c226c44f6406e0952fbf1
|
/global.R
|
e357359b2c1cd079e4b3c08c44991a0f902edf60
|
[] |
no_license
|
angoodkind/TypeShift
|
11d82eae82e2ac7d31f6f529a2d2a491fabf7d7d
|
900af378ad24d9784f211eeddc1fea133c7d144d
|
refs/heads/master
| 2021-01-17T14:26:32.373460
| 2016-07-06T13:50:58
| 2016-07-06T13:50:58
| 53,366,533
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,167
|
r
|
global.R
|
library(tidyr)
library(dplyr)
library(reshape)
# Shiny global setup: load keystroke token data and enrich it with user,
# question and part-of-speech metadata. The resulting `df` is shared by the
# app's server/UI code.
# read in CSV and format
df <- read.csv('tokens.csv',sep='|',na.strings = "")
# df <- read.csv('token_viz_debug.csv',sep=',',na.strings = "")
# Unique key identifying one user's answer to one question.
df <- within(df, user.answer <- paste(UserID, QuestionID, sep='-'))
df$Token <- as.character(df$Token)
# Drop the CogLoad column (unused downstream).
df <- df[ , !names(df) %in% c('CogLoad')]
# Time spent on each token.
df$TokenTime <- df$EndTime - df$StartTime
# keystroke rate = keystroke count / time
df$KeystrokeRate <- df$KeystrokeCount/df$TokenTime
# add a column of cumulative total keystrokes
# (running sum within each user-answer)
df <- transform(df, cumul.ans.keystroke = ave(KeystrokeCount, user.answer,
FUN=cumsum))
# normalize keystroke rate across users
# (min-max scaling over the whole data set -- note: "Z" suffix, but this is
# not a z-score)
df$KeystrokeRate.Z <- (df$KeystrokeRate - min(df$KeystrokeRate))/
(max(df$KeystrokeRate)-min(df$KeystrokeRate))
# normalized cumulative time progress
# (fraction of the answer's total elapsed time at each token)
df$Relative.Time.Progress <- df$EndTime/
ave(df$EndTime,df$user.answer,FUN=max)
df$Percent.Complete <- 100*df$Relative.Time.Progress
# add user information
df.userInfo <- read.csv('user_data.csv',sep=',',na.strings="")
# Keep only the first row per user id.
df.userInfo <- df.userInfo[!duplicated(df.userInfo[, 1]), ]
# Collapse first language into English vs. Non-English (NA -> Non-English).
df.userInfo$NativeEnglish <- ifelse(df.userInfo$FirstLanguage=='English','English',
ifelse(is.na(df.userInfo$FirstLanguage),'Non-English',
'Non-English'))
# Human-readable demographic label per user.
df.userInfo <- within(df.userInfo, Demog.Info <- paste("User: ", sprintf("%02d",UserID),
", Age: ", Age,
", Gender: ", toupper(Gender),
", L1: ", FirstLanguage,
sep=""))
df <- merge(df,df.userInfo,by="UserID")
# add question information
df.questionInfo <- read.csv('question_data.csv',sep=',', na.strings="")
df.questionInfo$QuestionText <- as.character(df.questionInfo$QuestionText)
df <- merge(df,df.questionInfo,by="QuestionID")
# add POS information
df.pos <- read.csv("POS.csv",sep=',')
df <- merge(df,df.pos, by='POS')
# write.table(df, "FullTokenInfo.csv", sep='|', row.names=F)
|
49fce69ccad070950f1e5be6f497a43f6a263530
|
acdb497aa8a47599d3b7bd9438be2101b6ef415a
|
/man/CH11TA01.Rd
|
aabea7171f1ddd1ffdcfb7af91683cdb5f5f4804
|
[] |
no_license
|
bryangoodrich/ALSM
|
106ce1ab43806ec7c74fc72f9a26a094bf1f61d1
|
6fe1a413f996d26755638e9b2c81ae0aafd1a509
|
refs/heads/main
| 2022-07-15T15:55:23.708741
| 2022-07-03T19:55:04
| 2022-07-03T19:55:04
| 39,878,127
| 16
| 9
| null | null | null | null |
UTF-8
|
R
| false
| false
| 378
|
rd
|
CH11TA01.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{CH11TA01}
\alias{CH11TA01}
\title{CH11TA01}
\format{\preformatted{'data.frame': 54 obs. of 2 variables:
$ V1: int 27 21 22 24 25 23 20 20 29 24 ...
$ V2: int 73 66 63 75 71 70 65 70 79 72 ...
}}
\usage{
CH11TA01
}
\description{
CH11TA01
}
\keyword{datasets}
|
72a08666828eb62e00d99a3c33743df854040131
|
e3a1a949ac383c9abe8f72fb04fa85c5107d3131
|
/R/area_between.R
|
05e08c7aaf1c112fa5797d24904dc8955599c924
|
[] |
no_license
|
dleguyader/riverbed
|
4d881e9931e18ac1e859c6a542aee7a02f916908
|
4e1697632b4d47409a5230e619f311b80b071052
|
refs/heads/master
| 2022-06-13T21:30:05.240121
| 2020-05-05T11:03:33
| 2020-05-05T11:03:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,948
|
r
|
area_between.R
|
#' A function to calculate area between two profiles
#'
#' This function takes two profiles defined as (l1,z1) and (l2,z2) as inputs and calculates the area between them
#' @param s1 tibble with columns l and z describing first profile
#' @param s2 tibble with columns l and z describing second profile
#' @param h if provided by user, the second profile is supposed to be horizontal, with constant height=h (defaults to NA)
#' @param sigma_z a vector of length 1 or 2 providing an estimate of the error in measures of heights z1 and z2
#' @param sigma_l a vector of length 1 or 2 providing an estimate of the error in measures of longitudinal coordinates l1 and l2
#' @return area
#' @return area_by_type
#' @return sigma_area
#' @return data
#' @export
#' @examples
#' s1 <- tibble(l=c(1,3,5,6,9),
#'              z=c(1,2,3,2.5,5))
#' s2 <- tibble(l=c(0.5,2.5,4,6,8),
#'              z=c(3,1,2,4,3))
#' area_between(s1,s2)
#' area_between(s1,h=2)
area_between <- function(s1,
                         s2 = NA,
                         h = NA,
                         sigma_z = c(NA, NA),
                         sigma_l = c(NA, NA)) {
  # ---- area ----
  # Interleave the two series on a common abscissa (cross_series also adds
  # intersection points) and compute the signed area of every trapezium:
  # width w times mean height (La + Lb) / 2.
  dat <- cross_series(s1, s2, h) %>%
    mutate(w = lead(l, 1) - l,
           L = z1 - z2) %>%
    mutate(La = L,
           Lb = lead(L, 1)) %>%
    mutate(a = w * (La + Lb) / 2) %>%
    mutate(type = case_when(a > 0 ~ "upper",
                            a < 0 ~ "lower")) %>%
    arrange(l) %>%
    select(l, z1, p, z2, a, type, l_obs, z_obs, series) %>%
    mutate(order = 2)
  # Duplicate each intersection point with the opposite type so that both
  # the "upper" and the "lower" polygons are closed at every crossing.
  datc <- dat %>%
    filter(p == "intersect") %>%
    mutate(type = case_when(type == "upper" ~ "lower",
                            type == "lower" ~ "upper"),
           a = NA,
           order = 1)
  dat <- bind_rows(dat, datc) %>%
    arrange(l, order) %>%
    select(-order)
  # Carry the last known type forward to the following row.
  # NOTE(review): if the last typed row were also the last row this would
  # index one past the end of the column -- confirm that the final row's
  # type is always NA (a is NA there because lead() returns NA).
  ind <- max(which(!(is.na(dat$type))))
  if (ind <= nrow(dat)) {
    dat$type[ind + 1] <- dat$type[ind]
  }
  # Net signed area, and areas split by upper/lower type.
  area <- dat %>%
    filter(!is.na(a)) %>%
    summarise(area = sum(a, na.rm = TRUE)) %>%
    pull()
  area_by_type <- dat %>%
    filter(!is.na(a)) %>%
    group_by(type) %>%
    summarise(area = sum(a, na.rm = TRUE)) %>%
    filter(!is.na(type))
  # ---- uncertainty ----
  # Recycle a single error estimate to both series.
  if (length(sigma_z) == 1) {
    sigma_z <- rep(sigma_z, 2)
  }
  if (length(sigma_l) == 1) {
    sigma_l <- rep(sigma_l, 2)
  }
  # Variance of the area contributed by the observed points of one series.
  calc_var <- function(dat, index) {
    sigz <- sigma_z[index]
    sigl <- sigma_l[index]
    var <-
      dat %>%
      filter(series == paste0("s", index)) %>%
      # With W=l_b-l_a (horizontal length)
      # And Z=0.5*(z_b+z_a) (mean vertical height)
      # Formula of area= W*Z
      mutate(W = lead(l_obs, 1) - l_obs,
             Z = 0.5 * (z_obs + lead(z_obs, 1))) %>%
      # For each trapezium we consider only one point as a variable
      mutate(varW = 2 * (sigl^2),
             varZ = 2 * (0.5^2) * (sigz^2)) %>%
      mutate(ntot = n(),
             boundary = row_number() == 1 | row_number() == ntot - 1) %>%
      # Adjust to boundaries of series -> twice the variance
      mutate(varW = case_when(boundary ~ 2 * varW,
                              !boundary ~ varW),
             varZ = case_when(boundary ~ 2 * varZ,
                              !boundary ~ varZ)) %>%
      # Var(XY)=var(X)var(Y)+var(X)(E(Y)^2)+var(Y)(E(X)^2)
      mutate(var = varW * varZ + varW * (Z^2) + varZ * (W^2)) %>%
      summarise(var = sum(var, na.rm = TRUE)) %>%
      pull()
    return(var)
  }
  var1 <- calc_var(dat, index = 1)
  var2 <- calc_var(dat, index = 2)
  sigma_area <- sqrt(var1 + var2)
  # Error envelopes around every observed point.
  dat <- dat %>%
    mutate(z1min = case_when(series == "s1" ~ z1 - sigma_z[1]),
           z1max = case_when(series == "s1" ~ z1 + sigma_z[1]),
           z2min = case_when(series == "s2" ~ z2 - sigma_z[2]),
           z2max = case_when(series == "s2" ~ z2 + sigma_z[2])) %>%
    # BUG FIX: lmin/lmax were assigned twice inside the original mutate()
    # (once per series, each with a single-branch case_when); the second
    # assignment overwrote the first, leaving lmin/lmax NA for series "s1".
    # Both series are now handled in a single case_when per column.
    mutate(lmin = case_when(series == "s1" ~ l - sigma_l[1],
                            series == "s2" ~ l - sigma_l[2]),
           lmax = case_when(series == "s1" ~ l + sigma_l[1],
                            series == "s2" ~ l + sigma_l[2]))
  return(list(data = dat,
              area = area,
              area_by_type = area_by_type,
              sigma_area = sigma_area))
}
|
7ea5e097c2e3b7a53c425c9a0dfaf4bb70c1a005
|
ae5429663cd1e071e62fbef6d8081bd7f955a90b
|
/man/sscs_units.Rd
|
3092fd47f560d8cba0e1c842bd0ba02c32110f30
|
[
"MIT"
] |
permissive
|
btupper/softshell
|
160c41f60dda677e5cebeab359d176607ab661cd
|
a12dce6de32be94ee221ef52d9b47234dccbaca1
|
refs/heads/master
| 2021-01-06T18:48:24.494347
| 2020-02-20T17:38:20
| 2020-02-20T17:38:20
| 241,447,237
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 394
|
rd
|
sscs_units.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sscs.R
\name{sscs_units}
\alias{sscs_units}
\title{Retrieve the units for a given variable (if known)}
\usage{
sscs_units(x = "crop_legal")
}
\arguments{
\item{x}{character, the name of the variable}
}
\value{
the units for the specified variable
}
\description{
Retrieve the units for a given variable (if known)
}
|
46a6e484b9c2026db3d8e5eaf5bad7f0a66b6f61
|
fa37c0b201d6a837e16d9f8165d958b58c7064b8
|
/app/bs4Dash_version/navbar.R
|
8d1b000431fb0fd52fc1e84c9e13b4b5a23af9f4
|
[] |
no_license
|
DivadNojnarg/Lorenz_Apps
|
043ee113e22b42a22e01cebf946f98e178390606
|
4be1a1a81df9bb77868462a90d7405eb58c1262b
|
refs/heads/master
| 2021-03-22T04:44:15.475044
| 2019-05-26T21:43:43
| 2019-05-26T21:43:43
| 102,622,015
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 425
|
r
|
navbar.R
|
# Top navbar of the dashboard: Lorenz-computation controls on the left,
# utility controls plus a global reset button on the right.
navbar <- bs4DashNavbar(
  skin = "light",
  status = "white",
  leftUi = computeLorenzUi(id = "compute"),
  rightUi = fluidRow(
    utilsUi(id = "utils"),
    # The reset action is deliberately kept outside the module: shinyjs
    # does not work in that specific case, so a plain actionBttn is used.
    actionBttn(
      inputId = "resetAll",
      label = " Reset",
      icon = icon("trash"),
      style = "simple",
      color = "danger",
      size = "xs"
    )
  )
)
|
982d7245f312eded10a8dad9d039c9fa96c473eb
|
d172c7a2c218c9016fc8a0ba234d48f7c4b19979
|
/assessment/censusPostAnalysis.R
|
dddcb9564ab60aebdc9240f3690593ce38d25095
|
[] |
no_license
|
anbeier/MasterThesis
|
90ecde060f2eca6d4e26ce79cd1f088325c8a61f
|
f6e24043a93461c77cf0ac064ee59778636d63ae
|
refs/heads/master
| 2021-01-23T09:04:24.080237
| 2015-01-28T01:16:31
| 2015-01-28T01:16:31
| 63,418,308
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,438
|
r
|
censusPostAnalysis.R
|
# Names of the frequent-quasi-clique result files, one per delta setting
# (0.7, 0.8, 0.9; alpha fixed at 0.5).
fqsFileList <- function() {
  paste0('census_fqs_delta0.', 7:9, '_alpha0.5.txt')
}
# Names of the per-configuration result folders, one per delta setting
# (0.7, 0.8, 0.9; alpha fixed at 0.5).
folderList <- function() {
  paste0('census_delta0.', 7:9, '_alpha0.5')
}
# Assess every experiment configuration: for each fqs-file / result-folder
# pair, compute the fraction of quasi-cliques that pass the quality checks.
# Returns a list with one element per configuration, each a
# list(QualityScore, QualifiedCliques) as produced by calculateQuality().
analysing <- function(fqsFiles, folders) {
  # BUG FIX: the original computed length(fqsFileList) -- the *function*
  # object, whose length is 1 -- instead of the fqsFiles argument, so only
  # the first configuration was ever processed; it also overwrote the
  # per-iteration results instead of collecting them.
  num <- length(fqsFiles)
  results <- vector("list", num)
  for (i in seq_len(num)) {
    results[[i]] <- calculateQuality(folders[i], fqsFiles[i])
  }
  results
}
# Return list(QualityScore, QualifiedCliques)
# QualityScore is the share of all quasi-cliques (read from fqsFile) that
# pass the SVM-based quality criterion. (Bayes-based filtering and the
# intersection of both result sets are currently disabled.)
calculateQuality <- function(folderName, fqsFile) {
  qualified <- findGoodCliquesFromSVM(folderName)
  cliques <- readingQuasiCliques(fqsFile)
  list(QualityScore = nrow(qualified) / length(cliques),
       QualifiedCliques = qualified)
}
# Return data.frame(index, target)
# Scans every saved SVM result file in <folderName>/<method> and keeps the
# cliques that pass the MCC criterion (isGoodFactorClique).
# NOTE(review): despite the comment above, the returned data frame actually
# has three columns: index, target, mcc. Returns NULL when nothing passes.
findGoodCliquesFromSVM <- function(folderName, method = 'svm') {
  fileNames <- list.files(paste(folderName, method, sep='/'), full.names = TRUE)
  # For each quasi clique, assess its SVM experiment results
  good <- NULL
  for(fn in fileNames) {
    # Load result.svm variable:
    # a list of 2 elements: index (clique index), result (data frame of actual & predicted values)
    load(fn)
    #log(paste("Examing svm clique", result.svm$index))
    print(paste("Examing svm clique", result.svm$index, "..."))
    x <- isGoodFactorClique(result.svm$result)
    if(x$boolean) {
      # rbind-in-loop is O(n^2), but the number of result files is
      # expected to be small enough that this does not matter here.
      good <- rbind(good, data.frame(index = result.svm$index,
                                     target = x$best$target,
                                     mcc = x$best$mcc))
    }
  }
  print('Finished')
  return(good)
}
# Return data.frame(index, target)
# Naive-Bayes counterpart of findGoodCliquesFromSVM: scans the saved result
# files in <folderName>/<method> and keeps cliques passing the MCC
# criterion. Actually returns columns index, target, mcc (NULL when none).
findGoodCliquesFromBayes <- function(folderName, method='bayes') {
  fileNames <- list.files(paste(folderName, method, sep='/'), full.names = TRUE)
  good <- NULL
  for(fn in fileNames) {
    # Load result.bayes variable: list(index, result) where result is a
    # data frame of actual & predicted values.
    load(fn)
    # BUG FIX: the original called log(paste(...)) -- log() is the math
    # function, so passing a character string raised an error. Use print(),
    # as the SVM counterpart does.
    print(paste("Examing bayes clique", result.bayes$index, "..."))
    x <- isGoodFactorClique(result.bayes$result)
    if(x$boolean) {
      # BUG FIX: isGoodFactorClique() returns list(boolean, best); the best
      # target/mcc live under x$best, not directly under x (the original
      # x$target / x$mcc were always NULL). Now matches
      # findGoodCliquesFromSVM.
      good <- rbind(good, data.frame(index = result.bayes$index,
                                     target = x$best$target,
                                     mcc = x$best$mcc))
    }
  }
  return(good)
}
# Decide whether a clique's SVM results qualify, judged per target column on
# both test error (against per-column thresholds) and F1 score.
# Returns list(boolean, target): target is the best column name, or a
# comma-separated pair when the two criteria pick different columns; NULL
# when the clique does not qualify.
isGoodSVMClique <- function(experimentResult, errorThresholds) {
  # Do assessment for each target column
  dfs <- split(experimentResult, experimentResult$target)
  boolean <- FALSE
  best <- NULL
  # Columns whose test error is at or below their threshold (or NULL).
  provedTestError <- assessTestingError(dfs, errorThresholds)
  if(!is.null(provedTestError)) {
    # Reduce to the name of the column with the lowest test error.
    provedTestError <- as.character(provedTestError[which.min(provedTestError$testerror), 'target'])
    # The clique only qualifies if the F1 criterion also passes.
    provedF1Score <- assessF1Score(dfs)
    if(!is.null(provedF1Score)) {
      boolean <- TRUE
      # Reduce to the name of the column with the highest F1 score.
      provedF1Score <- as.character(provedF1Score[which.max(provedF1Score$f1score), 'target'])
    }
  }
  if(boolean) {
    # When the two criteria disagree on the best column, report both.
    if(!provedTestError == provedF1Score) {
      best <- paste(provedTestError, provedF1Score, sep = ',')
    } else {
      best <- provedTestError
    }
  }
  list(boolean=boolean, target=best)
}
# Return list(boolean, best)
# A clique qualifies when at least one target column's MCC passes the
# threshold in assessMatthewsCorrelationCoefficient(); 'best' is the row
# (target, mcc) with the highest MCC among those, NULL otherwise.
isGoodFactorClique <- function(experimentResult) {
  byTarget <- split(experimentResult, experimentResult$target)
  passing <- assessMatthewsCorrelationCoefficient(byTarget)
  if (is.null(passing)) {
    return(list(boolean = FALSE, best = NULL))
  }
  list(boolean = TRUE, best = passing[which.max(passing$mcc), ])
}
# Return the best target(s)
# Compares two one-row score frames (columns: target, score). When the
# targets differ, the higher score wins; on a score tie both target names
# are returned, comma-separated. Identical targets are returned as-is.
getBestTargetColumn <- function(best1, best2) {
  t1 <- best1[, 'target']
  t2 <- best2[, 'target']
  if (t1 == t2) {
    return(t2)
  }
  s1 <- best1[, 2]
  s2 <- best2[, 2]
  if (s1 == s2) {
    paste(t1, t2, sep = ',')
  } else if (s1 > s2) {
    t1
  } else {
    t2
  }
}
# For each target column, compute the test-error rate (share of rows where
# predicted != actual) and keep the columns whose rounded error does not
# exceed the column's threshold from errorThresholds (a frame with columns
# column, randomguess, threshold).
# Returns a data frame (target, testerror, threshold) or NULL when no
# column qualifies.
assessTestingError <- function(result.list.by.target, errorThresholds) {
  # If predicted does not equal actual, mark an error.
  dfs <- lapply(result.list.by.target,
                function(x) {
                  # Assumes columns 2 and 3 of x hold actual and predicted
                  # values -- TODO confirm against the result-frame layout.
                  errors <- unlist(apply(x, 1,
                                         function(y) {
                                           if(y[2] != y[3]) {
                                             'error'
                                           }}))
                  # Error rate = number of mismatching rows / total rows.
                  error <- length(errors) / nrow(x)
                  data.frame(target = x$target[1], testerror = error)
                })
  # A data frame with 2 columns: target, testerror
  df <- Reduce(function(...) merge(..., all=TRUE), dfs)
  # Find out target columns of which the test error <= threshold
  # (both sides are rounded to 2 decimals before comparison).
  dfs <- apply(df, 1,
               function(x, thres = errorThresholds) {
                 testerror <- round(as.numeric(x[2]), 2)
                 t <- round(as.numeric(thres[thres$column == x[1],][3]), 2)
                 if(testerror <= t) {
                   data.frame(target=x[1], testerror=x[2], thres[thres$column == x[1],][3])
                 }
               })
  # Drop the NULLs produced by non-qualifying columns.
  dfs <- delete.Nulls(dfs)
  # If there exists at least one target column fulfilling the criteria mentioned above,
  # it will return a data frame with 3 columns: target, testerror, threshold.
  # If not, NULL will be returned.
  Reduce(function(...) merge(..., all=TRUE), dfs)
}
# Return a data frame with 2 columns: target, f1score -- only the target
# columns whose micro-averaged F1 score passes the threshold; NULL when
# none do.
assessF1Score <- function(result.list.by.target) {
  f1.threshold <- 0.8
  # For each data frame (w.r.t a target column), calculate F1 score.
  f1.list <- unlist(lapply(result.list.by.target,
                           function(x) computeF1ScoreMulticlass(x)))
  res <- NULL
  # FIX: seq_along() instead of 1:length() -- the latter yields c(1, 0) and
  # breaks when the input list is empty.
  for (i in seq_along(f1.list)) {
    f <- f1.list[i]
    # F1 score can be NaN, because precision.u and recall.u can both be 0.
    if (!is.nan(f) && f >= f1.threshold && f <= 1) {
      res <- rbind(res, data.frame(target = result.list.by.target[[i]]$target[1],
                                   f1score = f))
    }
  }
  res
}
# Return a data frame with 2 columns: target, mcc -- only the target
# columns whose MCC exceeds the threshold; NULL when none qualify.
assessMatthewsCorrelationCoefficient <- function(result.list.by.target) {
  mcc.threshold <- 0.6
  mcc.list <- unlist(lapply(result.list.by.target,
                            # function(x) computeMCCMutliclass(x)))
                            function(x) computeMCC(x)))
  res <- NULL
  # FIX: seq_along() handles an empty input list (1:length() would yield
  # c(1, 0)); the is.na() guard prevents if() from erroring when
  # computeMCC() returns NaN (zero denominator in the covariance ratio --
  # is.na() is TRUE for NaN as well).
  for (i in seq_along(mcc.list)) {
    m <- mcc.list[i]
    if (!is.na(m) && m > mcc.threshold && m <= 1) {
      res <- rbind(res, data.frame(target = result.list.by.target[[i]]$target[1],
                                   mcc = m))
    }
  }
  res
}
# Indicator comparison: 1 when char equals label, 0 otherwise.
compare <- function(char, label) {
  if (char == label) 1 else 0
}
# Build a 0/1 indicator data frame with one column per class label:
# entry [i, j] is 1 when vector[i] equals labels[j], 0 otherwise.
computeMetricesXY <- function(vector, labels) {
  indicatorCols <- lapply(labels, function(lbl) {
    sapply(vector, FUN = function(v) compare(v, label = lbl))
  })
  res <- as.data.frame(do.call(cbind, indicatorCols))
  names(res) <- labels
  res
}
# Column mean of column classIndex: sum over all samples divided by the
# number of samples (rows of x).
computeAverageXk <- function(x, classIndex) {
  sum(x[, classIndex]) / nrow(x)
}
# Covariance term used by the multi-class MCC: the sum over samples and
# classes of (x[s,k] - mean_k(x)) * (y[s,k] - mean_k(y)), divided by the
# number of classes. x and y are 0/1 indicator frames (samples x classes).
covXY <- function(x, y) {
  numOfLabels <- length(names(x))
  numOfSamples <- nrow(x)
  # PERF FIX: the column means are loop invariants; the original recomputed
  # computeAverageXk() inside the per-sample loop, making this
  # O(samples^2 * classes). Hoisting preserves the summation order exactly.
  xMeans <- vapply(seq_len(numOfLabels),
                   function(k) computeAverageXk(x, k), numeric(1))
  yMeans <- vapply(seq_len(numOfLabels),
                   function(k) computeAverageXk(y, k), numeric(1))
  summe <- 0
  for (s in seq_len(numOfSamples)) {
    for (k in seq_len(numOfLabels)) {
      summe <- summe + (x[s, k] - xMeans[k]) * (y[s, k] - yMeans[k])
    }
  }
  summe / numOfLabels
}
# TRUE when exactly ncol(x) - 1 columns of the indicator frame are constant
# zero, i.e. every sample was predicted as the one remaining class.
areAllPredictedSameClass <- function(x) {
  zeroCols <- vapply(seq_len(ncol(x)),
                     function(j) all(x[, j] == x[1, j] & x[1, j] == 0),
                     logical(1))
  zeroColCount <- sum(zeroCols == TRUE)
  if (zeroColCount == ncol(x) - 1) {
    TRUE
  } else {
    FALSE
  }
}
# Multi-class Matthews correlation coefficient computed from 0/1 indicator
# frames of predictions (X) and ground truth (Y), rounded to 4 decimals.
# Defined as 0 when every sample was predicted as a single class.
computeMCC <- function(df) {
  df <- correctFactorLevels(df)
  classLabels <- levels(df$actual)
  df <- na.omit(df)
  X <- computeMetricesXY(df$predicted, classLabels)
  # MCC equals zero if all samples are classified to one class.
  if (areAllPredictedSameClass(X)) {
    return(0)
  }
  Y <- computeMetricesXY(df$actual, classLabels)
  mccValue <- covXY(X, Y) / sqrt(covXY(X, X) * covXY(Y, Y))
  round(mccValue, 4)
}
# Compute MCC by shelling out to an external "./mcc" binary: writes the
# cleaned actual/predicted frame to a temporary CSV, runs "./mcc -f <file>"
# and parses the "target,mcc" line the tool prints on stdout.
# NOTE(review): requires the mcc executable in the working directory; the
# temp file is not removed if system() fails -- consider on.exit(unlink()).
computeMCCExtern <- function(df) {
  df = df   # no-op self-assignment, kept as in the original
  df = correctFactorLevels(df)
  df = na.omit(df)
  # The external tool expects a constant grouping column named "target".
  df["target"] = "target"
  filename = tempfile()
  write.csv(df, file = filename, row.names=FALSE, quote=FALSE)
  out = system(paste("./mcc", "-f", filename, sep=" "), intern=TRUE, ignore.stderr = TRUE)
  unlink(filename)
  # Prepend a CSV header so the raw output parses with read.csv().
  con = textConnection(paste("target,mcc\n", out, sep=""))
  ret = read.csv(con)
  close(con)
  return(ret$mcc)
}
# Delete NULL (zero-length) entries in a list; names are preserved by `[`.
delete.Nulls <- function(aList) {
  # IDIOM: lengths() is the direct (and faster) equivalent of the original
  # unlist(lapply(aList, length)) comparison.
  aList[lengths(aList) != 0]
}
# Return a data frame with 4 columns: TP, TN, FP, FN
# One-vs-rest confusion counts for classLabel, computed from the actual and
# predicted columns of data (rows with NA actual are excluded).
computeConfusionMatrixOfOneLabel <- function(classLabel, data) {
  print('computing confusion matrix...')
  # Predictions for condition-positive / condition-negative rows.
  posPred <- data$predicted[which(data$actual == classLabel)]
  negPred <- data$predicted[which(data$actual != classLabel)]
  data.frame(TP = length(posPred[posPred == classLabel]),
             TN = length(negPred[!negPred == classLabel]),
             FP = length(negPred[negPred == classLabel]),
             FN = length(posPred[!posPred == classLabel]))
}
# Micro-averaged F1 score over all classes.
#
# Builds a one-vs-rest confusion matrix per class, pools TP/FP/FN across
# classes (micro averaging, hence the ".u" suffix), and combines pooled
# precision and recall into F1.
#
# @param data Data frame with `actual` and `predicted` columns.
# @return Micro-averaged F1 score.
computeF1ScoreMulticlass <- function(data) {
  # Correct the levels of factors in actual, predicted columns
  df <- correctFactorLevels(data)
  # Get all categories
  cat <- levels(df$actual)
  # Compute confusion matrix for each category
  lp <- lapply(cat,
               function(x, data=df) computeConfusionMatrixOfOneLabel(x, data=df))
  # Stack the per-class one-row confusion matrices into one data frame.
  df <- Reduce(function(...) merge(..., all=TRUE), lp)
  # Pooled (micro) precision and recall over all classes.
  precision.u <- sum(df$TP)/sum(df$TP + df$FP)
  recall.u <- sum(df$TP)/sum(df$TP + df$FN)
  2 * precision.u * recall.u / (precision.u + recall.u)
}
# Multi-class MCC from pooled one-vs-rest confusion counts.
# (Name keeps the original "Mutliclass" spelling; callers may rely on it.)
#
# @param data Data frame with `actual` and `predicted` columns.
# @return MCC value; NaN if any divisor factor is zero.
computeMCCMutliclass <- function(data) {
  # Correct the levels of factors in actual, predicted columns
  df <- correctFactorLevels(data)
  # Get all categories
  cat <- levels(df$actual)
  # Compute confusion matrix for each category
  lp <- lapply(cat,
               function(x, data=df) computeConfusionMatrixOfOneLabel(x, data=df))
  df <- Reduce(function(...) merge(..., all=TRUE), lp)
  print('computing MCC score...')
  # as.numeric() promotes the sums to double so the products cannot
  # overflow R's integer range on large datasets.
  dividend <- as.numeric(sum(df$TP)) * as.numeric(sum(df$TN)) - as.numeric(sum(df$FP)) * as.numeric(sum(df$FN))
  #divisor <- sum(df$TP+df$FP) * sum(df$TP+df$FN) * sum(df$TN+df$FP) * sum(df$TN+df$FN)
  # Taking sqrt of each factor separately keeps the product in range too.
  divisor <- sqrt(sum(df$TP+df$FP)) * sqrt(sum(df$TP+df$FN)) * sqrt(sum(df$TN+df$FP)) * sqrt(sum(df$TN+df$FN))
  dividend / divisor
}
# Compute one MCC score per target column of an experiment result.
#
# @param experimentResult Data frame with `target`, `actual`, `predicted`
#   columns; rows are split by `target` and scored independently.
# @return Data frame with columns TargetColumn and MCC.
returnMCCTable <- function(experimentResult) {
  dfs <- split(experimentResult, experimentResult$target)
  mcc.list <- unlist(lapply(dfs,
                            # function(x) computeMCCMutliclass(x)))
                            function(x) computeMCC(x)))
  # A matrix in which each row indicates a MCC score of one column.
  # (as.matrix keeps the split names as row names, recovered as `target`.)
  df <- data.frame(target = row.names(as.matrix(mcc.list)), MCC = as.matrix(mcc.list)[,1])
  colnames(df) <- c('TargetColumn', 'MCC')
  return(df)
}
# Harmonise the factor levels of the `actual` and `predicted` columns.
#
# Both columns end up sharing one level set: the observed levels of
# `actual`, followed by any levels that only appear in `predicted`.
#
# @param df Data frame with `actual` and `predicted` columns.
# @return The same data frame with both columns re-factored onto the
#   common level set.
correctFactorLevels <- function(df) {
  shared <- unique(c(levels(factor(df$actual)), levels(factor(df$predicted))))
  df$actual <- factor(df$actual, levels = shared)
  df$predicted <- factor(df$predicted, levels = shared)
  df
}
# Return a data frame with 2 columns: index, target (the one and only)
#
# Inner-joins two result sets on `index` and keeps only the rows where both
# sides carry the same target value; the two original target columns are
# dropped afterwards.
# NOTE(review): intersect() is applied per row to the two target strings as
# whole scalars, so comma-separated multi-target cells are compared as
# single strings, not element-wise (contrast the commented-out strsplit
# variant) — confirm that is intended.
intersectGoodResults <- function(res1, res2) {
  # Inner join on index
  df <- merge(res1, res2, by = 'index')
  # Find common value in target
  #df$target <- Reduce(function(...) intersect(...),
  #                    list(unlist(strsplit(as.character(df[,2]), ',')),
  #                         unlist(strsplit(as.character(df[,3]), ','))))
  # apply() hands each row over as a character vector: x[2] is target.x,
  # x[3] is target.y.
  df$target <- unlist(apply(df, 1,
                            function(x) {
                              y <- intersect(x[2], x[3])
                              if(length(y) == 0) {
                                NA
                              } else if(length(y) == 1) {
                                y
                              } else if(length(y) > 1) {
                                paste(y, collapse = ',')
                              }
                            }))
  # Remove NAs resulting from intersection
  df <- na.omit(df)
  # Delete target.x column
  df[,2] <- NULL
  # Delete target.y column (shifted into position 2 by the deletion above)
  df[,2] <- NULL
  return(df)
}
# Derive an error-rate threshold for every column of a dataset.
#
# For a factor column with k levels, random guessing has an expected error
# rate of (k - 1) / k; half of that rate is taken as the column's threshold.
#
# @param dataset Data frame of factor columns.
# @return Data frame with columns: column, randomguess, threshold.
calculateErrorThresholds <- function(dataset) {
  per.column <- list()
  for (col in colnames(dataset)) {
    k <- length(levels(dataset[, col]))
    guess.rate <- (k - 1) / k
    # Assumed that 50% of the random-guess error rate is the threshold.
    per.column[[col]] <- data.frame(column = col,
                                    randomguess = guess.rate,
                                    threshold = guess.rate / 2)
  }
  # A data frame with 3 columns: column, randomguess, threshold
  Reduce(function(...) merge(..., all = TRUE), per.column)
}
# Concatenate the `result.rules` data frames stored in a set of .RData files.
#
# @param filenames Paths to files saved via save(result.rules, ...).
# @return One data frame with all rule rows row-bound together (NULL when
#   `filenames` is empty).
getRulesResults <- function(filenames) {
  dfExperimentsDetails <- NULL
  for(fn in filenames) {
    load(fn) # Will have a result.rules variable in the environment after loading a file
    # rbind(NULL, x) is just x, so the first iteration seeds the result.
    dfExperimentsDetails <- rbind(dfExperimentsDetails, result.rules)
  }
  return(dfExperimentsDetails)
}
# If some rule had a lift of 1, it would imply that the probability of occurrence of the antecedent and
# that of the consequent are independent of each other.
# If the lift is > 1, that lets us know the degree to which those two occurrences are dependent on one another,
# and makes those rules potentially useful for predicting the consequent in future data sets.
#
# Filter association-rule results down to "good" cliques: rules with
# lift >= 1.1 whose outcome column has at least one outcome value that is
# NOT dominant in the dataset (dominant outcomes are trivially predictable).
#
# @param resultsFromRules Rule data frame with at least `lift`,
#   `clique_index`, `outcome_column`, `outcome_value` columns.
# @param dataset Original dataset used to determine dominant column values.
# @return Merged data frame of the good rules across all good cliques.
findGoodCliquesFromRules <- function(resultsFromRules, dataset) {
  dominators <- getAllDominantColumnValues(dataset)
  # 1.1 lift cut-off: keep only positively dependent antecedent/consequent.
  data <- resultsFromRules[resultsFromRules$lift >= 1.1, ]
  dfs <- split(data, f = data[, 'clique_index']) ## Split data into data frames w.r.t. clique_index
  res <- Reduce(function(...) merge(..., all=T),
                lapply(dfs,
                       function(x, dom = dominators) {
                         temp <- isGoodClique(x, dom)
                         if(temp$isGood) {
                           temp$goodRules
                         } else {
                           NA
                         }
                       }))
  # NOTE(review): presumably the NA placeholders surface as a spurious `y`
  # column after merging, which is dropped here — verify.
  res$y <- NULL
  return(res)
}
# Tested
# A clique is considered good if there is at least one column fulfilling the conditions.
# Condition: at least one of the column's observed outcome values is NOT a
# dominant value of that column in the dataset.
#
# @param qs Data frame of rules for one clique (needs `outcome_column` and
#   `outcome_value` columns).
# @param dominators Dominant-value lookup from getAllDominantColumnValues().
# @return list(isGood = logical, goodRules = subset of `qs` whose outcome
#   column had a non-dominant value).
isGoodClique <- function(qs, dominators) {
  dfs <- split(qs, f = qs[, 'outcome_column']) ## Split data into data frames w.r.t. outcome_column
  isGood <- FALSE
  columns <- NULL
  for(df in dfs) {
    for(lvl in levels(as.factor(df$outcome_value))) {
      if(!isColumnValueDominant(df$outcome_column[1], lvl, dominators)) {
        isGood <- TRUE
        columns <- c(columns, df$outcome_column[1])
      }
    }
  }
  # Keep only the rules whose outcome column passed the check above.
  df <- qs[qs$outcome_column %in% unique(columns), ]
  list(isGood = isGood, goodRules = df)
}
# Tested
# Return a list of data frames indicating the dominant values according to each column in the dataset.
#
# For each column: split the values by level, compute level proportions,
# then keep only the dominant levels (largest-gap heuristic, see
# getDominantLevels()). The result is a list named by column.
#
# @param dataset Data frame whose columns are analysed.
# @return Named list: column name -> data frame of its dominant levels.
getAllDominantColumnValues <- function(dataset) {
  rowNum = nrow(dataset)
  ls <- NULL
  for(column in colnames(dataset)) {
    ls.level <- split(dataset[, column], f = dataset[, column]) ## a list of vectors w.r.t. a specific level of this column
    df.level.prop <- calculateLevelProportions(ls.level, rowNum)
    df.level.dominant <- getDominantLevels(df.level.prop)
    # Append while naming the new element after the current column.
    if(length(ls) == 0) {
      ls <- list(column = df.level.dominant)
      names(ls) = column
    } else if(length(ls) > 0) {
      ls <- c(ls, list(column = df.level.dominant))
      names(ls)[length(ls)] = column
    }
  }
  return(ls)
}
# Compute the relative frequency of each level in a split column.
#
# @param list.of.levels List of vectors, one per level (as from split());
#   the first element of each vector names the level.
# @param dim.rows Total row count the proportions are relative to.
# @return Data frame with columns `level` (character) and `proportion`
#   (rounded to 4 decimal places).
calculateLevelProportions <- function(list.of.levels, dim.rows) {
  # vapply instead of growing vectors with c() inside a loop; unname()
  # keeps the columns free of the split()-derived names.
  level.names <- unname(vapply(list.of.levels,
                               function(v) as.character(v[1]),
                               character(1)))
  level.proportions <- unname(vapply(list.of.levels,
                                     function(v) round(length(v) / dim.rows, 4),
                                     numeric(1)))
  data.frame(level = level.names, proportion = level.proportions)
}
# Tested
# Keep only the "dominant" levels of a level/proportion table.
#
# The threshold is chosen adaptively: proportions are sorted and the
# proportion just below the largest gap between consecutive values marks
# the cut-off; only levels strictly above it are kept. (A single-level
# input yields an empty result, as in the original.)
#
# @param df Data frame with `level` and `proportion` columns.
# @return Subset of `df` containing only the dominant levels.
getDominantLevels <- function(df) {
  prop <- sort(df$proportion)
  # base::diff() replaces the manual while loop over consecutive gaps.
  gaps <- diff(prop)
  thres <- prop[which.max(gaps)] ## proportion sitting below the largest gap
  df <- df[df$proportion > thres, ]
  return(df)
}
# Test whether `val` is one of the dominant levels recorded for column `col`.
#
# @param col Column name (key into `dominators`).
# @param val Candidate level value.
# @param dominators Named list of dominant-level data frames, as produced
#   by getAllDominantColumnValues().
# @return TRUE if `val` appears among the column's dominant levels.
isColumnValueDominant <- function(col, val, dominators) {
  dominant.levels <- dominators[[col]]$level
  val %in% dominant.levels
}
# Return the clique indices present in both result sets.
#
# @param result1,result2 Data frames with a `clique_index` column.
# @return Vector of clique indices fulfilling both criteria.
cliquesFulfillingAllCriterion <- function(result1, result2) {
  intersect(unique(result1$clique_index), unique(result2$clique_index))
}
# A pruning-flag vector is prunable when no entry is FALSE.
#
# @param x Logical vector of per-step pruning flags.
# @return TRUE if FALSE does not occur in `x`, else FALSE.
isPrunable <- function(x) {
  !(FALSE %in% x)
}
# Summarise per-clique pruning result files in a folder.
#
# Each CSV in `folderName` holds the pruning steps of one clique: a
# `pruningPossible` flag per step, the clique `index`, and the pruned
# columns per step as a '|'-separated string. A clique counts as finally
# prunable only if every step was prunable AND the union of pruned columns
# does not cover the whole clique (pruning everything does not count).
# NOTE(review): readingQuasiCliques() is defined elsewhere in this project.
#
# Fixes vs. the original: strsplit now treats '|' as a literal separator
# (the bare regex '|' split between every character), the clique index is
# taken from the first row instead of the whole column, the loop-invariant
# clique lookup is hoisted, and the computed flags are returned instead of
# being discarded.
#
# @param folderName Directory containing pruning result CSV files.
# @param fqsFile Quasi-clique definition file for readingQuasiCliques().
# @return Named logical vector: final prunability per result file.
summarizePruning <- function(folderName, fqsFile) {
  fileNames <- list.files(folderName, full.names = TRUE)
  # Loop-invariant: the quasi-clique definitions depend only on fqsFile.
  cliqueGroup <- readingQuasiCliques(fqsFile)
  results <- logical(0)
  for (fn in fileNames) {
    df = read.csv(fn)
    finalPruningPossible = TRUE
    # All rows of one file belong to the same clique; read its index once.
    index = as.integer(df$index[1])
    # check if a FALSE exists
    if (!isPrunable(df$pruningPossible)) {
      finalPruningPossible = FALSE
    } else {
      # Union of all pruned columns across the file's steps.
      # fixed = TRUE: '|' is a literal separator, not regex alternation.
      allCols = unlist(strsplit(as.character(df$pruned), '|', fixed = TRUE))
      allCols = sort(unique(allCols))
      # Pruning away the entire clique does not count as prunable.
      if (identical(allCols, sort(cliqueGroup[[index]]))) {
        finalPruningPossible = FALSE
      }
    }
    results[fn] <- finalPruningPossible
  }
  results
}
# Summarise pruning results for one dataset/delta configuration.
#
# Reads every CSV in "<dataset>_delta<delta>_alpha0.5-pruning", maps the
# mangled pruned-column names back to readable names via `name_map`,
# unions them per clique, and reports whether the clique shrank.
# NOTE(review): getCliquesForDataset() is defined elsewhere in this project.
#
# @param datasetName Dataset identifier used in the folder name.
# @param delta Delta parameter used in the folder name.
# @param name_map Named list mapping mangled column names to readable ones.
# @return List of lists with elements: index, union (readable pruned column
#   names), prunable, original_size.
summarize_data <- function(datasetName, delta, name_map) {
  pruning_result_files = list.files(paste(datasetName, '_delta', delta, '_alpha0.5-pruning', sep=''), full.names=TRUE)
  results = list()
  for (result_file in pruning_result_files) {
    df = read.csv(result_file)
    index = as.integer(head(df, 1)$index)
    cliques = getCliquesForDataset(datasetName, delta)
    clique = cliques[[index]]
    # Split each '|'-separated pruned-column string and translate each
    # mangled name through name_map.
    nice_pruned = sapply(df$pruned, function(x) {
      parts = strsplit(as.character(x), "\\|")[[1]]
      parts = sapply(parts, function(p) {
        name_map[[p]]
      })
      parts
    })
    # Union of pruned columns across all steps of this clique.
    union = c()
    for(c in nice_pruned) {
      union = c(union, c)
    }
    union = unique(sort(union))
    # Prunable: at least one clique column was never pruned away.
    prunable = length(clique) > length(union)
    results = c(results, list(list(index=index, union=union, prunable=prunable, original_size=length(clique))))
  }
  results
}
# Build a lookup from mangled census column names back to the originals.
#
# Reads census_colnames.txt (one original column name per line) and keys
# each name by its mangled form.
# NOTE(review): modifyColname() is defined elsewhere in this project.
#
# @return Named list: modifyColname(name) -> original column name.
create_census_name_map <- function() {
  colnames = readLines('census_colnames.txt', encoding = "UTF-8")
  ugly_to_colname_map = list()
  for (line in colnames) {
    ugly_to_colname_map[[modifyColname(line)]] = line
  }
  ugly_to_colname_map
}
# Convenience wrapper: summarise the census dataset for a given delta.
summarize_census <- function(delta) {
  summarize_data('census', delta, create_census_name_map())
}
# Convenience wrapper: summarise the TPC-H dataset for a given delta.
# TPC-H column names are already readable, so the name map is the identity.
summarize_tpch <- function(delta) {
  load('tpch.rdata')  # provides the `tpch` data frame
  cn = colnames(tpch)
  name_map = list()
  for (c in cn) {
    name_map[[c]] = c
  }
  summarize_data('tpch', delta, name_map)
}
# Render a pruning summary as rows of a 4-column LaTeX table.
#
# Each summary entry becomes length(union) table rows: the first row of a
# group carries \multirow cells for the clique index, prunable flag, and
# "original/pruned-union" size ratio; subsequent rows leave those cells
# empty. Each pruned-column row ends with a \cline over column 4, and the
# last row of a group closes with a full-width \cline{1-4}.
#
# @param summary List of per-clique summaries from summarize_data().
# @param output_file Path the LaTeX lines are written to.
summary_to_latex <- function(summary, output_file) {
  # Emit groups in ascending clique-index order.
  indices = sapply(summary, function(x) {x$index})
  summary = summary[order(indices)]
  lines = c()
  for (data in summary) {
    first = TRUE
    count = 0
    for (col in data$union) {
      count = count + 1
      parts = c()
      if (first) {
        first = FALSE
        parts = c(parts, paste("\\multirow{", length(data$union), "}{*}{", data$index, "}", sep=''))
        parts = c(parts, paste("\\multirow{", length(data$union), "}{*}{", data$prunable, "}", sep=''))
        parts = c(parts, paste("\\multirow{", length(data$union), "}{*}{", data$original_size, "/", length(data$union), "}", sep=''))
      } else {
        # Continuation rows: the \multirow cells above span these rows.
        parts = c(parts, "", "", "")
      }
      if (count < length(data$union)) {
        parts = c(parts, paste(col, "\\\\ \\cline{4-4}"))
      } else {
        parts = c(parts, paste(col, "\\\\ \\cline{1-4}"))
      }
      lines = c(lines, paste(parts, collapse=" & "))
    }
  }
  writeLines(lines, output_file)
}
# Percentage of summary entries flagged as prunable.
#
# @param summary List of per-clique summaries, each with a logical
#   `prunable` element (as produced by summarize_data()).
# @return Percentage (0-100) of prunable cliques.
percentage_of_prunable <- function(summary) {
  # vapply with a logical template replaces the manual if/else 1/0 mapping;
  # 100 * mean(...) equals (100 / n) * sum(...).
  100 * mean(vapply(summary, function(x) x$prunable, logical(1)))
}
# Percentage of cliques that are NOT prunable — the complement of
# percentage_of_prunable(), i.e. cliques that must be kept in full.
percentage_of_ultimate <- function(summary) {
  100 - percentage_of_prunable(summary)
}
|
8b92d8d226df9054d604f9932ad81222fb2af0c1
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/PopGenome/examples/get.status-methods.Rd.R
|
60707cd8e5200bf678a173251fa3931408abe519
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 210
|
r
|
get.status-methods.Rd.R
|
library(PopGenome)
### Name: get.status-methods
### Title: State of calculations
### Aliases: get.status-methods get.status,GENOME-method
### Keywords: methods
### ** Examples
# get.status(GENOME.class)
|
c546e5fc0f82fc5f1c027ec2ef8866313708e8e6
|
cd628e1c910d766d54b4fa8417a82890e16cdca3
|
/man/tournaments.Rd
|
f404ced0c4827ee9a82e9cf57cb3e0cdbe8192ad
|
[] |
no_license
|
dashee87/deuce
|
33f34ddaef4a8943adf5d704225eaaf06ec3a1ba
|
de7015ec55e5470ca497472171e50b6c822487dd
|
refs/heads/master
| 2021-01-13T10:16:44.665211
| 2016-06-28T01:03:35
| 2016-06-28T01:03:35
| 69,028,710
| 0
| 0
| null | 2016-09-23T13:44:27
| 2016-09-23T13:44:26
| null |
UTF-8
|
R
| false
| false
| 484
|
rd
|
tournaments.Rd
|
\name{tournaments}
\alias{tournaments}
\docType{data}
\title{Names and codes for ATP tournaments}
\description{
Data frame of ATP tournaments at the 250 level and above.
}
\usage{data(tournaments)}
\format{
Data frame of \code{code}, \code{location}, \code{tournament}, \code{tier}, \code{rounds} and \code{surface}.
}
\source{
\url{http://www.atpworldtour.com}
}
\examples{
data(tournaments)
tournaments[1:10,]
}
\seealso{\link{fetch_draw}}
\keyword{datasets}
\keyword{deuce}
|
ca9a35b7f748ba3620b862502ee8421de4c7b54e
|
85f1b159e885d0e6a8aa972e3a234f9cac33ffa3
|
/HW_10.R
|
0d0e3695dc3069e840a8ab23a2cb8034492fc2eb
|
[] |
no_license
|
sis0004/HW_10
|
103e821b094f4ea6b65461573a472332f4b0db65
|
4f9b80cc97863874b0bc9bc4efa4fd376fd3900b
|
refs/heads/master
| 2020-09-01T16:29:19.683010
| 2019-11-08T14:08:14
| 2019-11-08T14:08:14
| 219,004,766
| 0
| 0
| null | 2019-11-07T19:15:13
| 2019-11-01T14:49:51
|
R
|
UTF-8
|
R
| false
| false
| 1,418
|
r
|
HW_10.R
|
# Look up a country's per-capita CO2 emissions for a year and grade it.
#
# Reads the Gapminder per-person CO2 table, validates that the year column
# and the country exist (and that the value is not missing), then grades
# the value A-F against the 10/20/30/40% quantiles of that year's data.
#
# Fix vs. original: the grade chain used strict `>` lower bounds together
# with `<` / `<=` upper bounds, so a value exactly equal to a quantile
# boundary (e.g. per_capita == new[1]) matched no branch and fell through
# to grade F. The bounds are now implied by the preceding branch failing.
#
# @param country Country name as spelled in the data file.
# @param year Year (numeric or string); matched against column "X<year>".
# @return A string like "Grade:B; 4.32 tonnes CO2 per person per year".
per_capita_co2 <- function(country,year) {
  data <- read.csv("data/co2_emissions_tonnes_per_person_gapminder.csv")
  year <- paste0("X", year) # edit the year to match the format in the data
  # check if the data has the expected year
  if (!(year %in% colnames(data))) {
    stop(sprintf("No data for %s in %s",country,substring(year,2)))
  }
  # check if the country is in the data
  else if (!(country %in% data$country)) {
    stop(sprintf("Consider spellchecking country name or there is no data for %s",country))
  }
  per_capita <- data[data$country == country,year]
  # check if the per capita for the expected country is NA
  if (is.na(per_capita)) {
    stop(sprintf("No data for %s in %s",country,substring(year,2)))
  }
  data_year <- data[,year]
  new <- quantile(data_year, c(0.1,0.2,0.3,0.4), na.rm=TRUE)
  if (per_capita < new[1]) {
    sprintf("Grade:A; %.2f tonnes CO2 per person per year", per_capita)
  } else if (per_capita <= new[2]) {
    sprintf("Grade:B; %.2f tonnes CO2 per person per year", per_capita)
  } else if (per_capita <= new[3]) {
    sprintf("Grade:C; %.2f tonnes CO2 per person per year", per_capita)
  } else if (per_capita <= new[4]) {
    sprintf("Grade:D; %.2f tonnes CO2 per person per year", per_capita)
  } else {
    sprintf("Grade:F; %.2f tonnes CO2 per person per year", per_capita)
  }
}
|
be4490778e157f3718321d604ad43c4042e6f5c1
|
1e6695a7107fbe76b472ded8ea28c398b09a08e5
|
/PBSdata/man/spongeCPZ.Rd
|
729b16205508df47868f5df540ddbc00b2b47f6f
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain"
] |
permissive
|
pbs-software/pbs-data
|
ed55421071c3b2460023c166004aa672991bf749
|
fa50fc4cb28e7c5cc3736b9a363b0703a200cf67
|
refs/heads/master
| 2023-07-27T12:22:36.734426
| 2023-07-06T15:57:14
| 2023-07-06T15:57:14
| 37,491,693
| 2
| 2
| null | 2022-07-14T18:34:19
| 2015-06-15T21:14:11
|
R
|
UTF-8
|
R
| false
| false
| 3,479
|
rd
|
spongeCPZ.Rd
|
\name{spongeCPZ}
\alias{spongeCPZ}
\alias{spongeAMZ}
\docType{data}
\title{
Topo: Sponge Reef Zones
}
\description{
Sponge reef core protected zones and adaptive management zones within the
proposed MPA \emph{Hecate Strait and Queen Charlotte Sound Glass Sponge Reefs}.
}
\usage{
data(spongeCPZ)
data(spongeAMZ)
}
\format{
\code{spongeCPZ}: a data frame with 43 observations on 5 variables (below). \cr
\code{spongeAMZ}: a data frame with 34 observations on 4 variables (no \code{SID}).
\describe{
\item{\code{PID}}{primary polygon ID}
\item{\code{SID}}{secondary polygon ID}
\item{\code{POS}}{vertex position}
\item{\code{X}}{geographic longitude (\eqn{^\circ}{deg}W)}
\item{\code{Y}}{geographic latitude (\eqn{^\circ}{deg}N)}
}
}
\details{
The Hecate Strait and Queen Charlotte Sound Glass Sponge Reefs have been
identified as an ecologically and biologically significant area due to their
global geological uniqueness (Conway et al. 1991, Conway et al. 2001, and
Krautter et al. 2001), and there is international and national recognition that
cold-water corals and sponge dominated communities can serve as key structural
habitat for many fish and invertebrate species (DFO 2010).
This area is currently in the process of being designated as an Oceans Act
Marine Protected Area (MPA) as part of the Health of the Oceans Initiative.
It has been identified as an Area of Interest in consideration of an ecosystem-
based management (EBM) approach for the Pacific North Coast Integrated
Management Area (PNCIMA), within which the reefs are located.
The proposed MPA consists of three separate areas totalling 2410 square km that
include the four glass sponge reef complexes located in Hecate Strait and Queen
Charlotte Sound, the water column and the surrounding waters, and the seabed and
subsoil to a depth of 20 meters. The three areas are referred to as the Northern
Reef, the Central Reef (containing two reef complexes), and the Southern Reef.
Each of the three areas is proposed to have three internal management zones,
referred to as the Core Protection Zone (CPZ), the Adaptive Management Zone
(AMZ) and the Vertical Adaptive Management Zone (VAMZ).
}
\source{
Lisa Lacko, Pacific Biological Station, Fisheries and Oceans Canada, Nanaimo BC
}
\references{
DFO (2012)
Identification and evaluation of biological effects and impacts of sediment to
sponge communities in Hecate Strait Pacific regional science advisory process.
October 23-25, 2012, Nanaimo BC. \cr
\url{http://www.dfo-mpo.gc.ca/csas-sccs/Schedule-Horraire/2012/10_23-25b-eng.html}
Conway, K.W., Krautter, M., Barrie, J.V. and Neuweiler, M. (2001)
Hexactinellid sponge reefs on the Canadian continental shelf: A unique \sQuote{living fossil}.
\emph{Geoscience Canada} \bold{28(20)}: 71--78.
Conway, K.W., Barrie, J.V., Austin, W.C., and Luternauer, J.L. (1991)
Holocene sponge bioherms on the western Canadian continental shelf.
\emph{Continental Shelf Research} \bold{11(8-10)}: 771--790.
DFO. (2010)
Pacific Region Cold-Water Coral and Sponge Conservation Strategy 2010-2015.
Fisheries and Oceans Canada. \bold{2010/1663}.
Krautter, M., Conway, K., Barrie, J.V., and Neuweiler, M. (2001)
Discovery of a \sQuote{living dinosaur}: globally unique modern Hexactinellid sponge reefs off British Columbia, Canada.
\emph{Facies} \bold{44}: 265--282.
}
\keyword{datasets}
|
17e10ed5653404c7b45e82f2743bb86e707bb652
|
7786980abbb9f9f92d0ba45a6b526066bc4f77b8
|
/R/plot_grouped.R
|
f18c535b960e1030eb34f9ad534d8fba42d5b647
|
[] |
no_license
|
alastairrushworth/inspectdf
|
d2fc64d31da1e903b43eea7c9aec893bb27c6759
|
5c516e3ee28c63a56622948ab612bc8f3d48ba47
|
refs/heads/master
| 2022-08-29T15:37:21.670913
| 2022-08-09T06:14:32
| 2022-08-09T06:14:32
| 157,981,172
| 251
| 23
| null | 2022-08-09T06:27:38
| 2018-11-17T12:12:30
|
R
|
UTF-8
|
R
| false
| false
| 2,028
|
r
|
plot_grouped.R
|
#' @importFrom tidyr unite
# Plot grouped inspection summaries either as grouped lines (plot_type 3)
# or dodged bars (plot_type 2).
#
# All grouping columns of `df` (recorded in attr(df, 'groups')) are first
# collapsed into a single underscore-joined `group` column.
#
# @param df Grouped data frame (dplyr); attr(df, 'groups') lists the keys.
# @param value,series,group Column names (strings) used for aesthetics.
# @param plot_type 2 = dodged bar chart, 3 = line/point chart.
# @param col_palette Palette selector passed to user_colours() — a project
#   helper defined elsewhere.
# @param text_labels,ylab Label options (text_labels is unused here).
# @return A ggplot object.
# NOTE(review): `vcols` is computed in both branches but never used;
# aes_string() and `guides(fill = FALSE)` are deprecated in current
# ggplot2 — candidates for cleanup.
plot_grouped <- function(df, value, series, group, plot_type,
                         col_palette, text_labels, ylab){
  # get group names
  grp_attr <- attr(df, 'groups')
  ngroups <- ncol(grp_attr) - 1
  grp_cols <- df %>%
    ungroup %>%
    select(1:ngroups) %>%
    mutate_all(as.character) %>%
    unite(group, 1:ngroups)
  # replace grouping columns with single column
  df <- bind_cols(grp_cols,
                  df %>%
                    ungroup %>%
                    select(-(1:ngroups)))
  group <- 'group'
  # show missing group keys as the literal string "NA"
  df[is.na(df[group]), group] <- 'NA'
  if(plot_type == 3){
    n_df <- length(unlist(unique(df[series])))
    vcols <- c("gray50", user_colours(9, col_palette)[9])
    # palette repeated twice so line and point colours stay paired
    bcols <- rep(user_colours(n_df, col_palette), 2)
    plt <- df %>%
      ggplot(aes_string(x = group, y = value, colour = series,
                        group = series)) +
      geom_blank() +
      theme_bw() +
      geom_hline(yintercept = 0, alpha = 0.5, linetype = "dashed") +
      theme(panel.border = element_blank(),
            panel.grid.major = element_blank(),
            axis.text.x = element_text(angle = 45)) +
      geom_line(size = 1.5, alpha = 0.65) +
      geom_point(size = 2) +
      scale_colour_manual(name = "Pair", values = bcols) +
      labs(y = ylab, x = group)
  }
  if(plot_type == 2){
    n_df <- length(unlist(unique(df[group])))
    vcols <- c("gray50", user_colours(9, col_palette)[9])
    bcols <- user_colours(n_df, col_palette)
    plt <- df %>%
      ggplot(aes_string(x = series, y = value, fill = group,
                        group = group, label = group)) +
      geom_blank() +
      theme_bw() +
      theme(panel.border = element_blank(),
            panel.grid.major = element_blank(),
            axis.text.x = element_text(angle = 45)) +
      geom_bar(stat = "identity", position = "dodge", na.rm = T) +
      scale_fill_manual(values = bcols) +
      guides(fill = FALSE) +
      labs(y = ylab, x = "")
  }
  return(plt)
}
|
4d9bd0bcca6652ca3bb85fe06373aebe1149c83b
|
2b864fa89488650a9840c49f8312ebccc3fefffc
|
/ggplot2 - hadley wickham/chapter10.R
|
b0724c08743d6f70a02626076b71d8e61789d9dc
|
[] |
no_license
|
harryyang1982/r-codes
|
8147d3f70fd7cf435ecb34d1bc1acd921b75f7bd
|
89fb033f11f26c285837c0e04b74d6453c16be50
|
refs/heads/master
| 2020-12-30T11:52:12.212063
| 2018-04-12T06:25:58
| 2018-04-12T06:25:58
| 91,541,975
| 0
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,917
|
r
|
chapter10.R
|
# Worked examples and exercises from "ggplot2" (Wickham), chapter 10:
# data transformation with dplyr (filter, mutate, group_by/summarise,
# pipelines). This is an interactive tutorial transcript — many lines
# simply print intermediate results.
library(ggplot2)
library(dplyr)
ggplot(diamonds, aes(x, y)) +
  geom_bin2d()
filter(diamonds, x==0 | y== 0)
# Drop the impossible zero dimensions and the extreme y outliers.
diamonds_ok <- filter(diamonds, x >0, y >0, y < 20)
ggplot(diamonds_ok, aes(x, y)) +
  geom_bin2d() +
  geom_abline(slope=1, color="white", size=1, alpha=0.5)
#10.2.2 Missing Values
# NA propagates through comparisons and arithmetic; test with is.na().
x <- c(1, NA, 2)
x ==1
x >2
x+10
x == NA
x!=NA
is.na(x)
#10.2.3 Exercises
#1
diamonds %>%
  filter(x==y)
diamonds %>%
  filter(depth >=55, depth <70)
diamonds %>%
  filter(carat < median(carat))
diamonds %>%
  filter()
diamonds %>%
  mutate(pcc = price / carat) %>%
  filter(pcc > 10000)
diamonds %>%
  filter(cut %in% c("Good", "Very Good"))
#3
diamonds %>%
  filter(x >0, y >0, y < 20) %>%
  ggplot(aes(x, y)) +
  geom_bin2d()
diamonds %>%
  filter(x >0, z >0, z < 10) %>%
  ggplot(aes(x, z)) +
  geom_bin2d()
diamonds %>%
  filter(z>0, z<10, y>0, y<20) %>%
  ggplot(aes(y,z)) +
  geom_bin2d()
#4
# NOTE(review): install.packages() inside a script re-installs on every
# run; normally run once interactively instead.
install.packages("ggplot2movies")
library(ggplot2movies)
data(package="ggplot2movies")
data("movies")
head(movies)
movies %>%
  ggplot(aes(rating)) +
  geom_freqpoly(aes(color=is.na(budget)))
#5
NA & F
NA | T
NA * 0
#10.3 Create New Variables
diamonds_ok2 <- mutate(diamonds_ok,
                       sym = x-y,
                       size = sqrt(x ^ 2 + y ^ 2))
diamonds_ok2
diamonds_ok2
diamonds_ok2 %>%
  ggplot(aes(size, sym)) +
  stat_bin2d()
diamonds_ok2 %>%
  ggplot(aes(abs(sym))) +
  geom_histogram(binwidth=0.1)
diamonds_ok3 <- diamonds_ok2 %>%
  filter(abs(sym) < 0.2)
diamonds_ok3 %>%
  ggplot(aes(abs(sym))) +
  geom_histogram(binwidth=0.01)
# 10.3.2 Exercises
?diamonds
#1
diamonds_ex <- diamonds %>%
  mutate(volume = x*y*z) %>%
  mutate(density = carat / volume) %>%
  mutate(ppc = price / carat) %>%
  mutate(lc = log(carat), lp = log(price))
#2
diamonds %>%
  filter(x > 0, z > 0, z < 10) %>%
  ggplot(aes(x, z)) +
  stat_bin2d()
#3
diamonds %>%
  mutate(new_depth = round(z/(x+y)*2*100, 1))
#4
diamonds %>%
  mutate(index = ifelse(x > y, "x > y",
                        ifelse(x == y, "equal", "x < y"))) %>%
  ggplot(aes(index)) +
  geom_bar()
#10-4 Group-wise Summaries
by_clarity <- diamonds %>%
  group_by(clarity) %>%
  summarise(price = mean(price))
by_clarity
ggplot(by_clarity, aes(clarity, price)) +
  geom_line(aes(group=1), color="grey80") +
  geom_point(size=2)
ggplot(by_clarity, aes(clarity, price)) +
  geom_line(aes(group=1), color="grey80") +
  geom_point(size=2)
cut_depth <- diamonds %>%
  group_by(cut, depth) %>%
  summarise(n=n()) %>%
  filter(depth > 55, depth < 70)
cut_depth
ggplot(cut_depth, aes(depth, n, color=cut)) +
  geom_line()
cut_depth %>%
  mutate(prop = n / sum(n)) %>%
  ggplot(aes(depth, prop, color=cut)) +
  geom_line()
#10.4.1 Useful Tools
# sum()/mean() of a logical count / give the proportion of TRUEs.
diamonds %>%
  summarise(n_big = sum(carat >= 4), prop_cheap = mean(price < 1000))
#10.4.2 Statistical Considerations
by_clarity <- diamonds %>%
  group_by(clarity) %>%
  summarise(
    n=n(), mean = mean(price), fq = quantile(price, 0.25), uq = quantile(price, 0.75)
  )
by_clarity %>%
  ggplot(aes(clarity, mean)) +
  geom_line(aes(group = 1), color="grey50") +
  geom_point(aes(size = n))
data(Batting, package = "Lahman")
batters <- Batting %>%
  filter(AB > 0)
ba <- batters %>%
  group_by(playerID) %>%
  summarize(ba = sum(H, na.rm=T) / sum(AB, na.rm = T))
ggplot(ba, aes(ba)) +
  geom_histogram(binwidth = 0.01)
batters %>%
  group_by(playerID) %>%
  summarise(ba = sum(H, na.rm=T) / sum(AB, na.rm=T), ab=sum(AB, na.rm=T)) %>%
  ggplot(aes(ab, ba)) +
  geom_bin2d(bins = 100) +
  geom_smooth()
ba <- batters %>%
  group_by(playerID) %>%
  summarise(ba = sum(H, na.rm=T) / sum(AB, na.rm=T), ab=sum(AB, na.rm=T))
ba %>%
  filter(ab >=10) %>%
  ggplot(aes(ab, ba)) +
  geom_bin2d() +
  geom_smooth()
# 10.4.3 Exercises
data(movies, package = "ggplot2movies")
glimpse(movies)
#1
movies %>%
  group_by(year) %>%
  summarise(budget.pct=sum(is.na(budget) / (sum(is.na(budget)+sum(!is.na(budget)))) * 100)) %>%
  ggplot(aes(year, budget.pct)) +
  geom_col()
#2
movies %>%
  group_by(year) %>%
  summarise(mean_length = mean(length, na.rm=T)) %>%
  ggplot(aes(year, mean_length)) +
  geom_col() +
  geom_smooth(method='glm')
#3
diamonds %>%
  group_by(cut) %>%
  count()
diamonds %>%
  group_by(color) %>%
  count()
diamonds %>%
  group_by(clarity) %>%
  count()
diamonds %>%
  group_by(cut) %>%
  summarise(mean_price = mean(price)) %>%
  ggplot(aes(cut, mean_price)) +
  geom_col()
glimpse(diamonds)
diamonds %>%
  group_by(cut) %>%
  summarise(mean_size = mean(carat)) %>%
  ggplot(aes(cut, mean_size)) +
  geom_col()
# 4
diamonds %>%
  mutate(carat_group = cut(carat, (max(carat)-min(carat))/0.1)) %>%
  ggplot(aes(carat_group, carat)) +
  geom_bar(stat="identity")
diamonds %>%
  ggplot(aes(carat)) +
  geom_histogram(binwidth=0.1)
Batting %>%
  filter(AB > 200) %>%
  group_by(playerID, AB) %>%
  summarise(avg=sum(H, na.rm=T)/sum(AB, na.rm=T)) %>%
  ggplot(aes(AB, avg)) +
  geom_col()
# 10.5 Transformation Pipelines
# Three equivalent styles: repeated assignment, nested calls, and a pipe.
cut_depth <- group_by(diamonds, cut, depth)
cut_depth <- summarise(cut_depth, n=n())
cut_depth <- filter(cut_depth, depth > 55, depth < 70)
cut_depth <- mutate(cut_depth, prop=n/sum(n))
cut_depth
# NOTE(review): the nested variant below uses sum() with no argument
# (likely meant sum(n)) — kept as in the book transcript.
mutate(
  filter(
    summarise(
      group_by(
        diamonds,
        cut,
        depth
      ), n= n()
    ),
    depth > 55,
    depth < 70
  ),
  prop=n/sum()
)
cut_depth <- diamonds %>%
  group_by(cut, depth) %>%
  summarise(n=n()) %>%
  filter(depth > 55, depth < 70) %>%
  mutate(prop = n / sum(n))
# 10.5.1 Exercise
library(magrittr)
x <- runif(100)
# Population standard deviation written as a magrittr pipeline.
x %>%
  subtract(mean(.)) %>%
  raise_to_power(2) %>%
  mean() %>%
  sqrt()
k <- Batting %>%
  group_by(yearID, playerID) %>%
  mutate(avg = H/AB) %>%
  filter(AB > 0, G > 100) %>%
  summarise(avg=mean(avg, na.rm=T)) %>%
  arrange(desc(avg, sd.avg))
|
86d9fa7484d2949c6e2e9e8ba9af530639763dd0
|
eb2cd7490e2e4fc1c18fed6019c3888892b6bb0d
|
/Assignment_2/p9/DocFreq.R
|
568d658d21ba9135e62dbab60082958b90aaf162
|
[] |
no_license
|
ksingla025/Machine-Learning-Assignments
|
8f75e1a36de1cfdffcba7fe2826c559cb0171bb6
|
0ea8518af52b080ac6635782f1952437e2d3674d
|
refs/heads/master
| 2020-05-18T08:29:39.321179
| 2014-11-22T12:38:04
| 2014-11-22T12:38:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 156
|
r
|
DocFreq.R
|
# Count the number of documents containing a given word.
#
# Backward-compatible generalisation: the vocabulary and the per-document
# word-count table default to the global objects the original relied on
# (`vocabulary`, `word_per_doc_count`) but can now be passed explicitly.
#
# @param word Word to look up.
# @param vocab Character vector of vocabulary terms (word id = position).
# @param counts Matrix/data frame whose 2nd column holds word ids, one row
#   per (document, word) occurrence.
# @return Number of rows whose word id matches `word`.
DocFreq <- function(word, vocab = vocabulary, counts = word_per_doc_count) {
  word_id <- which(vocab == word)
  length(which(counts[, 2] == word_id))
}
|
bb1b6c9adbbe8979a820c52b9841eecfb8a68ff9
|
339532c1047f1c4654692339478ada6c90f0420e
|
/R/unused/covplot.R
|
3d76191cc03778a22ee881bcb429535767d94be9
|
[] |
no_license
|
marcottelab/pepliner
|
1647a5541830b4f23f82295c5471b5e54c0ae4d1
|
2e21bf81d56bacdcc9c6ee75bb3cdce3a8213de4
|
refs/heads/master
| 2021-01-01T18:52:08.640836
| 2018-07-27T20:37:57
| 2018-07-27T20:37:57
| 98,455,138
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,787
|
r
|
covplot.R
|
# suppressPackageStartupMessages(library(tidyverse))
# suppressPackageStartupMessages(library(cowplot))
# suppressPackageStartupMessages(library(lazyeval))
# Render one row of the peptide coverage plot: a grey bar spanning the
# full protein length with orange segments marking each peptide's
# Start-End span, returned as a trimmed gtable grob.
#
# @param row Data frame rows for one peptide; needs Start, End, Length
#   columns plus the identifier column named by `elementid`.
# @param elementid Name of the column holding the peptide identifier used
#   to label the row.
# @return A gtable grob for one coverage row.
covplot_row <- function(row,elementid){
  # Dummy variable height of each rectangle. Not very important since plots are resized afterwards according to number of rows.
  row$dummy_y <- 20
  #auxiliary function to plot each row of the covplots
  #the x-value for each row is always the length of the protein
  rect <- ggplot2::ggplot(data = row, ggplot2::aes(x=max(row$Length), y = dummy_y)) +
    #get rid of the grey grid in the background set by default in ggplot2
    cowplot::theme_cowplot() +
    #set font size
    ggplot2::theme(axis.text = ggplot2::element_text(size=8)) +
    #start off each plot as a blank element
    ggplot2::geom_blank() +
    #get rid of all axes, titles, ticks and labels
    ggplot2::theme(
      axis.text.y=ggplot2::element_blank(),
      axis.text.x=ggplot2::element_blank(),
      axis.title.x=ggplot2::element_blank(),
      axis.title.y=ggplot2::element_blank(),
      axis.ticks.y=ggplot2::element_blank(),
      axis.ticks.x=ggplot2::element_blank(),
      axis.line.y=ggplot2::element_blank(),
      axis.line.x=ggplot2::element_blank()) +
    #plot a grey rectangle that represents the full protein structure.
    ggplot2::geom_rect(ggplot2::aes(xmin=0, xmax=max(row$Length), ymin=0, ymax=20,fill=I('grey95'))) +
    #plot slightly darker vertical lines that represent intervals of 100 residues.
    ggplot2::geom_vline(xintercept=seq(0, max(row$Length), by=100),color='grey56') +
    #plot orange rectangles that represent the span of each peptide with respect to the full protein sequence, according to the Start and End columns.
    ggplot2::geom_rect(ggplot2::aes(xmin=row$Start, xmax=row$End, ymin=0, ymax=20,fill=I('#f46d43'))) +
    #Label each row according to the peptide being represented: plot a geom_text with the sequence of the peptide (head of the elementid column). Position them according to the length of the row.
    ggplot2::geom_text(ggplot2::aes(label=as.character(utils::head(dplyr::select_(row,elementid)[,1],1))),size=4, position = ggplot2::position_nudge(x=-0.5*(max(row$Length)),y=-10))#as.character(head(row$Peptide,1)))
  #generate a ggplot2 plot grob from the previous ggplot object
  rect <- ggplot2::ggplotGrob(rect)
  #remove unnecessary plot elements
  rect <- cowplot::gtable_remove_grobs(rect, c('title', 'xlab-b', 'ylab-l', 'axis-b','axis-l','spacer'))
  #print(rect)
  #compress unused plot space
  rect <- cowplot::gtable_squash_rows(rect, c(1, 2, 3, 4, 5, 7, 8, 9, 10))
  rect <- cowplot::gtable_squash_cols(rect, c(1, 2, 3, 5, 6, 7))
  return(rect)
  #facet_grid(Peptide ~ .)
}
#' Make a coverage plot for all the peptides retrieved for a single protein.
#'
#' @param input_data Data frame to which the coverage columns will be appended.
#' @param groupid Data frame column (factor) corresponding to the names of the groups of elements in the data.
#' @param group_name (Optional) If specified, make plots from only those rows whose ID matched this argument.
#' @param elementid Data frame column (factor) corresponding to the elements for each of which a row of the final plot will be produced.
#' @param sort_column (Optional) If specified, rows in the final plot will be sorted according to this (numeric) column.
#' @importFrom dplyr select_ group_by_ summarize n filter transmute select
## @import tidyr
#' @importFrom purrr map %>%
#' @importFrom ggplot2 ggplot aes aes_ theme geom_rect geom_vline geom_text geom_blank position_nudge element_blank ggplotGrob element_text xlab xlim
#' @importFrom cowplot gtable_remove_grobs gtable_squash_rows gtable_squash_cols plot_grid ggdraw theme_cowplot
#' @importFrom utils head
## @import lazyeval
#' @return ggplot item to be plotted using cowplot::ggdraw().
## @seealso \code{\link{nchar}} which this function wraps
#' @export
#' @examples
#' library(purrr)
#' test_data <- read.csv(system.file('extdata/msdata.csv',package='pepliner'))
#' sequences <- system.file('extdata/proteome.fasta',package='pepliner')
#' cov_columns(test_data,sequences,groupid='ID',elementid='Peptide') %>%
#'   covplot('Peptide','ID','sp|P55036|PSMD4_HUMAN') %>%
#'   cowplot::ggdraw()
#default sort column is Start
covplot <- function(input_data,elementid,groupid='',group_name='',sort_column='Start'){
  #test if there are elements that appear under different groups.
  uniqueness_test <- input_data %>% dplyr::group_by_(elementid, groupid) %>%
    dplyr::summarize(groupsize = n()) %>%
    dplyr::filter(groupsize > 1)
  if(nrow(uniqueness_test) > 0){
    cat(paste0("Warning, ", nrow(uniqueness_test), " elements are not unique to groups.\nEx. peptides match multiple proteins.\n"))
  }
  #pre-filter columns that pertain to the plot
  pre_data <- input_data[colnames(input_data)%in%c(groupid,elementid,'Start','End','Length')]
  #if there is a specified group name but no ID column, stop running and show error.
  if(groupid=='' & group_name!=''){
    stop('Group ID column required to filter by group.')
  }
  #if there is a specified group Id column and a specified group name, filter data that whose Id value is equal to group_name
  if(groupid!=''&group_name!=''){
    data_group <- pre_data[pre_data[colnames(pre_data)==groupid]==group_name,] #not elegant, but hey! {base}
    #otherwise, don't do anything
  }else{
    data_group <- pre_data
  }
  #unnecessary step to be purged. Pending.
  cov_data <- data_group
  #Feed peptides into cov_row function
  cov_data = droplevels(cov_data)
  #reorder elementid column by sort_column. Default: Start.
  cov_data[,colnames(cov_data)==elementid] <- stats::reorder(cov_data[,colnames(cov_data)==elementid],cov_data[,colnames(cov_data)==sort_column])
  #Feed peptides into covplot_row function: one grob per peptide.
  cov_data %>% split(select_(cov_data,elementid)[,1]) %>% purrr::map(covplot_row,elementid=elementid) -> cov_list
  start_end <- dplyr::select(cov_data,c('Start','End'))
  start_end<- dplyr::transmute(start_end,Interval=paste0((as.character(Start)),':',as.character(End)))
  # NOTE(review): eval(parse(text = "a:b")) builds each residue interval
  # from its "Start:End" string; constructing seq(Start, End) directly
  # would avoid parse/eval entirely.
  interval_list <- purrr::map(start_end$Interval,.f=function(x){eval(parse(text=x))})
  # NOTE(review): operator precedence makes this expression
  # length(union) / (max(Length) %/% 100), not (length/max) * 100 —
  # confirm the intended coverage-percentage formula.
  cov_rate <- Reduce(union,interval_list) %>% length()/max(cov_data$Length) %/% 100#plot(y=rep(1,360))
  # Separate x-axis strip showing the residue scale and coverage label.
  xaxis_plot <- ggplot2::ggplot(data=cov_data, ggplot2::aes(x=max(cov_data$Length), y = 20)) +
    cowplot::theme_cowplot() +
    ggplot2::geom_blank() +
    ggplot2::xlim(-2,max(cov_data[cov_data[colnames(cov_data)==groupid]==group_name,]$Length)) +
    ggplot2::xlab(paste0('Coverage: ',cov_rate,'%')) +
    # #get rid of all axes, titles, ticks and labels
    ggplot2::theme(
      axis.text.y=ggplot2::element_blank(),
      axis.title.y=ggplot2::element_blank(),
      axis.ticks.y=ggplot2::element_blank(),
      axis.line.y=ggplot2::element_blank())
  xaxis_plot <- ggplot2::ggplotGrob(xaxis_plot)
  xaxis_plot <- cowplot::gtable_remove_grobs(xaxis_plot, c('title', 'axis-l','spacer','axis-r','axis-t','panel','xlab-t','subtitle','caption','ylab-l','ylab-r'))
  #xaxis_plot <- cowplot::gtable_squash_rows(xaxis_plot, c(1, 2, 3, 4, 5, 7, 8, 9, 10))
  xaxis_plot <- cowplot::gtable_squash_cols(xaxis_plot, c(1, 2, 3, 5, 6, 7, 8, 9, 10))
  xaxis_plot <- cowplot::gtable_squash_cols(xaxis_plot, c(1, 2, 3, 5, 6, 7,8,9,10))
  #Aggregate ggplot objects: stacked coverage rows plus the axis strip.
  clusterplot <- cowplot::plot_grid(plotlist = cov_list, ncol=1, align = "v")
  cowplot::plot_grid(clusterplot,xaxis_plot,align='h',nrow=2,rel_heights = c(25,1))
}
|
b2fdfe53f029decf9a72e4305314c30feb5f98c8
|
52f9424c4009606f818d90d3b28022711979f5d8
|
/plot5.R
|
c53e35584eb1719337999b0793f40a0916eaf97f
|
[] |
no_license
|
sun33170161/ExData_Plotting2
|
c32b42bb473710eb9944d802ce3c9d91c7c0fa1a
|
145dd4963ca61eafc35e369616f113d792d6339e
|
refs/heads/master
| 2021-01-10T03:07:07.179362
| 2015-05-24T17:16:42
| 2015-05-24T17:16:42
| 36,183,371
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 546
|
r
|
plot5.R
|
# Plot 5: yearly trend of PM2.5 emissions from motor-vehicle-related
# sources in Baltimore City (fips == "24510").
# Load the NEI emissions data and the source classification code table.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Open the PNG device before drawing.
png("plot5.png")
# Select motor-vehicle-related source classification codes.
# (NOTE(review): the original comment said "Coal Combustion Related data",
# but the grep below actually matches "motor vehicle".)
SCC.Motor <- SCC[grep("motor vehicle", SCC$Short.Name, ignore.case = T), c("SCC", "Short.Name")]
data <- subset(NEI, subset = NEI$SCC %in% SCC.Motor$SCC)
data <- data[data$fips == "24510", ]
# Total emissions per year.
data.groups.sum <- with(data, tapply(Emissions, year, sum))
plot(data.groups.sum ~ names(data.groups.sum), type = "l", xlab = "year", ylab = "sum")
title("Trend of Motor Vehicle")
dev.off()
|
103a86e1bf8e4e76ae6535128638db6694efd904
|
1f294ea77e05ecb7ce682c84eebe7e8e2a398585
|
/R/prepare_response_variables.R
|
28cc59ad3d9be1aa6df6a32472aca5e3d660cd06
|
[] |
no_license
|
coreytcallaghan/JBI-20-0736
|
68c915b3724df54fe7d7ad21f4db3232fcabf49f
|
c597d885fc7fd370161b7e8aa7a05075c7049ebf
|
refs/heads/main
| 2023-02-18T17:02:47.576843
| 2021-01-19T08:10:17
| 2021-01-19T08:10:17
| 330,902,828
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,831
|
r
|
prepare_response_variables.R
|
## This script reads in the summaries
## exported from the 'make_ggridges_and_get_urbanness.R' script
## and then saves out a couple RDSs of 'response variables'.
## I also do some manual filtering of the species included,
## ensuring that they largely occur in the United States.
# packages
library(dplyr)
library(purrr)
library(readr)
# Read in ABA codes: keep common name (X2), scientific name (X3) and the
# ABA code (X5), dropping rows with no code.
aba_codes <- read_csv("Data/ABA_codes/ABA_Checklist-8.0.5.csv") %>%
  dplyr::select(2, 3, 5) %>%
  rename(COMMON_NAME=X2) %>%
  rename(SCIENTIFIC_NAME=X3) %>%
  rename(ABA_Code=X5) %>%
  dplyr::filter(complete.cases(ABA_Code))
# Join the candidate species with their ABA code.
potential_species <- read_csv("Data/list_of_potential_species.csv") %>%
  rename(ebird_COMMON_NAME=COMMON_NAME) %>%
  left_join(., read_csv("Data/clements_clean.csv"), by="ebird_COMMON_NAME") %>%
  rename(COMMON_NAME=ebird_COMMON_NAME) %>%
  dplyr::select(COMMON_NAME, ebird_SCIENTIFIC_NAME) %>%
  distinct() %>%
  left_join(aba_codes, by="COMMON_NAME")
# Keep only ABA code 1 or 2 species; codes 3, 4, 5 are relatively uncommon
# occurrences in the ABA area. Species that failed to join (not on the ABA
# checklist) are dropped too. Gray-headed Swamphen is kept explicitly: it is
# on the checklist under a different species name.
species_to_keep <- potential_species %>%
  dplyr::filter(ABA_Code %in% c(1, 2)) %>%
  bind_rows(., potential_species %>%
              dplyr::filter(COMMON_NAME=="Gray-headed Swamphen")) %>%
  .$COMMON_NAME
# Read the per-species monthly summaries and keep only the retained species.
# FIX: build full paths with list.files(full.names = TRUE) instead of the
# original setwd()/setwd("..") juggling, and anchor the extension pattern
# (the unescaped "." in ".RDS" previously matched any character).
monthly_dat <- list.files("Data/species_monthly_summaries",
                          pattern = "\\.RDS$", full.names = TRUE) %>%
  map_dfr(readRDS) %>%
  dplyr::filter(COMMON_NAME %in% species_to_keep)
# Quick sanity check on the number of species retained.
length(unique(monthly_dat$COMMON_NAME))
saveRDS(monthly_dat, "Data/response_variables.RDS")
|
698d61107214314ecc95830833ece0c7119fdf30
|
b2ef0c59f3bb43b33db35f7fdd221c803ef33bb2
|
/Intro/autocorrelation.R
|
12f2b0d8290cb7789a99bf5c36cae5eca8cf924c
|
[] |
no_license
|
jocoder22/R_DataScience
|
086a4fc9ba27a3a3435d8479ae983c41e45834c9
|
44d5c24b1674e3a05a66eed122eed819dad4dabf
|
refs/heads/master
| 2020-07-23T20:37:47.927767
| 2019-11-06T04:17:38
| 2019-11-06T04:17:38
| 207,699,600
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 772
|
r
|
autocorrelation.R
|
# Autocorrelation examples on equity price series and the EuStockMarkets
# index data.
library(timeSeries)
library(astsa)
library(quantmod)
# Download stocks
symbols <- c("AMZN", "AAPL", "TSLA", "MSFT")
getSymbols(symbols)
# Adjusted close for Amazon; auto.assign = F makes getSymbols() return the
# series instead of assigning it into the workspace.
amazon <- getSymbols("AMZN", auto.assign = F)$`AMZN.Adjusted`
getSymbols(symbols, from="2016-10-29")
# Merge the adjusted-close columns of the four tickers.
stocks <- Ad(merge(AMZN, AAPL, TSLA, MSFT))
# Returns of the EU index series (presumably timeSeries::returns0, which
# pads the first observation -- TODO confirm against the timeSeries docs).
returns <- returns0(EuStockMarkets)
len <- length(EuStockMarkets[, 1])
periodicity(amazon)
# Compute correlations with lag 1
cor(EuStockMarkets[-len, 1], EuStockMarkets[-1, 1])
# Compute correlations with lag 2
# (NOTE(review): the original comment said "lag 1", but dropping the last
# two / first two observations below gives a lag of 2.)
cor(EuStockMarkets[-((len - 1):len), 1], EuStockMarkets[-(1:2), 1])
# use acf to compute the correlations for many lags
acf(EuStockMarkets, lag.max = 6, type = "correlation", plot = FALSE)
acf(amazon, lag.max = 6, plot = FALSE)
acf(stocks, lag.max = 6, plot = FALSE)
|
2f64aea07e80aa7c022a38592e2f2b05bd0b24da
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/factorstochvol/man/covmat.fsvdraws.Rd
|
a707e0841cb6b7fdc6b3958fbeeac01957ecafeb
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,998
|
rd
|
covmat.fsvdraws.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities_fsvdraws.R
\name{covmat.fsvdraws}
\alias{covmat.fsvdraws}
\title{Extract posterior draws of the model-implied covariance matrix}
\usage{
\method{covmat}{fsvdraws}(x, timepoints = "all", ...)
}
\arguments{
\item{x}{Object of class \code{'fsvdraws'}, usually resulting from a call
of \code{\link{fsvsample}}.}
\item{timepoints}{Vector indicating at which point(s) in time (of those that
have been stored during sampling) the correlation matrices should be
extracted. Can also be "all" or "last".}
\item{...}{Ignored.}
}
\value{
Array of dimension \code{m} times \code{m} times \code{draws}
times \code{timepoints} containing the posterior draws for the
model-implied covariance matrix.
}
\description{
\code{covmat} extracts draws from the model-implied covariance matrix
from an \code{fsvdraws} object for all points in time which have been
stored.
}
\note{
Currently crudely implemented as a double loop in pure R,
may be slow.
}
\examples{
\donttest{
set.seed(1)
sim <- fsvsim(n = 500, series = 3, factors = 1) # simulate
res <- fsvsample(sim$y, factors = 1, keeptime = "all") # estimate
covs <- covmat(res, "last") # extract
# Trace plot of determinant of posterior covariance matrix
# at time t = n = 500:
detdraws <- apply(covs[,,,1], 3, det)
ts.plot(detdraws)
abline(h = mean(detdraws), col = 2) # posterior mean
abline(h = median(detdraws), col = 4) # posterior median
abline(h = det(covmat(sim, "last")[,,1]), col = 3) # implied by DGP
# Trace plot of draws from posterior covariance of Sim1 and Sim2 at
# time t = n = 500:
ts.plot(covs[1,2,,1])
abline(h = covmat(sim, "last")[1,2,1], col = 3) # "true" value
# Smoothed kernel density estimate:
plot(density(covs[1,2,,1], adjust = 2))
# Summary statistics:
summary(covs[1,2,,1])
}
}
\seealso{
Other extractors:
\code{\link{cormat.fsvdraws}()},
\code{\link{runningcormat}()},
\code{\link{runningcovmat}()}
}
\concept{extractors}
|
6fa73a89988b7357e2477c45e74a1668728c56d4
|
76f4709e9a63caf474181ce3711af32528ce83d3
|
/man/append_values.Rd
|
28230e56d6e84a35d4bcb0363803b70e9bace63a
|
[
"Apache-2.0"
] |
permissive
|
vats-div/tidyjson
|
aee5fb91624dcaba951e1c8e606e9aad148e7767
|
4129f886e78bd26fb34be1b286b68d94c7a21fab
|
refs/heads/master
| 2021-01-17T05:04:54.592904
| 2015-02-24T17:32:04
| 2015-02-24T17:32:04
| 30,717,563
| 0
| 0
| null | 2015-02-12T18:41:24
| 2015-02-12T18:41:24
| null |
UTF-8
|
R
| false
| false
| 773
|
rd
|
append_values.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/append_values.r
\name{append_values}
\alias{append_values}
\alias{append_values_logical}
\alias{append_values_number}
\alias{append_values_string}
\title{Append values to a new column}
\usage{
append_values_string(x, column.name = type, force = TRUE)
append_values_number(x, column.name = type, force = TRUE)
append_values_logical(x, column.name = type, force = TRUE)
}
\arguments{
\item{x}{a tbl_json object}
\item{column.name}{the column.name to append the values into the data.frame
under}
\item{force}{parameter that determines if the variable type should be computed or not.
If force is FALSE, then the function may take more memory}
}
\description{
Append values to a new column
}
|
95079248c38c05fe089a682720e9aea27402413a
|
47dc57d3a38ee0c3d43b71ea464a6b0b925d6649
|
/00 - Memos/Memo_K-Means_Clustering.R
|
444e4c9f751fbe3b6bbc1b9c3bf8a6d2c6f943df
|
[] |
no_license
|
arobert1976/Data-Science-Foundations-using-R
|
2e8619282a342f750e9997c04735238071159c2e
|
550c225c05a03a4019fa70dcf9fbe0e3fcead9e0
|
refs/heads/main
| 2023-01-19T07:57:03.679812
| 2020-11-20T13:36:03
| 2020-11-20T13:36:03
| 314,508,595
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,056
|
r
|
Memo_K-Means_Clustering.R
|
# Creating 3 clusters
set.seed(1234)
par(mar=c(0,0,0,0)) # sets the margins to 0
x = rnorm(12, mean=rep(1:3, each=4), sd=0.2) # 12 values: 4 around 1, 4 around 2, 4 around 3
y = rnorm(12, mean=rep(c(1,2,1), each=4), sd=0.2) # 12 values: 4 around 1, 4 around 2, 4 around 1
plot(x, y, col="blue", pch=19, cex=2)
text(x+0.05, y+0.05, labels=as.character(1:12))
# Creating the kmeans object
df = data.frame(x=x,y=y)
kmeansObj = kmeans(df, centers=3) # K=3
names(kmeansObj)
kmeansObj$cluster
# Plotting the clusters with their centers
plot(x, y, col=kmeansObj$cluster, pch=19, cex=2)
text(x+0.05, y+0.05, labels=as.character(1:12))
points(kmeansObj$centers, col=1:3, pch=3, cex=2, lwd=3)
# Another way to look at the result of a K-means is to display a heatmap
dataMatrix = as.matrix(df)
par(mfrow=c(1,2), mar=c(2,4,0.1,0.1))
image(t(dataMatrix)[,nrow(dataMatrix):1], yaxt="n") # the raw data frame values
image(t(dataMatrix)[,order(kmeansObj$cluster)], yaxt="n") # the same data regrouped by cluster
|
0c7cbec0968f13f7c0078edd9bf539c5eb34f883
|
2e8213b1ea0567d6c8a2af7b20fcee29abc13a55
|
/man/spatial_query.Rd
|
a48e391cec991bb0755a718df4a9ac6a17b3bc36
|
[
"MIT"
] |
permissive
|
monicalosu/getarc
|
0b17d7754ba1e8344e32b505a5be765bc19e76c2
|
b8f783a839523bcc14b11da3b97f3cf963eda1fa
|
refs/heads/master
| 2023-04-06T14:03:33.366167
| 2021-03-27T07:36:50
| 2021-03-27T07:36:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 850
|
rd
|
spatial_query.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spatial_query.R
\name{spatial_query}
\alias{spatial_query}
\title{Spatial Query}
\usage{
spatial_query(x, spatial_filter = "esriSpatialRelIntersects")
}
\arguments{
\item{x}{an sf or sfc object}
\item{spatial_filter}{the spatial relationship of the filter to specify in the query. Default is esriSpatialRelIntersects.
Options are: esriSpatialRelIntersects, esriSpatialRelContains, esriSpatialRelCrosses, esriSpatialRelEnvelopeIntersects,
esriSpatialRelIndexIntersects, esriSpatialRelOverlaps, esriSpatialRelTouches or esriSpatialRelWithin.}
}
\value{
a list with geometry, geometryType, SpatialRel and inSR
}
\description{
Convert SF/SFC to spatial query.
}
\details{
This function accepts an sf/sfc object and converts it to a spatial query fitting the esri api spec.
}
|
2cce6d1d020589da7c50d18e8a96ef6df2c11bee
|
274c11c96f4976067674e3c76b7f490aba020123
|
/man/branch.Rd
|
849023298ed57f2746e54e0e3d63131d5c626bac
|
[] |
no_license
|
FvD/gert
|
2b169acfd21a1179a946bedc042e43f7d7d904f9
|
5c2de88c1b07a140b774d1240c07b06a94e6dae1
|
refs/heads/master
| 2020-06-06T16:04:26.407104
| 2019-06-18T14:05:28
| 2019-06-18T14:05:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,153
|
rd
|
branch.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/branch.R
\name{branch}
\alias{branch}
\alias{git_branch_list}
\alias{git_branch_checkout}
\alias{git_branch_create}
\alias{git_branch_delete}
\alias{git_branch_fast_forward}
\title{Git Branch}
\usage{
git_branch_list(repo = ".")
git_branch_checkout(branch, force = FALSE, repo = ".")
git_branch_create(name, ref = "HEAD", checkout = TRUE, repo = ".")
git_branch_delete(name, repo = ".")
git_branch_fast_forward(ref, repo = ".")
}
\arguments{
\item{repo}{a path to an existing repository, or a \code{git_repository} object as
returned by \link{git_open}, \link{git_init} or \link{git_clone}.}
\item{branch}{name of branch to check out}
\item{force}{ignore conflicts and overwrite modified files}
\item{name}{string with name of the branch / tag / etc}
\item{ref}{string with a branch/tag/commit}
\item{checkout}{move HEAD to the newly created branch}
}
\description{
Create, list, and checkout branches.
}
\seealso{
Other git: \code{\link{commit}}, \code{\link{fetch}},
\code{\link{git_config}}, \code{\link{repository}},
\code{\link{signature}}
}
\concept{git}
|
4f3bad608953057ebcdd8ae46969afeff5569c77
|
c9cf13661b5635be5f2e1791bb5a5bb8efcd3beb
|
/R/search_for.R
|
3faf1cb85a943feed22e6eb672072d2d9f8ae49b
|
[] |
no_license
|
annamariakl/antiplugr
|
7c7c0bfbd7a65a431a26a88f3e155d5232eecb74
|
74a4a217bf770f6d017e1759af3a390a72b00124
|
refs/heads/master
| 2020-03-24T20:16:44.889970
| 2018-09-02T20:05:01
| 2018-09-02T20:05:01
| 142,969,280
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,853
|
r
|
search_for.R
|
#' Search for similar sentences
#'
#' \code{search_for} is used to search for similar or exact sentences in a PDF.
#'
#' @param x File name/path of the PDF.
#' @param sen Sentence to be used to search in the text. Matching is
#'   case-insensitive (the query and the document text are both lower-cased).
#' @param exact If you search for the exact sentence, the default is FALSE and the
#' cosine distance is used as similarity measurement.
#' @param cos_sim Similarity parameter of the cosine distance. The output contains
#' sentences which have cosine similarity greater or equal 'cos_sim'. The default
#' is 0.5.
#'
#' @return A tibble data frame that contains the measured cosine similarity and
#' the location of the match, the page number and the sentence number.
#'
#' @importFrom pdftools pdf_text
#' @importFrom quanteda tokens tokens_tolower
#' @importFrom tm VCorpus VectorSource DocumentTermMatrix
#' @importFrom lsa cosine
#' @importFrom tibble tibble
#'
#' @examples
#' # PDF from Book Reports,
#' # URL: https://www.bookreports.info/hansel-and-gretel-summary/
#' file <- system.file('pdf', 'summary_hansel_and_gretel.pdf', package = 'antiplugr')
#'
#' # a similar sentence from 'grimm_hanse_and_gretel.pdf' from Short Story America,
#' # URL: http://www.shortstoryamerica.com/pdf_classics/grimm_hanse_and_gretel.pdf
#' sen_1 <- "When four weeks had passed and Hansel was still thin, impatience overcame her, and she would wait no longer."
#'
#' # an exact sentence
#' sen_2 <- "When four weeks had passed and Hansel was still thin, the witch got tired."
#'
#' search_for(file, sen_1)
#' search_for(file, sen_2, exact = TRUE)
#'
#' @export
search_for <- function(x, sen, exact = FALSE, cos_sim = 0.5){
  # Read in the PDF text, one character string per page.
  text <- pdftools::pdf_text(x)
  # Tokenize the text into sentences with tokens() from the quanteda package.
  # FIX: the argument name is 'remove_punct'; the original 'remove_punc' was
  # silently absorbed by tokens()'s '...' and had no effect.
  text_sen <- quanteda::tokens(text, what = "sentence", remove_numbers = TRUE,
                               remove_punct = TRUE, remove_symbols = TRUE,
                               remove_hyphens = TRUE, remove_separators = TRUE)
  # Lower-case both the document sentences and the query sentence so that
  # matching is case-insensitive.
  # FIX: previously only the text was lowered, so a capitalised query (such
  # as the sen_2 example above) could never produce an exact match.
  text_sen <- quanteda::tokens_tolower(text_sen)
  sen <- tolower(sen)
  # Cumulative sentence count per page; used below to map a flat sentence
  # index back to (page, sentence-on-page).
  sen_nums <- cumsum(lapply(text_sen, length))
  # Collapse runs of internal whitespace to single spaces.
  text_sen <- gsub("\\s+"," ", text_sen)
  if (exact == TRUE) {
    # Exact (substring) matching via grep.
    sen_loc <- lapply(sen, grep, text_sen)
    # prepare the output
    sen_loc_un <- unlist(sen_loc)
    pages <- findInterval(sen_loc_un, c(1, sen_nums))
    sen_nums2 <- c(0, sen_nums)
    sen_num <- as.integer(sen_loc_un - sen_nums2[pages])
    output <- tibble::tibble(match = rep("perfect match", sapply(sen_loc, length)),
                             page = pages, sen_num = sen_num)
    return(output)
  } else {
    # Append the query to the sentence vector so it shares the document-term
    # matrix vocabulary with the text.
    text_sen[length(text_sen) + 1] <- sen
    # create corpus
    text_corp <- tm::VCorpus(VectorSource(text_sen))
    # create document term matrix
    text_dtm <- tm::DocumentTermMatrix(text_corp,
                                       control = list(removePunctuation = TRUE,
                                                      stopwords=TRUE))
    text_dtm <- as.matrix(text_dtm)
    # Cosine similarity between every document sentence and the query
    # (the last row of the DTM).
    text_sim <- apply(text_dtm[-nrow(text_dtm), ], 1,
                      lsa::cosine, text_dtm[nrow(text_dtm), ])
    # Keep sentences with cosine similarity >= 'cos_sim'.
    sim_select <- text_sim[which(text_sim >= cos_sim)]
    # prepare the output
    sim_num <- as.integer(names(sim_select))
    pages <- findInterval(sim_num, c(1, sen_nums))
    sen_nums2 <- c(0, sen_nums)
    sen_num <- as.integer(sim_num - sen_nums2[pages])
    output <- tibble::tibble(cos_sim = sim_select, page = pages,
                             sen_num = sen_num)
    return(output)
  }
}
|
80f95d6726ebb1f8468e380bd45fb16578477431
|
655988e12085dcc1b11cd59d13d6372cff7c49ad
|
/R/Imports.R
|
37e0b000f7993241bb7d4a5d877f0580824ee816
|
[] |
no_license
|
griu/RemixAutoML
|
0952ba173e27267d87f3eb7b51d4530196c8219e
|
9d60292869ad36b86e7c0aa49328727ed85be366
|
refs/heads/master
| 2020-07-11T10:27:04.039578
| 2019-08-26T06:46:11
| 2019-08-26T06:46:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 356
|
r
|
Imports.R
|
# Central roxygen import block: the tags below generate this package's
# NAMESPACE imports. Do not edit the @import/@importFrom lines casually.
#' @import data.table
#' @import foreach
#' @importFrom data.table data.table %chin% .I .N .SD := as.data.table fwrite is.data.table rbindlist set setcolorder setnames setorderv as.IDate as.ITime
#' @importFrom lubridate %m+%
#' @importFrom foreach %dopar%
#' @importFrom stats optimize pchisq
#' @import doParallel
NULL
# Declare this package data.table-aware so DT[...] syntax dispatches to
# data.table methods when data.table is Imported rather than Depended on
# (see the data.table "Importing data.table" vignette).
.datatable.aware = TRUE
|
319967ac9d7a86f0afef0e4d8a1f0a69fe352889
|
416550c21c0e3f49ae34ef843b4c352910c3c2f9
|
/man/detectPeaks.Rd
|
8966271cc8ee566808d459840694654afb7eb51d
|
[] |
no_license
|
thomasp85/MSsary
|
34dc8e93fd13a33ba6f78598626bb134d6cb151c
|
bf182b67b072256c4ff16b8c72678109f899ecc5
|
refs/heads/master
| 2021-01-22T12:12:39.641522
| 2015-01-26T11:44:40
| 2015-01-26T11:44:40
| 25,297,627
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 276
|
rd
|
detectPeaks.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/generics.R
\name{detectPeaks}
\alias{detectPeaks}
\title{Detect peaks in chromatographic MS data}
\usage{
detectPeaks(object, ...)
}
\description{
Detect peaks in chromatographic MS data
}
|
e455200fa528d4eb5f726c688f7bb3fb9d7a5437
|
0d7d795e5015f890e4b518bbebb0c199d4aa8e4f
|
/PBSddiff/R/pcod_iscam.r
|
4b724357261dbbcd5be53bcc7c7124e450611aa0
|
[] |
no_license
|
pbs-software/pbs-ddiff
|
e46645e91dc4dbfd1b30b1da234f8651ae6bb377
|
b46f1266f3eac21a29fae3e9738fdd9ee74c7e4f
|
refs/heads/master
| 2023-07-08T20:54:51.247865
| 2023-06-22T22:42:11
| 2023-06-22T22:42:11
| 100,047,840
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 53,872
|
r
|
pcod_iscam.r
|
#**********************************************************************************
# pcod_iscam.r
# This file contains the code for a front end GUI controller for pcod_iscam using
# Tcl/Tk windows implemented using the R package 'PBSModelling'. The data
# structure used is an opList, which is a list of lists, see loadScenarios.r for
# details on this structure. This file assumes that an object called 'opList'
# exists and is a valid opList.
#
# Author : Chris Grandin/Robyn Forrest
# Development Date : December 2011 - February 2012
#
# Source this file, then call pcod_iscam() with whatever arguments you need.
#
# pcod_iscam(reload=F,copyADMBExecutables=F,silent=F)
#
# To change/create global variables, find the function assignGlobals() in this file
#
#**********************************************************************************
removeAllExcept <- function(vars="opList", envir = .GlobalEnv) {
  # removeAllExcept()
  # Removes everything in 'envir' except the objects named in 'vars'
  # (plus this function itself and the 'so'/'Rcode' helpers).
  # Also sets the global flag 'modelLoaded', which tells the software whether
  # the model output has already been loaded:
  #   FALSE when any object to keep is missing from 'envir';
  #   TRUE  when all are present and everything else was removed.
  # - vars  - character vector of objects to keep, typically just 'opList'
  # - envir - environment to clear, typically .GlobalEnv
  vars <- c(vars, "removeAllExcept", "so", "Rcode")
  # FIX: spelled out all.names = TRUE (the original 'all = T' relied on
  # partial argument matching and the reassignable T shorthand).
  keep <- match(x = vars, table = ls(all.names = TRUE, envir = envir))
  if (any(is.na(keep))) {
    assign("modelLoaded", FALSE, envir = .GlobalEnv)
  } else {
    rm(list = ls(all.names = TRUE, envir = envir)[-keep], envir = envir)
    assign("modelLoaded", TRUE, envir = .GlobalEnv)
  }
}
}
# Script-level setup: clear the workspace unless the file is being driven
# from Sweave ('usingSweave' is set to TRUE in `modelResults.Rnw').
if (!exists("usingSweave",envir=.GlobalEnv) || !usingSweave){ ## This is set to TRUE in `modelResults.Rnw'
	removeAllExcept(vars=NULL)
	usingSweave = FALSE
}
# Package dependencies for the GUI and plotting code.
# NOTE(review): library() would be preferable to require() here -- require()
# only returns FALSE on failure instead of stopping with an error.
#require(Hmisc)
require(MASS)
require(KernSmooth)
require(MCMCpack)
require(coda)
require(PBSmodelling) ## changes from PBSmodelling (PBStools loads PBSmodelling)
require(grDevices)
modelDir = "C:/Users/haighr/Files/Archive/Bat/"
## IMPORTANT -- each user should define a directory that contains `iscamdelaydiff.exe';
## this directory will be added to the path seen by R's Sys.getenv()["PATH"]
options(stringsAsFactors=FALSE)
## make warnings print out when they occur instead of at the end
options(warn=1)
if (!exists("usingSweave",envir=.GlobalEnv) || !usingSweave) {
	## Standardise the quantiles to avoid confusion
	quants3 = c(0.05, 0.50, 0.95)
	quants5 = c(0.05, 0.25, 0.50, 0.75, 0.95)
	## Choose the number of projection years (max=5)
	Nproj = 2
}
# Runtime stats constants for plotting
.MAXGRAD <- 0.0001  # presumably the max-gradient convergence criterion -- TODO confirm
.FUNEVALS <- 150    # reference number of function evaluations -- TODO confirm
.PCHCODE <- 16      # plotting symbol code (filled circle)
# Helper scripts providing the cribtab, figure, table, sensitivity, retro,
# ADMB-file and scenario-loading functions used by the GUI below.
source("cribtab.r")
source("calcHRP.r")
source("reptolist.r")
#source("pcod_iscamFriedEgg.r")
source("pcod_iscamExecutiveSummary.r")
source("pcod_iscamFigs.r")
source("pcod_iscamTables.r")
source("pcod_iscamSens.r")
source("pcod_iscamRetro.r")
source("pcod_iscamADMBFileControl.r")
source("pcod_iscamUtils.r")
source("pcod_iscamLoadScenarios.r")
cat("Type pcod_iscam() to start GUI\n")
cat("Optional arguments: pcod_iscam(reload=T,silent=F,copyADMBExecutables=F)\n\n")
pcod_iscam <- function(reloadScenarios=TRUE, copyADMBExecutables=FALSE, silent=FALSE){
  # Launch the pcod_iscam (CCAM) GUI.
  # - reloadScenarios     T/F - reload data from all model output files in all scenarios
  # - copyADMBExecutables T/F - copy the ADMB executable into each scenario folder
  # - silent              T/F - show messages on the command line
  # Destroy any existing window first; the GUI doesn't seem to refresh
  # properly when re-created over an old one.
  closeWin()
  # (Re)load the scenario data, then build and show the window.
  loadData(reloadScenarios = reloadScenarios,
           copyADMBExecutables = copyADMBExecutables,
           silent = silent)
  .pcod_iscamGUIsetup(win = "pcod_iscamGui", silent = silent)
  invisible()
}
loadData <- function(reloadScenarios=TRUE, copyADMBExecutables=FALSE, silent=FALSE){
  # Load all model output data from all scenarios into the global workspace.
  # - reloadScenarios     T/F - reload the data from all model output files
  # - copyADMBExecutables T/F - copy the admb executable from the admb folder
  #   into each scenario folder, so each model can be run from inside its own
  #   folder (allows several models at once on multi-processor machines)
  # - silent              T/F - show messages on the command line
  # 'modelLoaded' is a global flag recording whether a load has happened.
  if (!exists("modelLoaded", envir = .GlobalEnv)) {
    modelLoaded <<- FALSE
  }
  mustLoad <- reloadScenarios || !modelLoaded
  if (mustLoad) {
    loadScenarios(silent = silent)
    modelLoaded <<- TRUE
    if (!silent) {
      cat("loadData: Loading data from model output files.\n")
    }
  } else if (!silent) {
    cat("loadData: Warning! Using previously loaded data for GUI.\n\n")
  }
  if (copyADMBExecutables) {
    copyExecutableToScenariosFolders(silent = silent)
  }
}
.pcod_iscamGUIsetup <- function(win, silent=FALSE){
	# Build the scenario/sensitivity-group tables from the global opList,
	# publish them (and the first scenario's data) to the global environment,
	# then create the PBSmodelling window described by <win>Win.txt.
	# - win    - window name; only "pcod_iscamGui" is handled
	# - silent - T/F, suppress messages (passed to try() on setWinVal)
	if(win=="pcod_iscamGui"){
		viewHeader <- data.frame()
		viewSensitivityGroups <- data.frame()
		# Collect one row per scenario: its name and its sensitivity group
		# (a missing SensitivityGroup defaults to 0).
		for(scenario in 1:length(opList)){
			viewHeader <- rbind(viewHeader,opList[[scenario]][[1]])
			sengroup = opList[[scenario]][[4]]$SensitivityGroup
			if (is.null(sengroup)) sengroup = 0
			viewSensitivityGroups <- rbind(viewSensitivityGroups,sengroup)
		}
		colnames(viewHeader) <- "Scenario List"
		colnames(viewSensitivityGroups) <- "Sensitivity Group"
		scenarioHeader <- cbind(viewHeader,viewSensitivityGroups)
		# Publish the tables and the scenario index list for use by the GUI.
		assign("viewHeader", viewHeader,envir=.GlobalEnv)
		assign("viewSensitivityGroups", viewSensitivityGroups,envir=.GlobalEnv)
		assign("scenarioHeader", scenarioHeader,envir=.GlobalEnv)
		assign("scenarioList", as.numeric(rownames(viewHeader)), envir=.GlobalEnv)
		assign("A", opList[[1]][[4]], envir=.GlobalEnv) ## the 4th member in each model list is the data
		# Create the Tcl/Tk window from its description file in the cwd.
		createWin(paste(getwd(),"/",win,"Win.txt",sep=""),env=.GlobalEnv)
		winList <- list(entryScenario=1, fdScenarios=fdScenarios, burn=opList[[1]][[4]]$mc.burn)
		try(setWinVal(winList), silent=silent)
		.loadPlottingLimits()
		# Set scenario 1 (Base) as the default start model.
		assignGlobals(scenario=1, gui=TRUE)
	}
}
assignGlobals <- function(scenario=1, silent=FALSE, gui=TRUE){
	# assignGlobals()
	# Copies everything the plotting/table code needs for one scenario into
	# the global environment: the model output object 'A', the figure/table
	# directories, model dimensions, and plotting constants.
	# - scenario - index into opList of the scenario to set up
	# - silent T/F - show messages on command line
	# - gui T/F - passed through to .setBurnThin (TRUE reads burn-in from the GUI)
	# A is the pcod_iscam model output object (4th member of each opList entry).
	assign("scenarioCurr",scenario,envir=.GlobalEnv) ## primarily for writing ALL plot and tables
	assign("A",opList[[scenario]][[4]],envir=.GlobalEnv)
	assign("figDir",opList[[scenario]][[2]],envir=.GlobalEnv)
	assign("tabDir",opList[[scenario]][[3]],envir=.GlobalEnv)
	# saveon is a toggle for writing figures to disk
	assign("saveon",FALSE,envir=.GlobalEnv)
	# Model dimensions - ideally these are read in, not hardwired like this.
	assign("age",opList[[scenario]][[4]]$age,envir=.GlobalEnv)
	assign("nage",age[length(age)],envir=.GlobalEnv)          # last element of 'age'
	assign("yr",opList[[scenario]][[4]]$yr,envir=.GlobalEnv)
	assign("yrs",opList[[scenario]][[4]]$yrs,envir=.GlobalEnv)
	assign("nyrs",length(yrs),envir=.GlobalEnv)
	assign("nyear",length(yr),envir=.GlobalEnv)
	assign("nyr",yr[length(yr)],envir=.GlobalEnv)             # last element of 'yr'
	assign("ngear",opList[[scenario]][[4]]$ngear,envir=.GlobalEnv)
	assign("delaydiff",opList[[scenario]][[4]]$delaydiff,envir=.GlobalEnv)
	assign("Burn", opList[[scenario]][[4]]$mc.burn, envir=.GlobalEnv)
	# Set age comp data.
	# RF made this change so that the code doesn't try to read in age
	# observations that aren't there in the ageless model (cntrl 14 == 3).
	assign("AgeLikelihood", opList[[scenario]][[4]]$cntrl[14], envir=.GlobalEnv)
	if(AgeLikelihood!=3){ # do not do this if model is 'ageless'; 3 is the only survey with age obs right now
		assign("Asurv_obs",opList[[scenario]][[4]]$A,envir=.GlobalEnv)
		assign("Asurv_est",opList[[scenario]][[4]]$Ahat,envir=.GlobalEnv)
		assign("Asurv_res",opList[[scenario]][[4]]$A_nu,envir=.GlobalEnv)
	}
	assign("nits",opList[[scenario]][[4]]$nits,envir=.GlobalEnv) # num survey indices
	# Set plot output type
	assign("plotType","png",envir=.GlobalEnv)
	# Plot globals; the mt* variables control the management target line.
	assign("mtLineColor","#009E73",envir=.GlobalEnv) ## Upper Stock Reference (colour-blind bluegreen)
	assign("lrpLineColor","#CC79A7",envir=.GlobalEnv) ## Lower/Limit Reference Point (colour-blind redpurple)
	assign("mtLineType",3,envir=.GlobalEnv)
	assign("mtLineWidth",3,envir=.GlobalEnv)
	#assign("mpdLineColor","#0072B2",envir=.GlobalEnv) ## MPD lines (colour-blind blue) a bit too dark?
	assign("mpdLineColor","#56B4E9",envir=.GlobalEnv) ## MPD lines (colour-blind skyblue) a bit too light?
	.setBurnThin(silent=silent, gui=gui)
}
.setBurnThin <- function(silent=FALSE, gui=TRUE){
	## Synchronize the MCMC burn-in/thinning settings between the GUI window
	## and the global environment.
	##
	## silent : unused here; kept for call-signature consistency with siblings
	## gui    : TRUE  -> the GUI is authoritative; copy its burn value to the
	##                   global 'Burn'.
	##          FALSE -> the global 'Burn' is authoritative; push it back into
	##                   the GUI entry box.
	winVals <- getWinVal()
	if (gui) {
		assign("Burn", winVals$burn, envir=.GlobalEnv)
	} else {
		setWinVal(list(burn=Burn))
	}
	## Thin and Nbin always come from the GUI, regardless of 'gui'.
	assign("Thin", winVals$thin, envir=.GlobalEnv)
	assign("Nbin", winVals$nbin, envir=.GlobalEnv)
}
.subView <- function(silent=F){
	## Handle one GUI action from the scenario-viewer window: scenario and
	## sensitivity-group navigation, writing plots/tables, running
	## retrospectives, and keeping the y-limit entry boxes synchronized
	## across the base, sensitivity and retrospective tabs.  Always finishes
	## by redrawing the currently selected plot via .doPlots().
	##
	## silent : suppress messages from setWinVal calls wrapped in try()
	act <- getWinAct()
	val <- getWinVal()
	# scenarioList is a list of the scenario names (i.e. folder names)
	scenarioList <- as.numeric(rownames(viewHeader))
	## getWinAct() returns an action history; only the latest action matters
	if(length(act)>1)
		act <- act[1]
	if(act=="prevScenario"){
		## Step to the previous scenario, clamped at the lowest scenario number
		prevScenario <- val$entryScenario-1
		if(prevScenario<as.numeric(min(scenarioList))){
			prevScenario <- as.numeric(min(scenarioList))
		}
		setWinVal(c(entryScenario=prevScenario))
		assignGlobals(prevScenario, gui=FALSE)
		.loadPlottingLimits()
	}else if(act=="nextScenario"){
		## Step to the next scenario, clamped at the highest scenario number
		nextScenario <- val$entryScenario+1
		if(nextScenario>as.numeric(max(scenarioList))){
			nextScenario <- as.numeric(max(scenarioList))
		}
		setWinVal(c(entryScenario=nextScenario))
		assignGlobals(nextScenario, gui=FALSE)
		.loadPlottingLimits()
	}else if(act=="changeEntryScenario"){
		assignGlobals(val$entryScenario, gui=FALSE)
		.loadPlottingLimits()
	}else if(act=="prevSens"){
		prevSens <- val$entrySensitivityGroup - 1
		setWinVal(c(entrySensitivityGroup=prevSens))
	}else if(act=="nextSens"){
		nextSens <- val$entrySensitivityGroup + 1
		setWinVal(c(entrySensitivityGroup=nextSens))
	}else if(act=="writePlots"){
		assignGlobals(getWinVal()$entryScenario, gui=TRUE) ## make sure you have selected Scenario
		.writePlots()
	}else if(act=="writeTables"){
		assignGlobals(getWinVal()$entryScenario, gui=TRUE) ## make sure you have selected Scenario
		.writeTables()
	}else if(act=="writeAllPlots"){
		.writeAllPlots(gui=FALSE)
	}else if(act=="writeAllTables"){
		.writeAllTables(gui=FALSE)
	}else if(act=="writeSensPlots"){
		.writeSensPlots()
	}else if(act=="writeRetroPlots"){
		.writeRetroPlots()
	}else if(act=="runCurrScenario"){
		.runCurrScenario()
	}else if(act=="changeBurnThin"){
		.setBurnThin()
	}else if(act=="changeSelectivityGroup"){
		## Intentionally empty: a selectivity-group change only needs the
		## redraw performed by .doPlots() below.
	}else if(act=="changeSensStatus"){
		.writeSensitivityGroups()
	}else if(act=="runRetros"){
		.runRetros()
	}else if(act=="runAllRetros"){
		.runAllRetros()
	}else if(act=="changeBiomassYlim"){
		# Set the sensitivity and retro ylimit entry boxes and check boxes
		opList[[val$entryScenario]][[4]]$biomassYlim <<- val$biomassYlim
		opList[[val$entryScenario]][[4]]$maxBiomassYlim <<- val$maxBiomassYlim
		winList <- c(biomassSensYlim=val$biomassYlim,
					maxBiomassSensYlim=val$maxBiomassYlim,
					biomassRetroYlim=val$biomassYlim,
					maxBiomassRetroYlim=val$maxBiomassYlim)
		try(setWinVal(winList), silent=silent)
	}else if(act=="changeBiomassSensYlim"){
		# Set the base and retro ylimit entry boxes and check boxes
		opList[[val$entryScenario]][[4]]$biomassYlim <<- val$biomassSensYlim
		opList[[val$entryScenario]][[4]]$maxBiomassYlim <<- val$maxBiomassSensYlim
		winList <- c(biomassYlim=val$biomassSensYlim,
					maxBiomassYlim=val$maxBiomassSensYlim,
					biomassRetroYlim=val$biomassSensYlim,
					maxBiomassRetroYlim=val$maxBiomassSensYlim)
		try(setWinVal(winList), silent=silent)
	}else if(act=="changeBiomassRetroYlim"){
		# Set the base and sensitivity ylimit entry boxes and check boxes
		opList[[val$entryScenario]][[4]]$biomassYlim <<- val$biomassRetroYlim
		opList[[val$entryScenario]][[4]]$maxBiomassYlim <<- val$maxBiomassRetroYlim
		winList <- c(biomassYlim=val$biomassRetroYlim,
					maxBiomassYlim=val$maxBiomassRetroYlim,
					biomassSensYlim=val$biomassRetroYlim,
					maxBiomassSensYlim=val$maxBiomassRetroYlim)
		try(setWinVal(winList), silent=silent)
	}else if(act=="changeDepletionYlim"){
		# Set the sensitivity and retro ylimit entry boxes and check boxes
		opList[[val$entryScenario]][[4]]$depletionYlim <<- val$depletionYlim
		opList[[val$entryScenario]][[4]]$maxDepletionYlim <<- val$maxDepletionYlim
		winList <- c(depletionSensYlim=val$depletionYlim,
					maxDepletionSensYlim=val$maxDepletionYlim,
					depletionRetroYlim=val$depletionYlim,
					maxDepletionRetroYlim=val$maxDepletionYlim)
		try(setWinVal(winList), silent=silent)
	}else if(act=="changeDepletionSensYlim"){
		# Set the base and retro ylimit entry boxes and check boxes
		opList[[val$entryScenario]][[4]]$depletionYlim <<- val$depletionSensYlim
		opList[[val$entryScenario]][[4]]$maxDepletionYlim <<- val$maxDepletionSensYlim
		winList <- c(depletionYlim=val$depletionSensYlim,
					maxDepletionYlim=val$maxDepletionSensYlim,
					depletionRetroYlim=val$depletionSensYlim,
					maxDepletionRetroYlim=val$maxDepletionSensYlim)
		try(setWinVal(winList), silent=silent)
	}else if(act=="changeDepletionRetroYlim"){
		# Set the base and sensitivity ylimit entry boxes and check boxes
		opList[[val$entryScenario]][[4]]$depletionYlim <<- val$depletionRetroYlim
		opList[[val$entryScenario]][[4]]$maxDepletionYlim <<- val$maxDepletionRetroYlim
		winList <- c(depletionYlim=val$depletionRetroYlim,
					maxDepletionYlim=val$maxDepletionRetroYlim,
					depletionSensYlim=val$depletionRetroYlim,
					maxDepletionSensYlim=val$maxDepletionRetroYlim)
		try(setWinVal(winList), silent=silent)
	}else if(act=="changeRecruitmentYlim"){
		# Set the sensitivity and retro ylimit entry boxes and check boxes
		opList[[val$entryScenario]][[4]]$recruitmentYlim <<- val$recruitmentYlim
		opList[[val$entryScenario]][[4]]$maxRecruitmentYlim <<- val$maxRecruitmentYlim
		winList <- c(recruitmentSensYlim=val$recruitmentYlim,
					maxRecruitmentSensYlim=val$maxRecruitmentYlim,
					recruitmentRetroYlim=val$recruitmentYlim,
					maxRecruitmentRetroYlim=val$maxRecruitmentYlim)
		try(setWinVal(winList), silent=silent)
	}else if(act=="changeRecruitmentSensYlim"){
		# Set the base and retro ylimit entry boxes and check boxes
		## BUG FIX: this branch previously stored val$recruitmentYlim and
		## val$maxRecruitmentYlim (the base-tab values) instead of the
		## Sens-tab values the user actually edited, unlike the analogous
		## changeBiomassSensYlim / changeDepletionSensYlim branches (and
		## inconsistent with the winList built just below).
		opList[[val$entryScenario]][[4]]$recruitmentYlim <<- val$recruitmentSensYlim
		opList[[val$entryScenario]][[4]]$maxRecruitmentYlim <<- val$maxRecruitmentSensYlim
		winList <- c(recruitmentYlim=val$recruitmentSensYlim,
					maxRecruitmentYlim=val$maxRecruitmentSensYlim,
					recruitmentRetroYlim=val$recruitmentSensYlim,
					maxRecruitmentRetroYlim=val$maxRecruitmentSensYlim)
		try(setWinVal(winList), silent=silent)
	}else if(act=="changeRecruitmentRetroYlim"){
		# Set the base and sensitivity ylimit entry boxes and check boxes
		opList[[val$entryScenario]][[4]]$recruitmentYlim <<- val$recruitmentRetroYlim
		opList[[val$entryScenario]][[4]]$maxRecruitmentYlim <<- val$maxRecruitmentRetroYlim
		winList <- c(recruitmentYlim=val$recruitmentRetroYlim,
					maxRecruitmentYlim=val$maxRecruitmentRetroYlim,
					recruitmentSensYlim=val$recruitmentRetroYlim,
					maxRecruitmentSensYlim=val$maxRecruitmentRetroYlim)
		try(setWinVal(winList), silent=silent)
	}else if(act=="changeRefptSensYlim"){
		# Set the base and retro ylimit entry boxes and check boxes
		winList <- c(RefptSensYlim=val$RefptSensYlim,
					maxRefptSensYlim=val$maxRefptSensYlim)
		try(setWinVal(winList), silent=silent)
	}
	# Whichever radio button is selected will now be plotted for the scenario
	.doPlots()
}
.doPlots <- function(){
	## Draw the plot currently selected by the GUI radio button (viewPlotType)
	## for the active scenario.  Pure dispatcher: each branch calls a single
	## figure function defined elsewhere in the project (see the banner
	## comments naming the source files).  Side effects only (draws to the
	## active device); graphics state (par) is saved and restored around the
	## dispatch, and the function returns invisibly.
	##
	## Relies on globals set by assignGlobals(): delaydiff, nyr, Burn, Thin,
	## A, opList, scenarioCurr, cribtab.
	# val is the value object from GetWinVal()
	val <- getWinVal()
	if (saveon) tput(val) ## store the current val in .PBSmodEnv for use by saveFig
	pType <- val$viewPlotType
	## Refresh global Burn/Thin/Nbin from the GUI before any MCMC-based figure
	.setBurnThin()
	oldpar <- par( no.readonly=TRUE )
	## .checkEntries() presumably validates the GUI entries before plotting
	## (defined elsewhere) -- nothing is drawn if it returns FALSE
	if(.checkEntries()){
		#################################################
		# Call figure code from pcod_iscamExecutiveSummary.r #
		#################################################
		if(pType=="sLandings"){
			fig.a()
		}else if(pType=="sEffort"){
			fig.effort()
		}else if(pType=="sBiomass"){
			## Spawning biomass, MCMC only
			fig.b(includeMPD=F,
				ylimit=val$biomassYlim,
				useMaxYlim=val$maxBiomassYlim,
				useHRP=val$useHRP, minYr=val$minYr, aveYr=val$aveYr,
				tac.use=val$currTAC,
				opacity=20,
				main="Spawning biomass",
				xlab="Year")
		}else if(pType=="sBiomassMPDOver"){
			## Spawning biomass, MCMC with MPD overlay
			fig.b(includeMPD=T,
				ylimit=val$biomassYlim,
				useMaxYlim=val$maxBiomassYlim,
				useHRP=val$useHRP, minYr=val$minYr, aveYr=val$aveYr,
				tac.use=val$currTAC,
				opacity=20,
				main="Spawning biomass",
				xlab="Year")
		}else if(pType=="sBiomassMPD"){
			fig.biomass.mpd(ylimit=val$biomassYlim,
				useMaxYlim=val$maxBiomassYlim,
				main="Spawning biomass",
				xlab="Year")
		}else if(pType=="tBiomass"){
			fig.bt(includeMPD=F,
				ylimit=val$tbiomassYlim,
				useMaxYlim=val$maxtBiomassYlim,
				opacity=20,
				main="Total biomass",
				xlab="Year")
		}else if(pType=="tBiomassMPDOver"){
			fig.bt(includeMPD=T,
				ylimit=val$tbiomassYlim,
				useMaxYlim=val$maxtBiomassYlim,
				opacity=20,
				main="Total biomass",
				xlab="Year")
		}else if(pType=="tBiomassMPD"){
			fig.bt.mpd(ylimit=val$tbiomassYlim,
				useMaxYlim=val$maxtBiomassYlim,
				main="Total biomass",
				xlab="Year")
		} else if(pType=="sBiomassRecruits"){
			## Two-panel biomass + recruitment plot
			fig.biomass.recruits(yBiomassYlim=val$biomassYlim,
				useMaxBiomassYlim=val$maxBiomassYlim,
				yRecruitmentYlim=val$recruitmentYlim,
				useMaxRecruitmentYlim=val$maxRecruitmentYlim)
		}else if(pType=="sDepletion"){
			fig.c(includeMPD=F,
				ylimit=val$depletionYlim,
				useMaxYlim=val$maxDepletionYlim,
				useHRP=val$useHRP, minYr=val$minYr, aveYr=val$aveYr,
				xlab="Year",
				main="Spawning depletion")
		}else if(pType=="sDepletionMPDOver"){
			fig.c(includeMPD=T,
				ylimit=val$depletionYlim,
				useMaxYlim=val$maxDepletionYlim,
				useHRP=val$useHRP, minYr=val$minYr, aveYr=val$aveYr,
				xlab="Year",
				main="Spawning depletion")
		}else if(pType=="sDepletionMPD"){
			fig.depletion.mpd(ylimit=val$depletionYlim, useMaxYlim=val$maxDepletionYlim, useHRP=val$useHRP, minYr=val$minYr, aveYr=val$aveYr)
		}else if(pType=="sRecruits"){
			fig.dmcmc(ylimit=val$recruitmentYlim,
				useMaxYlim=val$maxRecruitmentYlim,
				xlab="Year",
				main="Recruitment")
		}else if(pType=="sRecruitsMPD"){
			fig.d(ylimit=val$recruitmentYlim,
				useMaxYlim=val$maxRecruitmentYlim,
				xlab="Year",
				main="Recruitment")
		}else if(pType=="sSPRMSY"){
			fig.e1()
		}else if(pType=="sSPRf40"){
			fig.e2()
		}else if(pType=="sFMCMC"){
			fig.Fmcmc()
		}else if(pType=="sFMPD"){
			fig.Fmpd()
		}else if(pType=="sBtBmsy"){
			fig.h()
		}else if(pType=="sEquilYield"){
			fig.i()
		}else if(pType=="sEquilF"){
			fig.j()
		#####################################
		# Call figure code from pcod_iscamFigs.r #
		#####################################
		}else if(pType=="sCommAgeResids1"){
			fig.comm.age.residuals1()
		}else if(pType=="sCommAgeResids2"){
			fig.comm.age.residuals2()
		}else if(pType=="sCommAgeProps"){
			fig.comm.age.props()
		}else if(pType=="sSurvAgeResids1"){
			fig.survey.age.residuals1()
		}else if(pType=="sSurvAgeResids2"){
			fig.survey.age.residuals2()
		}else if(pType=="sSurvAgeProps"){
			fig.survey.age.props()
		}else if(pType=="sCommAgePropsFit"){
			fig.comm.age.props.fit()
		}else if(pType=="sSurvAgePropsFit"){
			fig.survey.age.props.fit()
		}else if(pType=="sSurvBiomassFit"){
			fig.surveybiomass.fit()
		}else if(pType=="sSelectivities"){
			fig.selectivity()
		}else if(pType=="sCatchFit"){
			fig.catchFit()
		}else if(pType=="sWeightFit"){
			fig.weightFit()
		}else if(pType=="sFishingMortality"){
			fig.fishingMortality()
		}else if(pType=="sRecAnom"){ #RF July 2012
			fig.RecAnomMPD()
		}else if(pType=="sRecAnomMCMC"){ #RF July 2012
			fig.RecAnomMCMC()
		}else if(pType=="sannualMeanWt"){ #RF July 2012
			fig.annualMeanWt()
		}else if(pType=="sPhase"){
			fig.phase()
		}else if(pType=="sTimeVaryingSurvSel"){
			fig.time.varying.selectivity(2)
		}else if(pType=="sTimeVaryingCommSel"){
			fig.time.varying.selectivity(1)
		## Control-point plots exist only for the delay-difference model
		## (delaydiff==1) and only for assessments from 2012 onward (nyr
		## gates out the 2005 bridging analyses).
		}else if(pType=="sCtlPtsBox" && delaydiff==1){
			if(nyr>=2012) {fig.Allcontrol.pts.Box(tac.use=val$currTAC, useHRP=val$useHRP, minYr=val$minYr, aveYr=val$aveYr)
			}else cat("No control point plot for 2005 bridging analyses\n")
		}else if(pType=="sMSYCtlPts" && delaydiff==1){
			if(nyr>=2012) {fig.MSYcontrol.pts()
			}else cat("No control point plot for 2005 bridging analyses\n")
		}else if(pType=="sHistCtlPts" && delaydiff==1){
			if(nyr>=2012) {fig.Histcontrol.pts(minYr=val$minYr, aveYr=val$aveYr)
			}else cat("No control point plot for 2005 bridging analyses\n")
		}else if(pType=="sBench" && delaydiff==1){
			if(nyr>=2012) {fig.Benchmarks()
			}else cat("No control point plot for 2005 bridging analyses\n")
		}else if(pType=="sFmax" && delaydiff==1){
			fig.Fdensity(scenario = scenarioCurr,
				opList = opList,
				cribtab = cribtab,
				sensfld = "sens",
				type = "box")
		}else if(pType=="sCtlPtsBox" && delaydiff==0){
			cat("No control point plot for age-structured model\n")
		}else if(pType=="sMSYCtlPts" && delaydiff==0){
			cat("No control point plot for age-structured model\n")
		}else if(pType=="sHistCtlPts" && delaydiff==0){
			cat("No control point plot for age-structured model\n")
		}else if(pType=="sBench" && delaydiff==0){
			cat("No control point plot for age-structured model\n")
		}else if(pType=="sSnailTrail"){
			fig.snail(useHRP=val$useHRP) ##RH: taken from PBSawatea
		#################################################
		# Call Parameter estimate code from pcod_iscamFigs.r #
		#################################################
		}else if(pType=="sPosteriorParams"){
			fig.mcmc.priors.vs.posts(exFactor=1.0)
		}else if(pType=="sPosteriorParams2"){
			fig.mcmc.priors.vs.posts2(exFactor=1.0)
		}else if(pType=="sPosteriorParamskey"){
			fig.mcmc.priors.vs.postskey(exFactor=1.0)
		}else if(pType=="sParameterPairs"){
			fig.estimated.params.pairs()
		}else if(pType=="sParameterPairs2"){
			fig.estimated.params.pairs2()
		}else if(pType=="sParameterPairskey"){
			fig.estimated.params.pairs.key()
		}else if(pType=="sParameterPairsnologkey"){
			fig.estimated.params.pairs.no.log.key()
		}else if(pType=="sVariancePartitions"){
			fig.variance.partitions()
		}else if(pType=="sPriorsVsPosts"){
			fig.mcmc.priors.vs.posts(exFactor=1.0,showEntirePrior=T)
		}else if(pType=="sMCMCTrace"){
			fig.mcmc.trace()
		}else if(pType=="sMCMCChains"){
			## Burn/Thin are globals refreshed by .setBurnThin() above;
			## mcmc2/plotChains are project helpers (coda-style objects)
			dmcmc = mcmc2(A$mc[,parEstimated()],start=Burn+1,thin=Thin)
			plotChains(dmcmc,pdisc=0,axes=TRUE,between=list(x=0.15,y=0.2),col.trace=c("green","red","blue"),xlab="Sample",ylab="Cumulative Frequency")
		}else if(pType=="sMCMCAutocor"){
			fig.mcmc.autocor()
		}else if(pType=="sMCMCDensity"){
			fig.mcmc.density()
		}else if(pType=="sMCMCGeweke"){
			fig.mcmc.geweke()
		}else if(pType=="sMCMCGelman"){
			fig.mcmc.gelman()
		###################################################
		# Call Sensitivity plotting code from pcod_iscamSens.r #
		###################################################
		}else if(pType=="sSensSB"){
			fig.base.vs.sens(sensitivityGroup=val$entrySensitivityGroup,
				whichPlot="biomass",
				useHRP=val$useHRP, minYr=val$minYr, aveYr=val$aveYr,
				ylimit=val$biomassYlim,
				useMaxYlim=val$maxBiomassYlim,
				offset=0.3)
		}else if(pType=="sSensD"){
			fig.base.vs.sens(sensitivityGroup=val$entrySensitivityGroup,
				whichPlot="depletion",
				useHRP=val$useHRP, minYr=val$minYr, aveYr=val$aveYr,
				ylimit=val$depletionYlim,
				useMaxYlim=val$maxDepletionYlim)
		}else if(pType=="sSensRec"){
			fig.base.vs.sens(sensitivityGroup=val$entrySensitivityGroup,
				whichPlot="recruits",
				useHRP=val$useHRP, minYr=val$minYr, aveYr=val$aveYr,
				ylimit=val$recruitmentYlim,
				useMaxYlim=val$maxRecruitmentYlim,
				offset=0.3)
		}else if(pType=="sSensRefPts"){
			fig.base.vs.sens(sensitivityGroup=val$entrySensitivityGroup,
				whichPlot="refpts",
				useHRP=val$useHRP, minYr=val$minYr, aveYr=val$aveYr,
				ylimit=val$RefptSensYlim,
				useMaxYlim=val$maxRefptSensYlim)
		######################################################
		# Call Retrospective plotting code from pcod_iscamRetro.r #
		######################################################
		}else if(pType=="sRetroSB"){
			fig.retro(whichPlot="biomass",
				ylimit=val$biomassYlim,
				useMaxYlim=val$maxBiomassYlim)
		}else if(pType=="sRetroD"){
			fig.retro(whichPlot="depletion",
				ylimit=val$depletionYlim,
				useMaxYlim=val$maxDepletionYlim)
		}else if(pType=="sRetroRec"){
			fig.retro(whichPlot="recruits",
				ylimit=val$recruitmentYlim,
				useMaxYlim=val$maxRecruitmentYlim)
		#############################################
		# Call runtime Values code from pcod_iscamFigs.r #
		#############################################
		}else if(pType=="sObjFuncVal"){
			plotRuntimeStats(1)
		}else if(pType=="sMaxGrad"){
			plotRuntimeStats(2)
		}else if(pType=="sFuncEvals"){
			plotRuntimeStats(3)
		}else if(pType=="sHangCodes"){
			plotRuntimeStats(4)
		}else if(pType=="sExitCodes"){
			plotRuntimeStats(5)
		}
	}
	## Restore whatever graphics parameters the figure functions changed
	par(oldpar)
	return(invisible())
}
.writeAllTables <- function(silent=FALSE, gui=FALSE){
	## Write every output table for every scenario to disk.
	## For each scenario number in the global 'scenarioList', load that
	## scenario's globals (assignGlobals) and then write its table set.
	##
	## silent : passed through to .writeTables (suppresses try() messages)
	## gui    : TRUE when driven from the GUI; passed through unchanged
	if (!exists("scenarioList", envir=.GlobalEnv, inherits=FALSE))
		stop("PROBLEM: scenarioList not found in Global environment")
	scenarioList <- get("scenarioList", envir=.GlobalEnv)
	for (scenario in scenarioList){
		assignGlobals(scenario, gui=gui)
		.writeTables(silent=silent, gui=gui)
	}
}
## --------------------------
## Last modified by RH 170906
## --------------------------
.writeTables <- function(silent=FALSE, gui=TRUE){
	## write all tables for the given scenario to disk
	## Builds projection/decision tables plus quantile+MPD CSV tables for
	## fishing mortality, biomass, recruitment and parameters, and a combined
	## latex-ready "PBMQuants" table (parameters + biomass- or HRP-based
	## quantities, depending on the useHRP GUI flag).
	## Relies on globals set by assignGlobals(): A (model output list), Burn,
	## delaydiff, nyr, yrs, yr, quants3; and on the GUI state via getWinVal().
	## Side effects only: writes CSV tables via table.* helper functions.
	if(exists("scenarioCurr",envir=.GlobalEnv, inherits=FALSE))
		scenarioCurr = get("scenarioCurr",envir=.GlobalEnv)
	else stop("PROBLEM: scenarioCurr not found in Global environment")
	setWinVal(list(entryScenario=scenarioCurr))
	val <- getWinVal()
	## Tables are MCMC-based; bail out early when no chains exist
	if (!isScenMcmc()) {
		cat("WARNING (.writeTables): No MCMCs generated for this scenario\n"); return(invisible("No MCMC data")) }
	.setBurnThin(silent=silent, gui=gui)
	#Nyears = length(A$yr)
	cat("\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n")
	#cat("No tables currently implemented\n")
	cat("Writing tables\n")
	cat("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n")
	## Tables only exist for the delay-difference model and non-bridging
	## (>= 2012) assessments; otherwise fall through to the message below
	if(delaydiff && nyr>=2012){
		quantProbs <- quants3 ## SST & WAP
		weightScale <- 1000.0
		debug = F; if (!debug) {
		try(table.projections(), silent=silent)
		try(table.decision(useHRP=val$useHRP, minYr=val$minYr, aveYr=val$aveYr), silent=silent)
		## Make Fishing mortality Quantiles and MPD table
		table.mcmc.mpd(mcmcData = A$mc.ft,
			burnin = Burn,
			probs = quantProbs,
			mpdData = A$ft,
			colLabels = A$yr,
			roundDec = 6, # Number of decimal places
			tableName = "FishingMortalityQuants")
		## Make Biomass Quantiles and MPD table
		table.mcmc.mpd(mcmcData = A$mc.sbt / weightScale,
			burnin = Burn,
			probs = quantProbs,
			mpdData = A$sbt / weightScale,
			colLabels = A$yrs,
			formatOut = "%1.3f",
			roundDec = 6, # Number of decimal places
			tableName = "BiomassQuants")
		## Make Recruitment Quantiles and MPD table
		table.mcmc.mpd(mcmcData = A$mc.rt,
			burnin = Burn,
			probs = quantProbs,
			mpdData = A$rt,
			#colLabels = A$yr[-(1:2)], # The 2 is because there is age-2 recruitment (P.cod)
			colLabels = A$yr[-(1:ifelse(A$delaydiff==1,max(A$sage,A$kage-A$sage),A$sage))], # Use kage if delaydiff model for age-k recruitment
			roundDec = 6, # Number of decimal places
			tableName = "RecruitmentQuants")
		## Make Paramter Quantiles and MPD values table
		## (leading parameters are stored in log space in A$mc; back-transform)
		mcmcParamTable <- cbind(exp(A$mc$log.ro), A$mc$h, exp(A$mc$log.m), exp(A$mc$log.rbar), exp(A$mc$log.rinit), A$mc$bo)
		paramNames <- c("r0","steepness","m","rbar","rbar_init","b0")
		mpdParamVector <- c(A$ro, A$steepness, A$m, A$rbar, A$rinit, A$sbo)
		# Add variable number of q's
		for(qInd in 1:length(A$q)){
			mcmcParamTable <- cbind(mcmcParamTable, eval(parse(text=paste0("A$mc$q",qInd))))
			mpdParamVector <- c(mpdParamVector, A$q[qInd])
			paramNames <- c(paramNames, paste0("q",qInd))
		}
		colnames(mcmcParamTable) <- paramNames
		table.mcmc.mpd(mcmcData = mcmcParamTable,
			burnin = Burn,
			probs = quantProbs,
			mpdData = mpdParamVector,
			colLabels = paramNames,
			roundDec = 6, # Number of decimal places
			tableName = "ParameterQuants")
		}
		### ========================================
		### Make one table with Parameters (P), Biomass-based quantities (B), and MST-based quantities (M)
		### The table is formatted to be latex-ready and sent to 'PBMQuants.csv' in the relevant Scenario's table folder.
		### When building the Model Results latex file, import the CSV and use the PBStools function `texArray'.
		### ========================================
		## For compatibility with calcHRPs, adjust for burnin before sending to 'table.mcmc.mpd()'
		# 1. Make Paramter Quantiles and MPD values table
		## First column is a zero placeholder used as the "Parameters" section
		## header row in the latex table
		mcmcParamTable <- cbind(rep(0,nrow(A$mc)),exp(A$mc$log.ro), A$mc$h, exp(A$mc$log.m), exp(A$mc$log.rbar), exp(A$mc$log.rinit))
		mpdParamVector <- c(0,A$ro, A$steepness, A$m, A$rbar, A$rinit)
		paramNames <- c("Parameters","$R_0$","$h$","$M$","$\\bar{R}$","$\\bar{R}_\\mathrm{init}$")
		# Add variable number of q's
		for(qInd in 1:length(A$q)){
			mcmcParamTable <- cbind(mcmcParamTable, eval(parse(text=paste0("A$mc$q",qInd))))
			mpdParamVector <- c(mpdParamVector, A$q[qInd])
			paramNames <- c(paramNames, paste0("$q_",qInd,"$"))
		}
		mcmcParamTable = mcmc2(mcmcParamTable, start=Burn+1) ## get rid of burnin now
		## Collect MPD values
		Bcurr.mpd = (rev(A$sbt))[1] ## final year fixed in loadScenarios
		Ucurr.mpd = A$ut[length(A$yr)]
		## MSY-based MPD
		B0.mpd = A$sbo
		msy.mpd = A$msy
		Bmsy.mpd = A$bmsy
		Umsy.mpd = 1-exp(-A$fmsy) ## convert instantaneous F to harvest rate
		## HRP MPD (historical reference points: averages/minima of the series)
		aveYr = renderVals(aveYr=val$aveYr,simplify=TRUE)
		sbt.mpd = A$sbt; names(sbt.mpd) = A$yrs
		sbt.min = findBmin(sbt.mpd,aveYr)
		#Bavg.mpd = mean(A$sbt[is.element(yrs,aveYr)])
		#LRP.mpd = min(A$sbt)
		Bavg.mpd = sbt.min["Bavg"]
		LRP.mpd = sbt.min["Bmin"]
		USR.mpd = 2. * LRP.mpd ## USR defined as twice the LRP
		Bcurr.Bavg.mpd = Bcurr.mpd/Bavg.mpd
		Bcurr.LRP.mpd = Bcurr.mpd/LRP.mpd
		Bcurr.USR.mpd = Bcurr.mpd/USR.mpd
		## NOTE(review): uses global 'yr' (set by assignGlobals), not A$yr --
		## confirm they always match for the current scenario
		Uavg.mpd = mean(A$ut[is.element(yr,aveYr)])
		Ucurr.Uavg.mpd = Ucurr.mpd/Uavg.mpd
		## Collect MCMC values
		#dmcmc = subset(A$mcproj, TAC==0) # take the first tac (=0) from each posterior sample - the control points do not change with tac
		dmcmc = mcmc2(subset(A$mcproj, TAC==0), start=Burn+1)
		zeros = rep(0,nrow(dmcmc))
		Bcurr.mcmc = dmcmc[,paste0("B",A$currYr)]
		Ucurr.mcmc = 1-exp(-dmcmc[,paste0("F",A$lastYr)])
		## MSY-based
		B0.mcmc = dmcmc[,"B0"] ## same as A$mc$bo
		msy.mcmc = dmcmc[,"MSY"]
		Bmsy.mcmc = dmcmc[,"BMSY"]
		Umsy.mcmc = dmcmc[,"UMSY"]
		## HRP MCMC -- use function `calcHRP' with Burn=0 in 'table.mcmc.mpd()'
		HRPs = calcHRP(A=A, aveYr=aveYr, Burn=Burn) ## one function to collect all of the stuff below
		unpackList(HRPs) ## dumps HRP components (post.abt, bLRPs, bUSRs, post.aut, minYr, ...) into this frame
		#browser();return()
		#post.bt = A$mc.sbt #[,1:Nyears]
		#post.avebt = post.bt[,is.element(yrs,aveYr)]
		#Bavg.mcmc = apply(post.avebt,1,function(x){mean(x)}) ## Bo = Bavg
		Bavg.mcmc = post.abt
		#med.bt = sapply(post.bt,median) ## only used to determine minimum year
		#minYr = yrs[is.element(med.bt,min(med.bt))] ## overrides GUI value or user's value
		## The following takes minimum depletion (BT/Bavg) across year(s) designated as minimum;
		## perhaps should be the median though if only one minimum year is specified, then it makes no difference.
		#LRP.mcmc = apply(post.bt[,is.element(yrs,minYr),drop=FALSE],1,min); ## across years therefore 1000 mins (+ Burn mins)
		#USR.mcmc = 2. * LRP.mcmc
		LRP.mcmc = bLRPs
		USR.mcmc = bUSRs
		Bcurr.Bavg.mcmc = Bcurr.mcmc/Bavg.mcmc
		Bcurr.LRP.mcmc = Bcurr.mcmc/LRP.mcmc
		Bcurr.USR.mcmc = Bcurr.mcmc/USR.mcmc
		#browser(); return()
		#post.ft = mcmc2(A$mc.ft, start=Burn+1) #[,1:Nyears]
		#post.ht = 1 - exp(-post.ft) ## harvest rate (same as HRPs$post.ft)
		#post.ha = post.ht[,is.element(yr,aveYr)] ## harvest rates in years for average (same as HRPs$post.aveut)
		#Uavg.mcmc = apply(post.ha,1,function(x){mean(x)}) ## average harvest rates (1000 MCMC samples) (same as HRPs$post.aut)
		Uavg.mcmc = post.aut ## (unpacked from HRPs)
		Ucurr.Uavg.mcmc = Ucurr.mcmc/Uavg.mcmc
		## Append either model/MSY-based or HRP-based quantities, depending on
		## the GUI useHRP checkbox; only one of the two sections is written.
		if (!val$useHRP) {
			# 2. Add Biomass-based values
			mcmcParamTable <- cbind(mcmcParamTable, zeros, 0.2*B0.mcmc, 0.4*B0.mcmc, B0.mcmc, Bcurr.mcmc, Bcurr.mcmc/B0.mcmc, Ucurr.mcmc)
			mpdParamVector <- c(mpdParamVector, 0, 0.2*B0.mpd, 0.4*B0.mpd, B0.mpd, Bcurr.mpd, Bcurr.mpd/B0.mpd, Ucurr.mpd)
			paramNames <- c(paramNames, "Model-based", paste0(c("0.2","0.4",""),"$B_0$"), paste0("$B_{",A$currYr,"}",c("$","/B_0$")), paste0("$u_{",A$lastYr,"}$"))
			# 3. Add MSY-based values
			mcmcParamTable <- cbind(mcmcParamTable, zeros, 0.4*Bmsy.mcmc, 0.8*Bmsy.mcmc, Bmsy.mcmc, Bmsy.mcmc/B0.mcmc, Bcurr.mcmc/Bmsy.mcmc, msy.mcmc, Umsy.mcmc, Ucurr.mcmc/Umsy.mcmc)
			mpdParamVector <- c(mpdParamVector, 0, 0.4*Bmsy.mpd, 0.8*Bmsy.mpd, Bmsy.mpd, Bmsy.mpd/B0.mpd, Bcurr.mpd/Bmsy.mpd, msy.mpd, Umsy.mpd, Ucurr.mpd/Umsy.mpd)
			paramNames <- c(paramNames, "MSY-based", paste0(c("0.4","0.8",""),"$B_\\mathrm{MSY}$"), paste0(c("$",paste0("$B_{",A$currYr,"}/")),"B_\\mathrm{MSY}",c("/B_0$","$")), "MSY", paste0(c("$",paste0("$u_{",A$lastYr,"}/")),"u_\\mathrm{MSY}$") )
		} else {
			# 4 Add HRP-based values
			mcmcParamTable <- cbind(mcmcParamTable, zeros, Bcurr.mcmc, Bavg.mcmc, LRP.mcmc, USR.mcmc, Bcurr.Bavg.mcmc, Bcurr.LRP.mcmc, Bcurr.USR.mcmc, Uavg.mcmc, Ucurr.Uavg.mcmc )
			mpdParamVector <- c(mpdParamVector, 0, Bcurr.mpd, Bavg.mpd, LRP.mpd, USR.mpd, Bcurr.Bavg.mpd, Bcurr.LRP.mpd, Bcurr.USR.mpd, Uavg.mpd, Ucurr.Uavg.mpd)
			paramNames <- c(paramNames, "HRP-based", paste0("$B_{",A$currYr,"}$"), "$B_\\mathrm{avg}$", paste0(c("$\\mathrm{LRP}=","$\\mathrm{USR}=2"), "B_{", minYr, "}$"), paste0(paste0("$B_{",A$currYr,"}/"),c("B_\\mathrm{avg}$",paste0(c("B_{","2B_{"),minYr,"}$"))), paste0(c("$",paste0("$u_{",A$lastYr,"}/")),"u_\\mathrm{avg}$") )
		}
		colnames(mcmcParamTable) <- paramNames
		## burnin=0 because the burn-in was already stripped via mcmc2 above
		table.mcmc.mpd(mcmcData = mcmcParamTable,
			burnin = 0,
			probs = quantProbs,
			mpdData = mpdParamVector,
			colLabels = paramNames,
			formatOut = "%1.6f", # Decimal places shown regardless of what roundDec is (RH: actually, both this and roundDec need to be the same)
			roundDec = 6, # Number of decimal places
			tableName = "PBMQuants")
	} else {
		cat("No decision tables for age-structured model or bridging analysis\n")
	}
	# try(mb.table.all.param.est(roundDec=2,formatOut="%1.2f"), silent=silent)
	# try(table.i(), silent=silent)
	# try(table.h(mle=F,tableType="ssb"), silent=silent)
	# try(table.h(mle=F,tableType="depletion"), silent=silent)
	# try(table.h(mle=F,tableType="f40spr"), silent=silent)
	#try(table.b(), silent=silent)
	#try(table.c(), silent=silent)
	#try(table.d(), silent=silent)
	#try(table.e1(), silent=silent)
	#try(table.e2(), silent=silent)
	#try(table.f(), silent=silent)
	#try(table.h(mle=T,tableType="ssb"), silent=silent)
	#try(table.h(mle=T,tableType="depletion"), silent=silent)
	#try(table.h(mle=T,tableType="f40spr"), silent=silent)
	#try(table.1(), silent=silent)
	#try(table.2(), silent=silent)
	#try(table.3(), silent=silent)
	#try(table.4(), silent=silent)
}
.writeAllPlots <- function(silent=FALSE, gui=FALSE){
	## Write every figure for every scenario to disk.
	## For each scenario number in the global 'scenarioList', load that
	## scenario's globals (assignGlobals) and then write its full figure set.
	##
	## silent : passed through to .writePlots (suppresses try() messages)
	## gui    : TRUE when driven from the GUI; passed through unchanged
	if (!exists("scenarioList", envir=.GlobalEnv, inherits=FALSE))
		stop("PROBLEM: scenarioList not found in Global environment")
	scenarioList <- get("scenarioList", envir=.GlobalEnv)
	for (scenario in scenarioList){
		assignGlobals(scenario, gui=gui)
		.writePlots(silent=silent, gui=gui)
	}
}
.writePlots <- function(silent=FALSE, gui=TRUE){
### write all figures for the given scenario to disk
windows(width=7,height=7,record=TRUE); frame(); dev.box = dev.cur()
on.exit(dev.off(dev.box))
.setBurnThin(silent=silent, gui=gui)
if(exists("scenarioCurr",envir=.GlobalEnv, inherits=FALSE))
scenarioCurr = get("scenarioCurr",envir=.GlobalEnv)
else stop("PROBLEM: scenarioCurr not found in Global environment")
setWinVal(list(entryScenario=scenarioCurr))
val <- getWinVal()
assign("saveon",TRUE,envir=.GlobalEnv)
aside.calls = c(
"fig.a","fig.effort","fig.e1","fig.e2","fig.g","fig.h","fig.i","fig.j",
"fig.comm.age.residuals1","fig.comm.age.residuals2","fig.comm.age.props",
"fig.survey.age.residuals1","fig.survey.age.residuals2","fig.survey.age.props",
"fig.comm.age.props.fit","fig.survey.age.props.fit","fig.selectivity","fig.phase",
"fig.catchFit","fig.mcmc.density","fig.mcmc.geweke","fig.estimated.params.pairs2",
"fig.estimated.params.pairs.key","fig.estimated.params.pairs.no.log.key","fig.variance.partitions"
)
simple.calls = c(
"fig.catchFit", "fig.surveybiomass.fit", "fig.Fmcmc", "fig.weightFit",
"fig.mcmc.trace", "fig.RecAnomMCMC", "fig.mcmc.autocor", "fig.estimated.params.pairs"
)
for (s in simple.calls)
if (exists(s)) eval(call(s))
### Calls that require argument specifications
### ------------------------------------------
### Spawning biomass (MPD only)
if(exists("fig.biomass.mpd"))
fig.biomass.mpd(ylimit=val$biomassYlim, useMaxYlim=val$maxBiomassYlim, main="Spawning biomass", xlab="Year")
### Spawning depletion (MPD only)
if(exists("fig.depletion.mpd"))
fig.depletion.mpd(ylimit=val$depletionYlim, useMaxYlim=val$maxDepletionYlim, useHRP=val$useHRP, minYr=val$minYr, aveYr=val$aveYr)
### Biomass MCMC with MPD overlay
if(exists("fig.b"))
fig.b(includeMPD=T, ylimit=val$biomassYlim, useMaxYlim=val$maxBiomassYlim, useHRP=val$useHRP, minYr=val$minYr, aveYr=val$aveYr, tac=val$currTAC)
### Depletion MCMC with MPD overlay
if(exists("fig.c"))
fig.c(includeMPD=T, ylimit=val$depletionYlim, useMaxYlim=val$maxDepletionYlim, useHRP=val$useHRP, minYr=val$minYr, aveYr=val$aveYr)
### Recruitment MCMC
if(exists("fig.dmcmc"))
fig.dmcmc(ylimit=val$recruitmentYlim, useMaxYlim=val$maxRecruitmentYlim)
### Recruitment MPD
if(exists("fig.d"))
fig.d(ylimit=val$recruitmentYlim, useMaxYlim=val$maxRecruitmentYlim)
### Biomass and recruitment, two-panel plot
#try(fig.biomass.recruits(yBiomassYlim=val$biomassYlim,
# useMaxBiomassYlim=val$maxBiomassYlim,
# yRecruitmentYlim=val$recruitmentYlim,
# useMaxRecruitmentYlim=val$maxRecruitmentYlim), silent=silent)
#if(exists("fig.time.varying.selectivity")) {
# fig.time.varying.selectivity(2)
# fig.time.varying.selectivity(1)
#}
if(exists("fig.mcmc.priors.vs.posts")) fig.mcmc.priors.vs.posts(exFactor=1.0, showEntirePrior=T)
#if(exists("fig.mcmc.priors.vs.posts2")) fig.mcmc.priors.vs.posts2(exFactor=1.0)
#if(exists("fig.mcmc.priors.vs.postskey")) fig.mcmc.priors.vs.postskey(exFactor=1.0)
if(exists("plotChains") && isScenMcmc()){
dmcmc = mcmc2(A$mc[,parEstimated()],start=Burn+1,thin=Thin)
plotChains(dmcmc,pdisc=0,axes=TRUE,between=list(x=.15,y=.2),col.trace=c("green3","red","blue"),xlab="Sample",ylab="Cumulative Frequency")
}
if(exists("fig.Allcontrol.pts.Box") && isScenMcmc()){
fig.Allcontrol.pts.Box(tac.use=val$currTAC, useHRP=val$useHRP, minYr=val$minYr, aveYr=val$aveYr)
}
# if(delaydiff==1){
# #if(nyr>=2012) {
# dd.calls = c("fig.Allcontrol.pts.Box") #,"fig.Benchmarks"), "fig.MSYcontrol.pts", "fig.Histcontrol.pts"
# for (d in dd.calls)
# if (exists(d)) eval(call(d))
# }
if(exists("plotSnail_old") && isScenMcmc()){ ##RH: taken from PBSawatea
ddmcmc <- A$mcproj
tacs = sort(unique(ddmcmc$TAC))
ntac = which(abs(tacs-tac.use)==min(abs(tacs-tac.use))) ## https://stat.ethz.ch/pipermail/r-help/2008-July/167216.html
tac.use = tacs[ntac]
assign("currYear", A$currYr, envir=.GlobalEnv)
assign("lastYear", A$currYr - 1, envir=.GlobalEnv)
mcmc.prj = mcmc2(subset(A$mcproj, TAC==tac.use),start=(Burn+1),thin=Thin)
mcmc.sbt = mcmc2(A$mc.sbt[,1:length(A$yr)],start=(Burn+1),thin=Thin) ## RH: final year in A$mc.sbt is a pseudo-projection
Bt.mcmc = cbind(mcmc.sbt,Bcurr=mcmc.prj[,paste0("B",currYear)])
names(Bt.mcmc) = A$yrs
## RH: likely not appropriate for non-equilibrium start:
Ut.mcmc = mcmc2(cbind(U0=rep(0,nrow(A$mc.ft)),1.-exp(-A$mc.ft[,1:(length(A$yr)-1)])),start=(Burn+1),thin=Thin)
Ut.mcmc = cbind(Ut.mcmc,Ucurr=1.-exp(-mcmc.prj[,paste0("F",lastYear)]))
## RH: Name with year of Bt even though it is behind 0.5 years
names(Ut.mcmc) = A$yrs
Bmsy.mcmc = mcmc.prj[,"BMSY"]
Umsy.mcmc = mcmc.prj[,"UMSY"]
BoverBmsy = Bt.mcmc/Bmsy.mcmc
UoverUmsy = Ut.mcmc/Umsy.mcmc
plotSnail(BoverBmsy, UoverUmsy, p=c(0.1, 0.9), png=T, path=figDir, PIN=c(7,7))
}
if(exists("fig.snail") && isScenMcmc()){ ##RH: taken from PBSawatea
fig.snail(useHRP=val$useHRP, p=c(0.1, 0.9), png=T, path=figDir, PIN=c(7,7))
}
assign("saveon",FALSE,envir=.GlobalEnv)
return(invisible())
}
.writeSensPlots <- function(silent=FALSE){
  # Write the overlay sensitivity plots (biomass, depletion, recruits and
  # reference points) for every sensitivity group found across the loaded
  # scenarios.
  #
  # Args:
  #   silent: passed through to try() so plotting errors can be suppressed.
  #
  # Side effects: opens a recording plot device, toggles the global 'saveon'
  # flag around the figure calls, and restores the device on exit.
  windows(width=7, height=7, record=TRUE)
  frame()
  on.exit(dev.off(dev.cur()))
  assign("saveon", TRUE, envir=.GlobalEnv)
  val <- getWinVal()
  # Collect the unique non-zero sensitivity groups (group 0 is the base case).
  uniqueSensitivityGroups <- c()
  for(scenario in seq_along(opList)){
    sg <- opList[[scenario]][[4]]$SensitivityGroup
    if(!is.element(sg, uniqueSensitivityGroups) && sg != 0){
      uniqueSensitivityGroups <- c(uniqueSensitivityGroups, sg)
    }
  }
  for(sensitivityGroup in uniqueSensitivityGroups){
    try(fig.base.vs.sens(sensitivityGroup=sensitivityGroup,
                         whichPlot="biomass",
                         useHRP=val$useHRP, minYr=val$minYr, aveYr=val$aveYr,
                         ylimit=val$biomassYlim,
                         useMaxYlim=val$maxBiomassYlim), silent=silent)
    try(fig.base.vs.sens(sensitivityGroup=sensitivityGroup,
                         whichPlot="depletion",
                         useHRP=val$useHRP, minYr=val$minYr, aveYr=val$aveYr,
                         ylimit=val$depletionYlim,
                         useMaxYlim=val$maxDepletionYlim), silent=silent)
    try(fig.base.vs.sens(sensitivityGroup=sensitivityGroup,
                         whichPlot="recruits",
                         useHRP=val$useHRP, minYr=val$minYr, aveYr=val$aveYr,
                         ylimit=val$recruitmentYlim,
                         useMaxYlim=val$maxRecruitmentYlim), silent=silent)
    # NOTE(review): this call uses val$entrySensitivityGroup rather than the
    # loop variable, so the same ref-point plot is redrawn on every pass.
    # Looks like a copy/paste slip, but kept as-is pending confirmation.
    try(fig.base.vs.sens(sensitivityGroup=val$entrySensitivityGroup,
                         whichPlot="refpts",
                         useHRP=val$useHRP, minYr=val$minYr, aveYr=val$aveYr,
                         ylimit=val$RefptSensYlim,
                         useMaxYlim=val$maxRefptSensYlim), silent=silent)
  }
  assign("saveon", FALSE, envir=.GlobalEnv)
}
.runCurrScenario <- function(scenario=val$entryScenario, deleteFiles=FALSE,
                             mleOverride=FALSE, copyADMBExecutables=FALSE,
                             silent=FALSE)
{
  # Run the ADMB model for a single scenario.
  #
  # Steps:
  #  1. Make the model executable reachable: either copy it into the
  #     scenario's folder, or splice the model directory into PATH.
  #  2. Build the command line (MPD or MCMC, with or without the
  #     delay-difference flag).
  #  3. Run the model; for MCMC runs, follow up with an -mceval pass.
  #  4. Reload the scenario outputs into the GUI globals.
  #
  # Args:
  #   scenario:            scenario number.  The default references 'val',
  #                        which is only assigned inside the body; this works
  #                        because R evaluates defaults lazily, after
  #                        val <- getWinVal() has run.
  #   deleteFiles:         delete old output files before running.
  #   mleOverride:         force an MPD (MLE) run even if the GUI says MCMC.
  #   copyADMBExecutables: copy the executable instead of extending PATH.
  #   silent:              passed through to the reload helpers.
  goodToGo <- TRUE
  val <- getWinVal()
  # When running MPD, keep any MCMC output left over from a previous run.
  keepMCMC <- (val$executeType == "sMPD" || mleOverride)
  if(deleteFiles){
    deleteOldRun(scenario, keepMCMC=keepMCMC)
  }
  modelEXE <- exModel
  if(copyADMBExecutables){
    copyExecutableToScenarioFolder(scenario=scenario, silent=silent)
  } else {
    # Insert the model directory into PATH immediately after ".".
    modPath <- sub("\\.;", paste0(".;", modelDir, ";"), Sys.getenv()["PATH"])
    Sys.setenv(PATH=modPath)
  }
  rscriptsDir <- getwd()          # remember so we can change back afterwards
  setwd(opList[[scenario]][[1]])  # work inside this scenario's directory
  # Assemble the command-line flags once instead of duplicating the whole
  # branch structure for the delay-difference case (the original repeated
  # every branch verbatim just to append "-delaydiff").
  ddFlag <- if(val$delaydiff == 1) "-delaydiff" else NULL
  if(val$executeType == "sMPD" || mleOverride){
    flags <- NULL
    if(!is.na(val$maxfn)){
      flags <- c(flags, "-maxfn", val$maxfn)
    }
    modelCall <- paste(c(modelEXE, flags, ddFlag), collapse=" ")
  } else if(val$executeType == "sMCMC"){
    if(is.na(val$mcmc) || is.na(val$mcsave)){
      cat("Error - check your mcmc and mcsave boxes for valid values.\n")
      goodToGo <- FALSE
    } else {
      mcevalCall <- paste(c(modelEXE, "-mceval", ddFlag), collapse=" ")
      flags <- c("-mcmc", val$mcmc, "-mcsave", val$mcsave)
      if(!is.na(val$maxfn)){
        flags <- c(flags, "-maxfn", val$maxfn)
      }
      modelCall <- paste(c(modelEXE, flags, ddFlag), collapse=" ")
    }
  }
  if(goodToGo){
    shell(modelCall)
    if(val$executeType == "sMCMC"){
      shell(mcevalCall)
    }
  }
  setwd(rscriptsDir)
  loadScenario(scenario, silent=silent)
  assignGlobals(scenario, silent=silent)
}
.loadPlottingLimits <- function(silent=FALSE){
  # Copy the current scenario's saved y-axis limits into the corresponding
  # GUI entry boxes and checkboxes (the main, sensitivity and retrospective
  # plots share the same limit values).
  #
  # Args:
  #   silent: passed to try() around setWinVal().  BUGFIX: previously
  #           'silent' was referenced here without being defined in this
  #           function's scope, which errored unless a global happened to
  #           exist.
  winList <- NULL
  val <- getWinVal()
  scenario <- val$entryScenario
  lims <- opList[[scenario]][[4]]   # this scenario's stored plot limits
  # BIOMASS YLIMITS
  if(!is.null(lims$biomassYlim)){
    winList <- c(winList, biomassYlim=lims$biomassYlim)
    winList <- c(winList, biomassSensYlim=lims$biomassYlim)
    winList <- c(winList, biomassRetroYlim=lims$biomassYlim)
  }
  if(!is.null(lims$maxBiomassYlim)){
    winList <- c(winList, maxBiomassYlim=lims$maxBiomassYlim)
    winList <- c(winList, maxBiomassSensYlim=lims$maxBiomassYlim)
    winList <- c(winList, maxBiomassRetroYlim=lims$maxBiomassYlim)
  }
  # TOTAL BIOMASS YLIMITS
  # NOTE(review): the Sens/Retro entries below reuse the spawning-biomass
  # widget names (likely a copy/paste slip), so they overwrite the values
  # set above.  Kept as-is pending confirmation of the intended widget
  # names (tbiomassSensYlim / tbiomassRetroYlim?).
  if(!is.null(lims$tbiomassYlim)){
    winList <- c(winList, tbiomassYlim=lims$tbiomassYlim)
    winList <- c(winList, biomassSensYlim=lims$tbiomassYlim)
    winList <- c(winList, biomassRetroYlim=lims$tbiomassYlim)
  }
  # BUGFIX: this guard previously tested maxBiomassYlim while assigning the
  # maxtBiomassYlim values, gating the total-biomass maxima on the wrong
  # field.
  if(!is.null(lims$maxtBiomassYlim)){
    winList <- c(winList, maxtBiomassYlim=lims$maxtBiomassYlim)
    winList <- c(winList, maxtBiomassSensYlim=lims$maxtBiomassYlim)
    winList <- c(winList, maxtBiomassRetroYlim=lims$maxtBiomassYlim)
  }
  # DEPLETION YLIMITS
  if(!is.null(lims$depletionYlim)){
    winList <- c(winList, depletionYlim=lims$depletionYlim)
    winList <- c(winList, depletionSensYlim=lims$depletionYlim)
    winList <- c(winList, depletionRetroYlim=lims$depletionYlim)
  }
  if(!is.null(lims$maxDepletionYlim)){
    winList <- c(winList, maxDepletionYlim=lims$maxDepletionYlim)
    winList <- c(winList, maxDepletionSensYlim=lims$maxDepletionYlim)
    winList <- c(winList, maxDepletionRetroYlim=lims$maxDepletionYlim)
  }
  # RECRUITMENT YLIMITS
  if(!is.null(lims$recruitmentYlim)){
    winList <- c(winList, recruitmentYlim=lims$recruitmentYlim)
    winList <- c(winList, recruitmentSensYlim=lims$recruitmentYlim)
    winList <- c(winList, recruitmentRetroYlim=lims$recruitmentYlim)
  }
  if(!is.null(lims$maxRecruitmentYlim)){
    winList <- c(winList, maxRecruitmentYlim=lims$maxRecruitmentYlim)
    winList <- c(winList, maxRecruitmentSensYlim=lims$maxRecruitmentYlim)
    winList <- c(winList, maxRecruitmentRetroYlim=lims$maxRecruitmentYlim)
  }
  try(setWinVal(winList), silent=silent)
}
.checkEntries <- function(){
  # Validate the Scenario number entered on the GUI.
  #
  # Returns TRUE when the entry falls within the range of known scenarios;
  # otherwise pops up an alert box and returns FALSE (so no plot is drawn).
  val <- getWinVal()
  scenarioList <- as.numeric(rownames(viewHeader))
  currScenario <- val$entryScenario
  # Scalar comparison, so use the short-circuit || (the original used the
  # vectorized | operator).
  if(currScenario < min(scenarioList) || currScenario > max(scenarioList)){
    showAlert(paste("Your scenario must be between ",
                    min(scenarioList)," and ",
                    max(scenarioList),".\nNo plot will be drawn.",sep=""),
              title="Scenario Error", icon="warning")
    return(FALSE)
  }
  return(TRUE)
}
# .getWinName (get the current winName)
# Purpose:    Determine which GUI is active (guiSim, guiView, guiPerf, etc.)
# Parameters: None
# Returns:    A character containing the name of the current GUI window
# Source:     A.R. Kronlund, modified from PBSref (helper_funs.r)
.getWinName <- function(){
  activeWin <- .PBSmod$.activeWin
  # PBSask stores the caller's window name in a hidden "win" field; fetch it
  # and strip the trailing linefeed.  Only required while PBSask is in use.
  if(activeWin == "PBSask"){
    activeWin <- getWinVal("win", winName="PBSask")[[1]]
    activeWin <- gsub("\n", "", activeWin)
  }
  return(activeWin)
}
.closeActWin <- function(){
  # Close whichever PBSmodelling GUI window is currently active.
  activeWindow <- .getWinName()
  closeWin(activeWindow)
}
|
89fd630657513f68eae74f75a675317cd4916f86
|
0012be753cc009d8e162d2a2a3aa058ab88d59b9
|
/static/sample-scripts/test-periods.R
|
00577ed868bb49bc205b89535276b6d18c298730
|
[
"MIT"
] |
permissive
|
weecology/updating-data
|
5f640fe8d2bae30da7b82b91394217d027e163da
|
0c69e8bc43dc1e062b578c1a7d511e5cf6261727
|
refs/heads/master
| 2021-06-06T00:22:23.387450
| 2021-05-06T19:42:23
| 2021-05-06T19:42:23
| 160,896,919
| 0
| 3
|
MIT
| 2021-05-06T19:42:24
| 2018-12-08T02:38:03
|
R
|
UTF-8
|
R
| false
| false
| 257
|
r
|
test-periods.R
|
# Test that period numbers in the base data fall inside the valid range.
library(testthat)
library(dplyr)   # NOTE(review): loaded but not used below -- confirm needed.

# context() is deprecated in testthat 3e; kept for compatibility with the
# existing test runner.
context("checks that period values are valid")

# Path is relative to the tests directory.
base_data <- read.csv('../data/data.csv',
                      stringsAsFactors = FALSE)

test_that("period numbers are valid", {
  # NOTE(review): all() returns NA if 'period' contains NA values, which
  # fails the expectation -- confirm whether NAs should be allowed.
  expect_true(all(base_data$period < 1000))
})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.