blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
839288f421149103a1538d4a2a7a710ee65d9441
|
7d0551dd360330dd82a621636bde45d91714d511
|
/airGR/H_model_190613.R
|
0968d30a2389e1f307363948ae9419d68af9c131
|
[] |
no_license
|
thora-9/handouts_sesync
|
a745261df007d00a0764aa174a2dbb3322f2eb1b
|
2a0c909e47159619821d4ec76ddb95c70addc728
|
refs/heads/master
| 2020-06-23T00:12:54.248621
| 2019-09-25T23:05:07
| 2019-09-25T23:05:07
| 198,441,477
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,847
|
r
|
H_model_190613.R
|
# Fit polynomial rating-curve models relating stage height (H) to Pmax and
# Qmin (and to P_100 / P_50 from a second file); fitted models can be saved
# as .rda via the commented save() calls.
library(mosaic)
library(nlstools)
in_data = read.csv('H_Pmax_Qmin.csv')
# Stage-Pmax relationship (degree-4 polynomial)
plot(in_data$H,in_data$Pmax)
in_sub = subset(in_data, H<203)
# NOTE(review): the model is fitted on the full in_data although in_sub
# (H < 203) was just created -- confirm whether data = in_sub was intended.
f = lm(Pmax~poly(H,4), data = in_data)
in_x = seq(150,203,by=1)
plot(in_x, predict(f,list(H=in_x)),type = 'l')#, ylim = c(0,16),col='red',xlab = "Stage Height (m)", ylab = "Volume (km3)")
points(in_sub$H,in_sub$Pmax)
#save(f,file='H_model.rda')
# Stage-Qmin relationship for H < 208 (degree-10 polynomial)
plot(in_data$H,in_data$Qmin)
in_sub2 = subset(in_data, H<208)
f2 = lm(Qmin~poly(H,10), data = in_sub2)
in_x = seq(170,203,by=1)
plot(in_x, predict(f2,list(H=in_x)),type = 'l')#, ylim = c(0,16),col='red',xlab = "Stage Height (m)", ylab = "Volume (km3)")
# NOTE(review): overlays in_sub (H<203) points on a model fitted to in_sub2
# (H<208) -- confirm whether in_sub2 was intended here.
points(in_sub$H,in_sub$Qmin)
#save(f2,file='Q_min_H_max.rda')
# Linear Qmin model for the high-stage regime (H > 207)
in_sub2 = subset(in_data, H>207)
f3 = lm(Qmin~poly(H,1), data = in_sub2)
in_x = seq(208,210,by=0.1)
plot(in_x, predict(f3,list(H=in_x)),type = 'l')#, ylim = c(0,16),col='red',xlab = "Stage Height (m)", ylab = "Volume (km3)")
points(in_sub2$H,in_sub2$Qmin)
#save(f3,file='Q_min_H_max_208.rda')
# Degree-4 models of P_100 and P_50 vs H from the second input file
in_data2 = read.csv('H100_50_Qmin.csv')
plot(in_data2$H,in_data2$P_100)
plot(in_data2$H,in_data2$P_50)
in_data100 = subset(in_data2, H<208)
f_100 = lm(P_100~poly(H,4), data = in_data100)
in_x = seq(170,207,by=1)
plot(in_x, predict(f_100,list(H=in_x)),type = 'l')#, ylim = c(0,16),col='red',xlab = "Stage Height (m)", ylab = "Volume (km3)")
points(in_data100$H,in_data100$P_100)
#save(f_100,file='Q_min_H_100.rda')
in_data50 = subset(in_data2, H<207)
f_50 = lm(P_50~poly(H,4), data = in_data50)
in_x = seq(170,207,by=1)
plot(in_x, predict(f_50,list(H=in_x)),type = 'l')#, ylim = c(0,16),col='red',xlab = "Stage Height (m)", ylab = "Volume (km3)")
points(in_data50$H,in_data50$P_50)
#save(f_50,file='Q_min_H_50.rda')
|
97b077f2c3e8174ced63515e5e42a5819670fc17
|
3ad3ce5f38d636b649abd6e2e8741d482d8f6d72
|
/R/head_abbrev.R
|
4a4d4085db4b9ef56ee8cb8fc525d287cba1b236
|
[] |
no_license
|
cran/pubmed.mineR
|
279ce024df5b7913885ec96ad906226aa78633ce
|
7612a2d68f503794f8dee8479e8b388949e4b70d
|
refs/heads/master
| 2021-12-09T18:39:44.578225
| 2021-11-26T14:50:03
| 2021-11-26T14:50:03
| 17,919,888
| 5
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 366
|
r
|
head_abbrev.R
|
head_abbrev = function(limits,term,pmid,abs){
  # Show `term` in context: for every occurrence of `term` in the abstract(s)
  # returned by pmids_to_abstracts(pmid, abs), return the substring running
  # `limits` characters before the match start to `limits` characters after it.
  # `limits` defaults to 50 when missing; returns "match not found" when the
  # term does not occur.
  if (missing(limits)) limits = 50
  test = pmids_to_abstracts(pmid, abs)
  # Fixed-string (not regex) search for the term.
  hits = gregexpr(term, test@Abstract, fixed = TRUE)
  starts = hits[[1]]
  # gregexpr signals "no match" with a single -1. The original tested the
  # whole position vector in `if`, which errors in R >= 4.2 whenever the term
  # occurs more than once; test only the first element.
  if (starts[1] == -1) {
    return("match not found")
  }
  out = NULL
  for (i in seq_along(starts)) {
    out = c(out, substr(test@Abstract, starts[i] - limits, starts[i] + limits))
  }
  return(out)
}
|
fbd88eb42f5965ecc10f7db04176b95d5df31857
|
002929791137054e4f3557cd1411a65ef7cad74b
|
/tests/testthat/test_calcGU.R
|
d0c39e98a03e51389d199001d9e1df2f9e40ee2a
|
[
"MIT"
] |
permissive
|
jhagberg/nprcgenekeepr
|
42b453e3d7b25607b5f39fe70cd2f47bda1e4b82
|
41a57f65f7084eccd8f73be75da431f094688c7b
|
refs/heads/master
| 2023-03-04T07:57:40.896714
| 2023-02-27T09:43:07
| 2023-02-27T09:43:07
| 301,739,629
| 0
| 0
|
NOASSERTION
| 2023-02-27T09:43:08
| 2020-10-06T13:40:28
| null |
UTF-8
|
R
| false
| false
| 722
|
r
|
test_calcGU.R
|
#' Copyright(c) 2017-2020 R. Mark Sharp
#' This file is part of nprcgenekeepr
# Unit tests for calcGU() on the bundled ped1Alleles data set, varying the
# threshold, byID and pop arguments. The expected counts below pin previously
# observed outputs (regression values -- presumably from a trusted run of
# calcGU; confirm against the package if they ever change).
context("calcGU")
library(testthat)
data("ped1Alleles")
test_that("calcGU forms dataframe with correct calculations", {
# Default population, byID = FALSE, two thresholds: count rows whose gu == 50.
gu_1 <- calcGU(ped1Alleles, threshold = 1, byID = FALSE, pop = NULL)
gu_3 <- calcGU(ped1Alleles, threshold = 3, byID = FALSE, pop = NULL)
expect_equal(length(gu_1$gu[gu_1$gu == 50]), 110)
expect_equal(length(gu_3$gu[gu_3$gu == 50]), 43)
# Same variables reused for the byID = TRUE and restricted-pop cases.
gu_1 <- calcGU(ped1Alleles, threshold = 2, byID = TRUE, pop = NULL)
gu_3 <- calcGU(ped1Alleles, threshold = 3, byID = FALSE,
pop = ped1Alleles$id[20:60])
expect_equal(length(gu_1$gu[gu_1$gu == 50]), 53)
expect_equal(length(gu_3$gu[gu_3$gu == 50]), 0)
})
|
38452d95be99ecd3eeddc3f0be55e859c24629cd
|
d84c9108891529387cb5bdb1a0fc328b407a4d66
|
/Predefined Contrasts/Predefined_Contrasts/Standardize.R
|
72f8c2fb51ab7c854be71372e6fd7cd2afa4aa54
|
[] |
no_license
|
tongtongtom/demo_predictions
|
a9c87f5813fb069c23146d3b8e6297f6be074e12
|
a905c941e88fa944f91e7eaf6518d90d01a3c0ba
|
refs/heads/master
| 2020-03-26T10:52:44.248502
| 2018-09-12T09:38:48
| 2018-09-12T09:38:48
| 144,818,972
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,210
|
r
|
Standardize.R
|
# Standardize every metric column of Data within its SystemCode group:
# z-score each value against the group mean and standard deviation
# ((x - group mean) / group sd). Group sds of exactly 0 are replaced by 1 so
# constant columns divide safely. Returns a copy of Data with the metric
# columns standardized; all other columns (e.g. SystemCode) are untouched.
standardizeDataSet = function(Data)
{
  # The single source of truth for the metric columns (the original repeated
  # this 67-name vector five times inline).
  metric_cols = c("AddictiveRate","balance","BetAmountSums","Bite","BitePerBet","BitePerUser",
                  "CDepositBet","CDepositPTime","CDepositPUser","ChurnRate1","ChurnRate30",
                  "ChurnRate7","CommissionableBet","CommissionableSums","CommissionableUser",
                  "CreditDepositBet","CreditDepositPTime","CreditDepositSums","CreditDepositTimes",
                  "CreditDepositUser","CreditWithdrawBet","CreditWithdrawPTime",
                  "CreditWithdrawSums","CreditWithdrawTimes","CreditWithdrawUser",
                  "DepositAmount","DepositBet","DepositSums","DepositTimes","DepositUser",
                  "DiscountBets","DiscountSums","DiscountUser","FavorableBet","FavorableSums",
                  "FavorableUser","FirstDepositAmount","FirstDepositBet","FirstDepositNumber",
                  "FirstDepositpTime","FirstDepositUser","MemberCounts","MemberDiscountBet",
                  "MemberDiscountSums","MemberDiscountUser","OthersBet","OthersSums",
                  "OthersUser","OWithdrawBet","OWithdrawPTime","OWithdrawUser","PayoffBet",
                  "PayoffSums","PayoffUser","RetentionRate1","RetentionRate30","RetentionRate7",
                  "ThirdPartyPaymentBet","ThirdPartyPaymentPTime",
                  "ThirdPartyPaymentSums","ThirdPartyPaymentTimes","ThirdPartyPaymentUser",
                  "WagersCounts","WithdrawAmount","WithdrawBet","WithdrawSums","WithdrawTimes",
                  "WithdrawUser")
  # Per-SystemCode means and sds; aggregate puts the group key in Group.1.
  data_mean = aggregate(Data[, metric_cols], by = list(Data$SystemCode), FUN = 'mean')
  data_var = aggregate(Data[, metric_cols], by = list(Data$SystemCode), FUN = 'sd')
  # Avoid division by zero for constant-within-group columns.
  data_var[data_var == 0] = 1
  Data2 = Data
  # unique() fixes the original, which looped once per ROW of Data and so
  # redundantly re-standardized each group once per member row.
  for (category_id in unique(Data$SystemCode))
  {
    # c() turns the one-row data frames into lists so the arithmetic recycles
    # column-by-column across the group's rows (Ops.data.frame semantics).
    Data2[Data$SystemCode == category_id, metric_cols] =
      ((Data[Data$SystemCode == category_id, metric_cols] -
          c(data_mean[data_mean$Group.1 == category_id, metric_cols])
      )) /
      c(data_var[data_var$Group.1 == category_id, metric_cols])
  }
  # Diagnostics: mean/var of a few standardized columns.
  # NOTE(review): category_id is whatever the loop ended on, so only the last
  # SystemCode group is summarized -- confirm whether all groups were meant.
  print(apply( Data2[Data$SystemCode == category_id,c('FavorableSums','DiscountSums',
                                                      'OthersSums','MemberDiscountSums')], 2, mean))
  print(apply( Data2[Data$SystemCode == category_id,c('FavorableSums','DiscountSums',
                                                      'OthersSums','MemberDiscountSums')], 2, var))
  return(Data2)
}
|
bee36f3b5fbe762d3a5b9171df1f216b2cdb9d46
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/highfrequency/tests/testthat/tests_jump_tests.R
|
531eda40bcce892bf5d9b2fdc034a9f20b09e31c
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,632
|
r
|
tests_jump_tests.R
|
library("testthat")
library("highfrequency")
# Regression tests for highfrequency's jump-test functions: the expected
# strings pin previously observed outputs (formatted to fixed precision) on
# the package's bundled sample data sets, so numeric changes are caught.
context("AJjumpTest unit test")
test_that("AJjumpTest unit test",{
expect_equal(
formatC(AJjumpTest(sampleTData$PRICE, p = 2, k = 3, alignBy = "seconds", alignPeriod = 5, makeReturns = TRUE)$ztest, digits = 10),
"-2.462012017"
)
})
context("JO jump test unit test")
test_that("JO jump test unit test",{
# One z-statistic per series element; compared as formatted strings.
expect_equal(
formatC(as.numeric(JOjumpTest(sample5MinPricesJumps[,1], power = 6)$ztest), digits = 10),
c("0.2237433282","0.8492610224", "8.714153635", "-18.43875721", "33.00286362", "-1.12530156",
"-0.1693194718", "1.946341487", "-0.8662709953", "14.27443109", " 2.90820588", "5.505960335","-1.437705957",
"-0.07068737283","0.7935449771", "-10.81545189", "1.577474946", "-0.09450737237","-3.262432421")
)
expect_equal(
formatC(as.numeric(JOjumpTest(sample5MinPricesJumps[,1], power = 4)$ztest), digits = 4),
c("0.2345","1.043", "6.265", "-13.97", " 29.4", "-0.9539", "-0.1651", "1.328", "-0.9089", " 17", "2.782", "4.522", "-1.585", "-0.0738", "0.636",
"-11.33", "1.072", "-0.09093", "-2.554")
)
})
context("BNSjumpTest")
test_that("BNSjumpTest", {
expect_equal(
formatC(BNSjumpTest(sampleTData$PRICE, IVestimator= "minRV", 209, IQestimator = "medRQ", type= "linear", makeReturns = TRUE)$pvalue, digits = 0),
c(PRICE = "0")
)
})
context("intradayJumpTest")
test_that("LM test",{
## Extract the prices and set the time-zone to the local time-zone
library(xts)
# data.table subset: one trading day, DT/PRICE columns only.
dat <- sampleTDataMicroseconds[as.Date(DT) == "2018-01-02", list(DT, PRICE)]
jumpTest <- intradayJumpTest(pData = dat, volEstimator = "RM", driftEstimator = "none", alpha = 0.95, RM = "bipower",
lookBackPeriod = 10, dontIncludeLast = TRUE, on = "minutes", k = 5,
marketOpen = "9:30:00", marketClose = "16:00:00", tz = "GMT")
# The plot's x-axis limits must span exactly the first and last observation.
P1 <- plot(jumpTest)
lims <- P1$get_xlim()
expect_equal(
lims[1], as.numeric(as.POSIXlt(dat[1,DT]))
)
expect_equal(
lims[2], as.numeric(as.POSIXlt(dat[nrow(dat),DT]))
)
})
test_that("FoF test",{
dat <- sampleTData$PRICE
tzone(dat) <- "GMT"
storage.mode(dat) <- "numeric"
FoFtest <- intradayJumpTest(pData = dat, volEstimator = "PARM", driftEstimator = "none", alpha = 0.95, RM = "bipower",
theta = 1, lookBackPeriod = 50, marketOpen = "9:30:00", marketClose = "16:00:00", tz = "GMT")
# Same x-limit check as above, for the xts input path.
P1 <- plot(FoFtest)
lims <- P1$get_xlim()
expect_equal(
lims[1], as.numeric(index(dat))[1]
)
expect_equal(
lims[2], as.numeric(index(dat))[nrow(dat)]
)
})
|
44e0e881e7163542717816356bf65d7dbe6756a4
|
416debf44d2175b5be50ff53c251a2ccc27176f5
|
/5_variant_plots/eduAttainInstruments.R
|
d120fbd048776962393bdb7d1466417afae8467b
|
[
"MIT"
] |
permissive
|
nay421/myopia-education-MR
|
5d1e709c12c67a10d16656008c88d34a64a485db
|
6469712909c20968311b066e61c4ace769697382
|
refs/heads/master
| 2021-09-16T10:18:23.839786
| 2018-06-19T14:29:12
| 2018-06-19T14:29:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,389
|
r
|
eduAttainInstruments.R
|
#!/usr/bin/env Rscript
#
# Per-variant association statistics for Okbay et al. educational-attainment
# (EA) instruments in UK Biobank: regresses eduyears, myopia, MSE and visual
# acuity on each variant's dosage (adjusting for sex, age and PC1-10), writes
# the merged betas to TSV, then plots each outcome's beta against the EA beta.
library("ggplot2")
library("tidyverse")
library("rio")
library("MASS")
library("dplyr")
# NOTE(review): absolute, machine-specific path -- breaks anywhere else.
setwd("/mnt/seconddrive/data/phd/ukbiobank/myopia_EA_project/5_variant_plots/")
# Args
indata = "../2_derive_variables/output/phenotypes_alleleScores_170724.Rdata"
indosage = "../1_extract_instruments/output/EA_UKBB_Okbay.dosage"
# Load
load(indata)
dosagedata = import(indosage, format="tsv", header=T)
# Make exclusions
data = data %>% filter(!genoExcl & coreSampleGeno)
# Create data subset in same order as variants
# (dosage columns 8+ are samples; strip the "X" prefix from column names)
sample_order = gsub("X", "", colnames(dosagedata)[8:ncol(dosagedata)])
data_sub = data[match(sample_order, data$eid), ]
#
# EA instruments ###########################################################
#
#
# Calc betas for EA ~ EA instruments
#
# Define regression function
# NOTE(review): run_regression is redefined three more times below under the
# same name; the sections only work when run in order, top to bottom.
run_regression = function(row) {
dosages = as.numeric(row[8:length(row)])
summ = summary(lm(data_sub$eduyears_clean ~ dosages +
data_sub$sex_genetic +
data_sub$age +
data_sub$PC1 + data_sub$PC2 + data_sub$PC3 + data_sub$PC4 +
data_sub$PC5 + data_sub$PC6 + data_sub$PC7 + data_sub$PC8 +
data_sub$PC9 + data_sub$PC10,
na.action=na.omit))
# Coefficient row 2 is the dosage term.
data.frame(rsid=row["rsid"],
b=summ$coefficients[2, 1],
se=summ$coefficients[2, 2],
p=summ$coefficients[2, 4],
n=length(summ$residuals))
}
# Get results
results = as.data.frame(do.call(rbind, apply(dosagedata, 1, run_regression)))
rownames(results) = results[, 1]
results = results[, -c(1)]
colnames(results) = c("b_ea", "se_ea", "p_ea", "n_ea")
eaInstr_ea_results = results
#
# Calc betas for Myopia ~ EA instruments (logistic regression)
#
# Define regression function (overwrites the previous run_regression)
run_regression = function(row) {
dosages = as.numeric(row[8:length(row)])
summ = summary(glm(data_sub$isMyopic ~ dosages +
data_sub$sex_genetic +
data_sub$age +
data_sub$PC1 + data_sub$PC2 + data_sub$PC3 + data_sub$PC4 +
data_sub$PC5 + data_sub$PC6 + data_sub$PC7 + data_sub$PC8 +
data_sub$PC9 + data_sub$PC10,
na.action=na.omit, family=binomial))
data.frame(rsid=row["rsid"],
b=summ$coefficients[2, 1],
se=summ$coefficients[2, 2],
p=summ$coefficients[2, 4],
n=summ$df.residual)
}
# Get results
results = as.data.frame(do.call(rbind, apply(dosagedata, 1, run_regression)))
rownames(results) = results[, 1]
results = results[, -c(1)]
colnames(results) = c("b_myopia", "se_myopia", "p_myopia", "n_myopia")
eaInstr_myopia_results = results
#
# Calc betas for MSE ~ EA instruments
#
# Define regression function (overwrites the previous run_regression)
run_regression = function(row) {
dosages = as.numeric(row[8:length(row)])
summ = summary(lm(data_sub$ave_MSE_clean ~ dosages +
data_sub$sex_genetic +
data_sub$age +
data_sub$PC1 + data_sub$PC2 + data_sub$PC3 + data_sub$PC4 +
data_sub$PC5 + data_sub$PC6 + data_sub$PC7 + data_sub$PC8 +
data_sub$PC9 + data_sub$PC10,
na.action=na.omit))
data.frame(rsid=row["rsid"],
b=summ$coefficients[2, 1],
se=summ$coefficients[2, 2],
p=summ$coefficients[2, 4],
n=length(summ$residuals))
}
# Get results
results = as.data.frame(do.call(rbind, apply(dosagedata, 1, run_regression)))
rownames(results) = results[, 1]
results = results[, -c(1)]
colnames(results) = c("b_mse", "se_mse", "p_mse", "n_mse")
eaInstr_mse_results = results
#
# Calc betas for VA ~ EA instruments
#
# Define regression function (overwrites the previous run_regression; this
# one additionally adjusts for ave_MSE_clean)
run_regression = function(row) {
dosages = as.numeric(row[8:length(row)])
summ = summary(lm(data_sub$ave_logmar_clean_log ~ dosages +
data_sub$sex_genetic +
data_sub$age +
data_sub$ave_MSE_clean +
data_sub$PC1 + data_sub$PC2 + data_sub$PC3 + data_sub$PC4 +
data_sub$PC5 + data_sub$PC6 + data_sub$PC7 + data_sub$PC8 +
data_sub$PC9 + data_sub$PC10,
na.action=na.omit))
data.frame(rsid=row["rsid"],
b=summ$coefficients[2, 1],
se=summ$coefficients[2, 2],
p=summ$coefficients[2, 4],
n=length(summ$residuals))
}
# Get results
results = as.data.frame(do.call(rbind, apply(dosagedata, 1, run_regression)))
rownames(results) = results[, 1]
results = results[, -c(1)]
colnames(results) = c("b_va", "se_va", "p_va", "n_va")
eaInstr_va_results = results
#
# Merge and plot
#
# Split variant information
var_info = dosagedata[,1:7]
variants = dosagedata[,8:ncol(dosagedata)]
# Calc EAF
var_info$eaf = apply(variants, 1, function(x) {sum(x) / (2*length(x))})
# Combine
eaInstr_res = cbind(var_info,
eaInstr_myopia_results,
eaInstr_ea_results,
eaInstr_mse_results,
eaInstr_va_results)
# NOTE(review): col.name only partial-matches write.table's col.names
# argument -- spell it out if this line is ever touched.
write.table(eaInstr_res, file="output/eaInstr_stats.tsv", sep="\t",
quote=F, col.name=NA)
# Load
# (round-trips the results through the TSV just written)
eaInstr_res = read_tsv("output/eaInstr_stats.tsv")
# Plot myopia vs. EA
ggplot(eaInstr_res, aes(y=b_myopia, x=b_ea)) +
geom_hline(yintercept=0, colour="orangered3", linetype="dotted") +
geom_vline(xintercept=0, colour="orangered3", linetype="dotted") +
geom_point() +
geom_errorbarh(aes(xmax=b_ea+1.96*se_ea, xmin=b_ea-1.96*se_ea), alpha=0.5) +
geom_errorbar(aes(ymax=b_myopia+1.96*se_myopia, ymin=b_myopia-1.96*se_myopia), alpha=0.5) +
geom_smooth(method='rlm',formula=y~x, se=T, alpha=0.2, fullrange=T) +
labs(title="Okbay et al. Eduyears instruments in UKBB",
y="β for Myopia (<-0.75 SphE) in UKBB [w 95% CI]",
x="β for Eduyears in UKBB [w 95% CI]")
ggsave(file="output/eaInstr_EA_myopia.png", height=10, width=12, dpi=300, units="cm")
# Plot MSE vs. EA
ggplot(eaInstr_res, aes(y=b_mse, x=b_ea)) +
geom_hline(yintercept=0, colour="orangered3", linetype="dotted") +
geom_vline(xintercept=0, colour="orangered3", linetype="dotted") +
geom_point() +
geom_errorbarh(aes(xmax=b_ea+1.96*se_ea, xmin=b_ea-1.96*se_ea), alpha=0.5) +
geom_errorbar(aes(ymax=b_mse+1.96*se_mse, ymin=b_mse-1.96*se_mse), alpha=0.5) +
geom_smooth(method='rlm',formula=y~x, se=T, alpha=0.2, fullrange=T) +
labs(title="Okbay et al. Eduyears instruments in UKBB",
y="β for Refractive Error (MSE) in UKBB [w 95% CI]",
x="β for Eduyears in UKBB [w 95% CI]")
ggsave(file="output/eaInstr_EA_RE.png", height=10, width=12, dpi=300, units="cm")
# Plot VA vs. EA
ggplot(eaInstr_res, aes(y=b_va, x=b_ea)) +
geom_hline(yintercept=0, colour="orangered3", linetype="dotted") +
geom_vline(xintercept=0, colour="orangered3", linetype="dotted") +
geom_point() +
geom_errorbarh(aes(xmax=b_ea+1.96*se_ea, xmin=b_ea-1.96*se_ea), alpha=0.5) +
geom_errorbar(aes(ymax=b_va+1.96*se_va, ymin=b_va-1.96*se_va), alpha=0.5) +
geom_smooth(method='rlm',formula=y~x, se=T, alpha=0.2, fullrange=T) +
labs(title="Okbay et al. Eduyears instruments in UKBB",
y="β for Visual Acuity (mean logMAR) in UKBB [w 95% CI]",
x="β for Eduyears in UKBB [w 95% CI]")
ggsave(file="output/eaInstr_EA_VA.png", height=10, width=12, dpi=300, units="cm")
|
f045c9096fc1e19c662069874f0464212959b2f8
|
b8c542780ffd64ded0400810acc59fc1da670612
|
/maxdev.R
|
75d0db4897d877f81f178059f97d1f396ad05f01
|
[] |
no_license
|
denohora/mtR
|
ef505578bf730570ccd077b021bb3367fc0952e7
|
1f0ddbdc97875b5cfc527caee011899bcaad5035
|
refs/heads/master
| 2021-01-01T06:54:08.511793
| 2013-08-15T09:33:21
| 2013-08-15T09:33:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,187
|
r
|
maxdev.R
|
maxdev <- function (x,y) {
  # Signed maximum perpendicular deviation of the trajectory (x, y) from the
  # straight line joining its first and last points.
  # (Translated from Rick Dale's matlab areaunder3.)
  #
  # Args:
  #   x, y: numeric vectors of equal length (trajectory coordinates).
  # Returns:
  #   Scalar: positive when the most-deviant point lies above the
  #   endpoint-to-endpoint line, negative when below.
  # NOTE(review): endpoints with identical x give an infinite/NaN slope --
  # unhandled, exactly as in the original.
  dx <- x[1] - x[length(x)]
  dy <- y[1] - y[length(y)]
  slope <- dy / dx
  intercept <- y[1] - slope * x[1]
  # Vertical offset of each point from the line (the original also included a
  # sqrt() term over the x-difference, which is identically zero), scaled by
  # cos(atan(slope)) to get the perpendicular distance.
  offset <- y - (slope * x + intercept)
  abs_devs <- abs(offset) * cos(atan(slope))
  # which.max() returns the first index of the maximum, matching the original
  # match(max(abs.devs), abs.devs) idiom; re-attach the sign of the offset.
  idx <- which.max(abs_devs)
  return(abs_devs[idx] * sign(offset[idx]))
}
# # test x and y
# x = as.numeric(interp.traj[1,9:109])
# y = as.numeric(interp.traj[1,110:210])
#
# maxdev(x,y)
# maxdev(rawtraj[,2],rawtraj[,3] ) # raw trajectory and interped traj have oposite max ds because
# maxdev(trialtraj[,2],trialtraj[,3] )
|
0451ea6dabde753ddcf5180b6e6be6a8d5a8bc4b
|
987c5b180118a16e8bbcef5b42fc449d3768b060
|
/copula_helpers.R
|
f5f2d4ebb353593ecb6d94371c8eafb2894a3d10
|
[] |
no_license
|
sachin-aag/Bivariate-Copulas
|
1bed0f71a1cd65b90742c68f3bddf7b0981334b7
|
e12d5d9dd41d57187011f2cf3dc81d907f8dc180
|
refs/heads/master
| 2022-01-05T06:35:21.726746
| 2019-04-07T20:06:06
| 2019-04-07T20:06:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,259
|
r
|
copula_helpers.R
|
#'Normalise the time series
#'Min-max normalisation of a numeric vector, ignoring missing values when
#'computing the range.
#'@param col The vector to be normalised
#'@return Normalised vector (0 at the minimum, 1 at the maximum; NAs kept)
normalise<-function(col) {
  lo = min(col, na.rm = TRUE)
  hi = max(col, na.rm = TRUE)
  (col - lo) / (hi - lo)
}
#'Obtain month over month transformation
#'Ratio of each value to the previous value. The first element is NA, as is
#'any element whose previous value is NA or zero.
#'@param column The vector/time series
#'@return Numeric vector the same length as \code{column} holding the
#'  month-over-month ratios
getMoM<-function(column){
  len = length(column)
  # A series shorter than 2 has no previous month anywhere; the original
  # 2:length(column) loop ran backwards and failed on such input.
  if (len < 2L) {
    return(rep(NA_real_, len))
  }
  prev = column[-len]
  MoM = c(NA_real_, column[-1] / prev)
  # Undefined wherever the previous value is missing or zero.
  MoM[c(FALSE, is.na(prev) | prev == 0)] = NA_real_
  return(MoM)
}
#'Obtain year over year transformation
#'Ratio of each value to the value 12 periods earlier. The first 12 elements
#'are NA, as is any element whose year-ago value is NA or zero.
#'@param column The vector/time series (monthly frequency assumed)
#'@return Numeric vector the same length as \code{column} holding the
#'  year-over-year ratios
getYoY<-function(column){
  len = length(column)
  # No element has a value 12 months back; the original 13:length(column)
  # loop ran backwards and misbehaved on series shorter than 13.
  if (len < 13L) {
    return(rep(NA_real_, len))
  }
  prev = column[seq_len(len - 12L)]
  YoY = c(rep(NA_real_, 12L), column[13:len] / prev)
  # Undefined wherever the year-ago value is missing or zero.
  YoY[c(rep(FALSE, 12L), is.na(prev) | prev == 0)] = NA_real_
  return(YoY)
}
#'Obtain lead time adjusted \code{KPI} and \code{indicator}
#'
#'Aligns the two series so that the indicator leads the KPI by \code{lag}
#'periods: the first \code{lag} KPI points and the last \code{lag} indicator
#'points are dropped.
#'@param KPI The time series which we wish to predict
#'@param indicator The exogeneous time series which we use to predict the \code{KPI}
#'@param lag The lead time between the KPI and the indicator
#'@return A dataframe containing the KPI and the lead time adjusted indicator
getp1p2<-function(KPI,indicator,lag=0){
  aligned_kpi = KPI[(1 + lag):(length(KPI))]
  aligned_ind = indicator[(1:(length(indicator) - lag))]
  return(data.frame('indicator' = aligned_ind, 'KPI' = aligned_kpi))
}
#'Find useful indicators for a KPI
#'Scan every (indicator, KPI, lag) combination and keep those whose absolute
#'Kendall's tau exceeds \code{threshold}.
#'@param KPI Data frame of KPI time series (one column per KPI)
#'@param indicators Data frame of candidate indicator time series
#'@param lags The set of lead times we look at
#'@param threshold The threshold for kendall's tau above which we consider an indicator useful in modelling the KPI
#'@return A list of lists, each holding ts1_name (indicator), ts2_name (KPI),
#'  lag, kendall and pearson for one combination above the threshold
getUseful_combs<-function(KPI,indicators,threshold=.23,lags=0:18){
  useful_comb<-list()
  count=1
  for(ind_name in colnames(indicators)){
    ts1=indicators[,ind_name]
    for(kpi_name in colnames(KPI)){
      ts2=KPI[,kpi_name]
      for(k in lags){
        # Align: indicator leads the KPI by k periods.
        ts1_temp=ts1[(1:(length(ts1)-k))]
        ts2_temp=ts2[((1+k):(length(ts2)))]
        # Compute Kendall's tau once (the original computed it twice, once
        # into a dead array and once into cde).
        cde=abs(cor(ts1_temp,ts2_temp,method='kendall',use='pairwise.complete.obs'))
        if(is.na(cde)){
          cde=0
        }
        if(cde>threshold){
          temp_list=list()
          temp_list[['ts1_name']]=ind_name
          temp_list[['ts2_name']]=kpi_name
          temp_list[['lag']]=k
          temp_list[['kendall']]=cde
          temp_list[['pearson']]=abs(cor(ts1_temp,ts2_temp,use='pairwise.complete.obs'))
          useful_comb[[count]]=temp_list
          count=count+1
        }
      }
    }
  }
  return(useful_comb)
}
#'Calculate negative log likelihood
#'Negative log likelihood of the vector \code{x} under a skew-t distribution.
#'@param x The vector for which we wish to find the negative log likelihood
#'@param params The parameters of the skew-t distribution: df first, gamma second
#'@return The negative log likelihood of \code{x} with respect to the skew-t
#'  distribution specified by \code{params}
l_skewT = function(x,params){
  densities = dskt(x, df = params[1], gamma = params[2])
  -sum(log(densities))
}
#'Calculate MAPE
#'Mean absolute percentage error between prediction and target, skipping
#'positions where the target is missing or zero. One of the metrics used to
#'benchmark our results.
#'
#'@param pred The prediction from the model
#'@param target The actuals for the said period
#'@return The MAPE between the prediction and the target (NaN when every
#'  target is missing or zero, matching the original 0/0 behaviour)
getMAPE<-function(pred,target){
  # Indexed by pred's length, as the original loop was.
  t = target[seq_along(pred)]
  usable = !is.na(t) & t != 0
  # Vectorized replacement for the original scalar accumulation loop.
  return(mean(abs((pred[usable] - t[usable]) / t[usable])))
}
#'Compute RMSE
#'Root mean square error between prediction and target, skipping positions
#'where the target is missing. One of the metrics to benchmark our results.
#'@param pred The prediction from the model
#'@param target The actuals for the said period
#'@return The RMSE between the prediction and the target (NaN when every
#'  target is missing, matching the original 0/0 behaviour)
getRMSE<-function(pred,target){
  # Indexed by pred's length, as the original loop was.
  t = target[seq_along(pred)]
  usable = !is.na(t)
  # Vectorized replacement for the scalar loop; the original's abs() around
  # a square was redundant and is dropped.
  return(sqrt(mean((pred[usable] - t[usable])^2)))
}
#'Compute row norms
#'Apply \code{norm()} (default type "O", the one norm) to each row of the
#'matrix. NOTE(review): for a plain-matrix row, coerced to a column matrix,
#'this is the sum of absolute values -- NOT the Euclidean (L2) norm the
#'original documentation claimed. Confirm which norm downstream code expects
#'before changing \code{type}.
#'@param diff_mat The matrix whose rows are measured
#'@return A numeric vector containing one norm value per row
getNorms<-function(diff_mat){
  # vapply preallocates the result (the original grew a vector with c()),
  # and seq_len() is safe for zero-row input, unlike 1:nrow().
  vapply(seq_len(nrow(diff_mat)),
         function(r) norm(as.matrix(diff_mat[r, ])),
         numeric(1))
}
#'Calculate optimal lead time
#'Scan lead times 0..\code{max.lag}, measuring Kendall's tau between the KPI
#'and the lag-adjusted indicator; optionally plot the tau profile.
#'@param KPI A vector which we wish to find the optimal lead time for
#'@param indicator The exogeneous time series vector
#'@param max.lag The maximum lead time which we look at while finding the optimal lead time
#'@param plot A boolean option which we use to indicate whether plot should be generated or not
#'@return The index of the maximum tau in the scan. Note position 1
#'  corresponds to lag 0, so the returned value is the best lag plus one.
plotccf_kendall<-function(KPI,indicator,max.lag=18,plot=FALSE){
  tau_profile <- vapply(0:max.lag, function(lead) {
    aligned <- getp1p2(KPI = KPI, indicator = indicator, lag = lead)
    cor(aligned, method = 'kendall', use = 'complete.obs')[1, 2]
  }, numeric(1))
  if (plot == TRUE) {
    plot(tau_profile, type = 'b')
  }
  return(which.max(tau_profile))
}
#'Extract trend
#'Extract the trend component from \code{data} via STL decomposition
#'(monthly frequency assumed).
#'@param data The vector/time series from which the trend is to be extracted
#'@param s.window Seasonal loess window passed to \code{stl}
#'@param t.window Trend loess window passed to \code{stl}
#'@return Plain numeric vector holding the trend component
getTrend<-function(data,s.window=7,t.window=12){
  decomposed <- stl(ts(data, frequency = 12), s.window = s.window, t.window = t.window)
  # Column 2 of stl's time.series matrix is the trend component.
  as.vector(decomposed$time.series[, 2])
}
#'Extract Seasonality
#'Extract the seasonality from the \code{data} vector
#'@param data The vector/time series from which the seasonality is to be extracted
#'@return The seasonality after extraction from data
getSeasonality<-function(data,s.window=7,t.window=12){
  # STL decomposition of `data` at monthly frequency (12); the seasonal part
  # is the first column of the decomposition's time.series matrix.
  decomposition <- stl(ts(data, frequency = 12), s.window = s.window, t.window = t.window)
  as.vector(decomposition$time.series[, 1])
}
#'Remove trend and seasonality
#'Remove the trend and seasonality from the \code{data} vector
#'@param data The vector/time series from which the trend and seasonality is to be removed
#'@return The data after removing trend and seasonality
remTrendSeasonality<-function(data,s.window=7,t.window=12,seasonality=1,trend=1){
  # Residual after subtracting (optionally weighted) trend and seasonal
  # components. `trend` / `seasonality` act as multipliers: 1 removes the
  # component entirely, 0 leaves it in place.
  trend_part <- trend * getTrend(data, s.window = s.window, t.window = t.window)
  seasonal_part <- seasonality * getSeasonality(data, s.window = s.window, t.window = t.window)
  data - trend_part - seasonal_part
}
#'Find best indicator and lead time
#'Gets the indicator and lead time from a set of indicators which ahs the maximum kendall's tau with repsect to the said KPI
#'@param KPI The KPi for which we wish to find an exogeneous time series and appropriate lead time
#'@param indicators The set of indicators which we look at to find the best exogeneous time series variable
#'@param max.lag The maximum lag which is considered while finding an exogeneous time series
#'@param min.lag The minimum lag which is considered while finding an exogeneous time series
#'@return A list conating the indicator name and the lag which provide maximum dependency with the KPI
getMaxKtau<-function(KPI,indicators,max.lag=18,min.lag=5){
  # Scan every indicator column and every lead time in [min.lag, max.lag],
  # keeping the combination with the largest |Kendall's tau| against KPI.
  max_tau <- 0
  # Fix: initialise the winners up front. The original only set
  # `max_ind_name` inside the `if (abs(cor) > max)` branch, so when every
  # correlation was 0 or NA the function errored on an undefined name at
  # return time. (It also created an unused `max_ind` variable.)
  max_ind_name <- colnames(indicators)[1]
  max_lag <- min.lag
  for (cnt in seq_along(indicators)) {
    series <- indicators[[cnt]]
    for (lg in min.lag:max.lag) {
      df <- getp1p2(KPI, series, lag = lg)
      tau <- cor(df$KPI, df$indicator, method = 'kendall', use = 'pairwise.complete.obs')
      if (is.na(tau)) {
        tau <- 0
      }
      if (abs(tau) > max_tau) {
        max_tau <- abs(tau)
        max_ind_name <- colnames(indicators)[cnt]
        max_lag <- lg
      }
    }
  }
  # Positional list kept for backward compatibility:
  # [[1]] max |tau|, [[2]] indicator name, [[3]] lag.
  list(max_tau, max_ind_name, max_lag)
}
#'Compute MASE
#'Get the mean absolute square error for the prediction and the target. One of the metrics to benchamrk our results.
#'@param pred The prediction from the model
#'@param target The actuals for the said period
#'@return The MASE between the prediction and the target
getMASE<-function(pred,target){
  # Mean absolute scaled error: total |pred - target| divided by the total
  # absolute one-step naive forecast error on the actuals, rescaled by
  # (total - 1) / total because the naive denominator has one fewer term.
  # Vectorised replacement for the original element loop; `target` is
  # indexed to length(pred) to match the loop's bounds exactly.
  total <- length(pred)
  actual <- target[seq_len(total)]
  abs_err <- sum(abs(pred - actual))
  naive_err <- sum(abs(diff(actual)))
  (abs_err / naive_err) * (total - 1) / total
}
#'Place multiple plots together
#'Utility function to place multiple plots generated by \code{ggplot} on the same page
#'@return NULL
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  # Arrange several ggplot objects on one page using a grid layout.
  # Plots may be passed directly via `...` or as a list via `plotlist`.
  # `layout` (a matrix of plot indices) overrides `cols` when supplied.
  # `file` is accepted for signature compatibility but is not used.
  library(grid)
  all_plots <- c(list(...), plotlist)
  n_plots <- length(all_plots)
  if (is.null(layout)) {
    # Default layout: `cols` columns, as many rows as needed, filled by column.
    n_rows <- ceiling(n_plots / cols)
    layout <- matrix(seq_len(cols * n_rows), ncol = cols, nrow = n_rows)
  }
  if (n_plots == 1) {
    print(all_plots[[1]])
  } else {
    # Open a fresh page with one viewport cell per layout slot.
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    for (idx in seq_len(n_plots)) {
      # Locate the cell(s) of `layout` assigned to plot `idx` and draw there.
      pos <- as.data.frame(which(layout == idx, arr.ind = TRUE))
      print(all_plots[[idx]],
            vp = viewport(layout.pos.row = pos$row, layout.pos.col = pos$col))
    }
  }
}
#'Calculate required business transformation
#'Calculate month-over-month or year-over-year transformation based on argument
#'@param series The time series which needs to be transformed
#'@param trafo A string which should be one of c(NULL, 'mom','yoy') to specify which transformation.
#'@return Return the transformed series.
get_transformed_series<-function(series,trafo=NULL){
  # Apply the requested business transformation. Anything other than
  # 'mom' or 'yoy' (including NULL) returns the series untouched.
  if (is.null(trafo)) {
    return(series)
  }
  switch(trafo,
         mom = getMoM(series),
         yoy = getYoY(series),
         series)
}
#'Reverse business tranformation
#'Reverse the business transformation to obtain the raw forecast and time series
#'@param series The transformed series
#'@param trafo A string which should be one of c(NULL, 'mom','yoy') to specify which transformation to reverse
#'@param init_vals The initial values from the raw time series.
#'@return The raw time series after reversing the business transformation.
rev_transformation<-function(series,trafo=NULL,init_vals){
  # Invert the business transformation applied by get_transformed_series().
  # `init_vals` supplies the leading raw values needed to rebuild levels:
  # the first value for 'mom', the first twelve for 'yoy'.
  if (is.null(trafo)) {
    return(series)
  }
  if (trafo == 'mom') {
    return(revMoM(init_vals[1], series))
  }
  if (trafo == 'yoy') {
    return(revYoY(init_vals[1:12], series))
  }
  # Fix: an unrecognised transformation previously fell off the end and
  # returned NULL invisibly; mirror get_transformed_series() and pass the
  # series through unchanged instead.
  series
}
#'Reverse month over month transformation
#'@param series The transformed series
#'@param init_vals The initial values from the raw time series.
#'@return The raw time series after reversing the month over month transformation.
revMoM<-function(init_val,MoM_vals){
  # Rebuild the raw level series from an initial level and a vector of
  # month-over-month ratios: raw[i + 1] = raw[i] * MoM_vals[i].
  # cumprod() replaces the original O(n^2) grow-by-append loop; an empty
  # ratio vector yields just the initial value, as before.
  c(init_val, init_val * cumprod(MoM_vals))
}
#'Reverse month over month transformation
#'@param series The transformed series
#'@param init_vals The initial values from the raw time series.
#'@return The raw time series after reversing the year over year transformation.
revYoY<-function(init_vals,YoY_vals){
  # Rebuild the raw series from the first length(init_vals) raw values
  # (normally 12 months) and year-over-year ratios:
  # raw[n0 + i] = raw[i] * YoY_vals[i].
  # Preallocated instead of growing the vector with c() on every iteration.
  n0 <- length(init_vals)
  raw_data <- c(init_vals, numeric(length(YoY_vals)))
  for (i in seq_along(YoY_vals)) {
    raw_data[n0 + i] <- raw_data[i] * YoY_vals[i]
  }
  raw_data
}
#'Wrapper function for copula forecast
#'@param KPI The time series which needs to be predicted
#'@param indicator_set The set of indicators which are looked through for finding an exogeneous time series
#'@param indicator_name The indicator which is used to forecast the \code{KPI}. If it is \code{NULL}, then maximum kendall's tau is used to automatically pick the exogeneous time series
#'@param min.lag The minimum lead time which is looked at while picking the exogeneous time series
#'@param max.lag The maximum lwead time which is looked at while picking the exogeneous time series
#'@param kpi_trafo The transformation which is applied to the KPI before forecasting
#'@param ind_trafo The transformation which is applied to the indicator before forecasting
#'@param KPI_outlier_corr Boolean to indicate whether to perform outlier correction on the KPI
#'@param indicator_outlier_corr Boolean to indicate whether to perform outlier correction on the indicator
#'@param offset Indicates whether the copula forecast should be offset or not. The offset is determined by the
#'@param dates Deprecated
#'@return The median and 2 sigma forecast from the copula model
getCopfc<-function(KPI,indicator_set,indicator_name=NULL,min.lag=5,
max.lag=18,lag=NULL,kpi_trafo=NULL,ind_trafo=NULL,
KPI_outlier_corr=FALSE,indicator_outlier_corr=FALSE,
offset=TRUE,dates=NULL){
# Keep the untransformed KPI for the combined output frame at the end.
KPI_orig<-KPI
# Apply the requested business transformations to KPI and every indicator.
KPI<-get_transformed_series(KPI,trafo=kpi_trafo)
indicator_set<-as.data.frame(sapply(indicator_set,function(x) get_transformed_series(x,trafo=ind_trafo)))
# If no indicator was named, pick the one (and lag) with max Kendall's tau.
if(is.null(indicator_name)){
best_ind<-getMaxKtau(KPI,indicator_set,min.lag = min.lag,max.lag = max.lag)
indicator_name<-best_ind[[2]]
lag<-best_ind[[3]]
}
# NOTE(review): this reads from a global `indicators` object, not the
# `indicator_set` argument transformed above -- likely a bug; the function
# silently depends on a variable defined outside its scope.
indicator<-indicators[,indicator_name]
# Impute missing indicator values with a moving average.
indicator<-imputeTS::na.ma(indicator)
if(is.null(lag)){
lag<-plotccf_kendall(KPI,indicator=indicator)
}
#browser()
# Pair up KPI and indicator, keeping only rows observed for both.
df<-as.data.frame(cbind(KPI,indicator))
colnames(df)<-c('KPI','indicator')
if(!is.null(dates)){
df$dates<-dates
}
df<-df[complete.cases(df),]
# First 12 rows are kept so rev_transformation() can rebuild raw levels.
df_beg<-df[1:12,]
# The last `lag` indicator values drive the out-of-sample forecast below.
ind_vals<-df$indicator[(nrow(df)-lag+1):nrow(df)]
dates<-df$dates
# Align KPI with the indicator shifted by `lag`.
df<-getp1p2(df$KPI,df$indicator,lag=lag)
#df$dates<-dates[]
if(KPI_outlier_corr){
df$KPI<-as.vector(tsoutliers::tso(ts(df$KPI))$yadj)
}
if(indicator_outlier_corr){
# NOTE(review): this corrects the indicator but writes the result into
# df$KPI, overwriting the KPI column -- almost certainly should assign
# to df$indicator instead.
df$KPI<-as.vector(tsoutliers::tso(ts(df$indicator))$yadj)
}
# Remember the moments so forecasts can be mapped back to original units.
mean_ind<-mean(df$indicator)
mean_KPI<-mean(df$KPI)
sd_ind<-sd(df$indicator)
sd_KPI<-sd(df$KPI)
df$indicator<-as.vector(scale(df$indicator))
df$KPI<-as.vector(scale(df$KPI))
# Fit skew-t marginals to both standardised series by maximum likelihood
# (l_skewT is defined elsewhere in this project).
indicator_df = optim(f = l_skewT, par=c(3,0.5),x=df$indicator ,method='Nelder')
KPI_df = optim(f = l_skewT, par=c(3,0.5),x=df$KPI,method='Nelder')
param_indicator<-indicator_df$par
param_KPI<-KPI_df$par
# Probability-integral transform both margins, then select a bivariate copula.
cdf_indicator<-pskt(df$indicator,df=param_indicator[1],gamma=param_indicator[2])
cdf_KPI<-pskt(df$KPI,df=param_KPI[1],gamma=param_KPI[2])
bc<-VineCopula::BiCopSelect(cdf_indicator,cdf_KPI)
# Accumulators for the forecast summary statistics, one entry per lead step.
median_fc<-numeric()
mean_fc<-numeric()
low_fc <- numeric()
high_fc <- numeric()
high2_fc<-numeric()
low2_fc<-numeric()
quantiles_fc<-list()
i=1
s<-(1:99)/100
quantiles_mat<-matrix(nrow=length(s),ncol=lag)
#browser()
# Standardise the out-of-sample indicator values with the in-sample moments.
ind_vals<-(ind_vals-mean_ind)/sd_ind
for(ind_val in ind_vals){ # ind_val = -1.559593
# Simulate the KPI conditional on the observed indicator value via the copula.
ind_cdf<-pskt(ind_val,df=param_indicator[1],gamma=param_indicator[2])
cond_sim_cdf<-VineCopula::BiCopCondSim(100000,cond.val=ind_cdf,cond.var = 1,obj=bc)
cond_sim<-qskt(cond_sim_cdf,df=param_KPI[1],gamma=param_KPI[2])
median_fc<-c(median_fc,median(cond_sim))
mean_fc<-c(mean_fc,mean(cond_sim))
high_fc <-c(high_fc, summary(cond_sim)[5])
low_fc <-c(low_fc, summary(cond_sim)[2])
high2_fc<-c(high2_fc,quantile(cond_sim,.95))
low2_fc<-c(low2_fc,quantile(cond_sim,.05))
quantiles_fc[[i]]<-c(quantiles_fc,quantile(cond_sim,s))
quantiles_mat[,i]<-as.vector(quantile(cond_sim,s))
i<-i+1
}
# Assemble the per-step summaries into a forecast data frame.
fc_df<-as.data.frame(cbind(as.vector(low2_fc),as.vector(low_fc),as.vector(mean_fc),as.vector(median_fc),
as.vector(high_fc),as.vector(high2_fc)))
colnames(fc_df)<-c('minus_two_sig','minus_sig','mean','median','sig','two_sig')
# NOTE(review): apply(fc_df, 1, ...) returns its result transposed, so each
# of these two calls flips rows/columns; the pair appears to rely on the
# double transpose cancelling out -- confirm the final orientation.
fc_df<- as.data.frame( apply(fc_df, 1, function(x) return(x*sd_KPI+mean_KPI) ))
fc_df<-as.data.frame( apply(fc_df, 1, function(x) rev_transformation(x,trafo = kpi_trafo,df_beg$KPI) ))
if(offset){
# Anchor the first copula forecast to a one-step ARIMA forecast of the KPI
# by shifting the whole forecast frame by the difference.
ar_mod<-auto.arima(KPI)
next_fc<-forecast(ar_mod,h=1)
next_fc<-as.vector(next_fc$mean)
offset<-fc_df$mean[1]-next_fc
fc_df<-fc_df-offset
}
# NOTE(review): combined_df (history + forecast with the raw KPI attached)
# is built here but never returned -- the function returns fc_df below.
combined_df<-as.data.frame(matrix(NA,nrow=length(KPI_orig),ncol=ncol(fc_df)))
colnames(combined_df)<-colnames(fc_df)
combined_df<-rbind(combined_df,fc_df)
combined_df$KPI<-c(KPI_orig,rep(NA,lag))
return(fc_df)
}
|
9417e36075f5bf66c0d33575ece0ed4fdba902ac
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/SixSigma/examples/ss.data.doe2.Rd.R
|
ff5142a5d5cf90935358c6546a64c8f3d4c66d5a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 273
|
r
|
ss.data.doe2.Rd.R
|
library(SixSigma)
### Name: ss.data.doe2
### Title: Data for the pizza dough example (robust design)
### Aliases: ss.data.doe2
### Keywords: data doe
### ** Examples
# Load the bundled pizza-dough robust-design dataset and summarise it.
data(ss.data.doe2)
summary(ss.data.doe2)
# Box-and-whisker plot of score by temperature, panelled by time.
lattice::bwplot(score ~ temp | time, data = ss.data.doe2)
|
25e1ae2ad0badeb4cd7c499dfb69f61d043706d2
|
51864fb6306cb11dc5e4d79b1e3125bd619eb3a7
|
/TDA_exp/1/test.R
|
94871ded9e75f847172bff4fc054faec813e3b3c
|
[] |
no_license
|
Jayanth-kumar5566/TDA
|
18504ab326da3992fcff61821462b0569f653737
|
892844e6445d8518f5cf35c4bd2cc3918538f2e0
|
refs/heads/master
| 2021-04-27T02:50:52.104043
| 2018-07-02T09:46:52
| 2018-07-02T09:46:52
| 122,701,898
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,171
|
r
|
test.R
|
library("TDA")
# Read the point cloud and keep only the first two rows/points.
X<-read.csv("5/data6.csv")
X=X[1:2,]
summary(X)
#Creating grid
by<-0.1
margin_x<-seq(0,200,by=by)
margin_y<-seq(1,20,by=by)
Grid=expand.grid(margin_x,margin_y)
#plot(Grid)
# Distance function from every grid point to the point cloud X.
distance <- distFct(X = X, Grid = Grid)
# 3-D surface of the distance function over the grid.
persp(margin_x, margin_y,matrix(distance, ncol = length(margin_y), nrow = length(margin_x)), xlab = "",
ylab = "", zlab = "", theta = -20, phi = 35, ltheta = 50,
col = 2, border = NA, main = "KDE", d = 0.5, scale = FALSE,
expand = 3, shade = 0.9)
# Persistence diagrams: Vietoris-Rips filtration and grid-based filtration.
DiagRips <- ripsDiag(X = X,1,20,
library = "GUDHI", location = TRUE, printProgress = FALSE)
DiagGrid<-gridDiag(X=X,FUN=distFct,lim=cbind(c(1,20),c(1,20)),by=by,
maxdimension = 0,sublevel = FALSE,printProgress = TRUE)
#jpeg('persistance.jpg')
# Side-by-side: the point cloud, the grid diagram (barcode), the Rips diagram.
par(mfrow=c(1,2))
plot(X,xlim=c(1,20),ylim=c(1,20))
plot(DiagGrid[["diagram"]],main='L2 norm',barcode = TRUE)
plot(DiagRips[["diagram"]],main='L2 norm')
# From here on, work with the Rips diagram instead of the grid diagram.
DiagGrid=DiagRips
# The diagram matrix is stored column-major as (dimension, birth, death);
# extract birth/death values of the dimension-0 features and plot them
# against the diagonal.
i=dim(DiagGrid$diagram)[1]
j=length(which(DiagGrid$diagram[1:i]==0))
birth=DiagGrid$diagram[(i+1):(i+j)]
death=DiagGrid$diagram[(2*i+1):(2*i+j)]
plot(birth,death,'p',xlim=c(0,25),ylim = c(0,25))
lines(0:15,0:15)
#dev.off()
|
818b4a1809d37b2745d62c1f76a35e90a3c29ae0
|
8585dd8814d82d9a0870804d8a5acf9ad650d0ed
|
/man/point_to_interval_distance.Rd
|
e1347a3f1cc077d837bac43cb17b1c3dfacf7971
|
[] |
no_license
|
brentonk/coefbounds
|
7500d38188c87b41c2b6ebdbef5f1d5f04517dce
|
7c7b65a7d34ecec01ac6a6f1062c4eeab24cab08
|
refs/heads/master
| 2021-01-17T19:17:25.817055
| 2016-06-28T21:33:03
| 2016-06-28T21:33:03
| 59,677,826
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 696
|
rd
|
point_to_interval_distance.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hausdorff_distance.r
\name{point_to_interval_distance}
\alias{point_to_interval_distance}
\title{Distance from a point to an interval}
\usage{
point_to_interval_distance(point, interval)
}
\arguments{
\item{point}{vector of points}
\item{interval}{matrix of intervals, with 2 columns and number of rows equal
to the length of \code{point}}
}
\value{
vector of distances
}
\description{
Calculates the distance from each point to the closest element of the
corresponding interval. Doesn't check validity of arguments; should only be
called within hausdorff_distance().
}
\author{
Brenton Kenkel
}
\keyword{internal}
|
0ad93b787245f5b27627bde1e9a741b5a0af322a
|
c8e490752620f08d43e6120844e9f46adf72ee4d
|
/R/ThreeArmedTrials.R
|
76563de29e969f4fd48c9dc89310afa467ccb52c
|
[] |
no_license
|
cran/ThreeArmedTrials
|
a4b31e0a9b43108e0811ee28a2b22f195e606c81
|
b9743d694a1c0b5cc6efc6d3d0172c3c9d79068e
|
refs/heads/master
| 2022-12-23T21:43:25.366401
| 2022-12-16T12:00:05
| 2022-12-16T12:00:05
| 28,789,762
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,678
|
r
|
ThreeArmedTrials.R
|
#' @name ThreeArmedTrials
#' @docType package
#' @title Design and Analysis of Three-armed Clinical Non-Inferiority or Superiority Trials with Active and Placebo Control
#' @description The package \pkg{ThreeArmedTrials} provides functions for designing
#' and analyzing non-inferiority or superiority trials with an active and a placebo control.
#' Non-inferiority and superiority are defined through the hypothesis
#' \eqn{(\lambda_P - \lambda_E)/(\lambda_P - \lambda_R) \le \Delta} with the alternative hypothesis
#' \eqn{(\lambda_P - \lambda_E)/(\lambda_P - \lambda_R) > \Delta}.
#' The parameters \eqn{\lambda_E}, \eqn{\lambda_R}, and \eqn{\lambda_P} are associated with
#' the distribution of the endpoints and smaller values of \eqn{\lambda_E}, \eqn{\lambda_R},
#' and \eqn{\lambda_P} are considered to be desirable. A detailed description of these parameters
#' can be found in the help file of the individual functions. The margin \eqn{\Delta} is between 0
#' and 1 for testing non-inferiority and larger than 1 for testing superiority.
#'
#' A detailed discussion of the hypothesis can be found in Hauschke and Pigeot (2005).
#'
#' The statistical theory for negative binomial distributed endpoint has been developed by Muetze et al. (2015).
#' @import stats
#' @import MASS
#' @author Tobias Muetze \email{tobias.muetze@@outlook.com}
#' @references
#' Hauschke, D. and Pigeot, I. 2005. \dQuote{Establishing efficacy of a new experimental treatment in the 'gold standard' design.} Biometrical Journal 47, 782--786.
#' Muetze, T. et al. 2015. \dQuote{Design and analysis of three-arm trials with negative binomially distributed endpoints.} \emph{Submitted.}
NULL
|
6a20855414688c651ddc07f0b0f3b72bfd2335ca
|
e86b47a5bd015f11e376ad4a2fc23d61b0aab782
|
/man/FDR_control_adaptive.Rd
|
5950a61a8e8b89e908437f1017afa327618c01ad
|
[] |
no_license
|
ziqiaow/IMIX
|
57e60fd825db4612dfe630e50e2ba459b82c3c3c
|
ec276b2944c55403ede4edc5b299e7b4aa976ac2
|
refs/heads/master
| 2022-11-17T16:02:43.601468
| 2022-11-10T02:02:44
| 2022-11-10T02:02:44
| 238,075,557
| 6
| 5
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,820
|
rd
|
FDR_control_adaptive.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FDR_control_adaptive.R
\name{FDR_control_adaptive}
\alias{FDR_control_adaptive}
\title{The Adaptive Procedure for Across-Data-Type FDR Control}
\usage{
FDR_control_adaptive(lfdr, alpha)
}
\arguments{
\item{lfdr}{Local FDR for each gene of the mixture model results for one component or a combination of components}
\item{alpha}{Prespecified FDR control level}
}
\value{
The estimated mFDR for the target component or component combinations and whether each gene is classified in this component/combination after FDR control at the alpha level, 1 is yes, 0 is no.
\item{significant_genes_with_FDRcontrol}{The output of each gene ordered by the components based on FDR control and within each component ordered by the local FDR, "localFDR" is 1-posterior probability of each gene in the component based on the maximum posterior probability, "class_withoutFDRcontrol" is the classified component based on maximum posterior probability, "class_FDRcontrol" is the classified component based on the across-data-type FDR control at alpha level}
\item{estimatedFDR}{The estimated marginal FDR value for each component starting from component 2 (component 1 is the global null)}
\item{alpha}{Prespecified nominal level for the across-data-type FDR control}
}
\description{
The adaptive procedure for across-data-type FDR control based on the output from IMIX models, this can be directly performed by IMIX function, however, if the user is interested in other mixture models, alpha level or combinations of components, this function would be useful.
}
\examples{
\donttest{
# First load the data
data("data_p")
# Specify initial values (this step could be omitted)
mu_input <- c(0,3,0,3)
sigma_input <- rep(1,4)
p_input <- rep(0.5,4)
test1 <- IMIX(data_input = data_p,data_type = "p",mu_ini = mu_input,sigma_ini = sigma_input,
p_ini = p_input,alpha = 0.1,model_selection_method = "AIC")
# Check the selected model based on AIC value
test1$`Selected Model`
# Below is an example for data example 1 in controlling
# the FDR at 0.2 for component 2 & component 4.
# First calculate the local FDR for component 2 & component 4:
lfdr_ge_combined <- 1 - (test1$IMIX_cor_twostep$`posterior prob`[,2] +
test1$IMIX_cor_twostep$`posterior prob`[,4]) # Class 2: (ge+,cnv-); class 4: (ge+,cnv+)
names(lfdr_ge_combined) <- rownames(test1$IMIX_cor_twostep$`posterior prob`)
# Perform the across-data-type FDR control for component 2 & component 4 at alpha level 0.2
fdr_control1 <- FDR_control_adaptive(lfdr = lfdr_ge_combined, alpha = 0.2)
}
}
\references{
Ziqiao Wang and Peng Wei. 2020. “IMIX: a multivariate mixture model approach to association analysis through multi-omics data integration.” Bioinformatics. <doi:10.1093/bioinformatics/btaa1001>.
}
|
7fb8adef8b262ba8f8c46ed59b42ff9461beba7d
|
c7d5fa4a80cf89aeb6e17159a0953ad90ad1f4dc
|
/man/popgen.Rd
|
02ea28a2be9d18311838653fc22f81b3976522dc
|
[] |
no_license
|
cran/PopGenKit
|
4ac75f13f1f189d006d2fe95550eecb8854d1011
|
57588283dc44a661993babce5570e2bec9a3945b
|
refs/heads/master
| 2021-01-18T08:07:28.105429
| 2011-07-21T00:00:00
| 2011-07-21T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,054
|
rd
|
popgen.Rd
|
\name{popgen}
\alias{popgen}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Compute allele frequencies, population genetic parameters, and pairwise Fst values from a Arlequin (.arp) input file
%% ~~function to do ... ~~
}
\description{This function does not do anything that other population genetic software could not do, but provides a quick way to obtain allele frequencies in a table format (overall and within each population), and it can calculate allelic richness, number of private alleles, expected and observed heterozygosity (He and Ho), and population pairwise Fst values, for each locus and across all markers.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
popgen(datafile, ndigit = 3, freq.overall = T, freq.by.pop = T, genetic.stats = T, pairwise.fst = T)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{datafile}{Name of the input Arlequin file, with extension (e.g. 'glyptemys3.arp').
%% ~~Describe \code{datafile} here~~
}
\item{ndigit}{Number of digits per allele in input file. Can be 2 or 3. (Default is 3).
%% ~~Describe \code{ndigit} here~~
}
\item{freq.overall}{Obtain a table of overall allele frequencies?
%% ~~Describe \code{freq.overall} here~~
}
\item{freq.by.pop}{Obtain a table of allele frequencies in each population?
%% ~~Describe \code{freq.by.pop} here~~
}
\item{genetic.stats}{Obtain a table listing number of alleles, allelic richness, private alleles, Ho and He?
%% ~~Describe \code{genetic.stats} here~~
}
\item{pairwise.fst}{Obtain a table of population pairwise Fst values, for each marker and overall?
%% ~~Describe \code{pairwise.fst} here~~
}
}
\details{Some .arp files might not be imported correctly depending on which software was used to generate them. If this is the case, start from the Genepop file and convert it first with \code{\link{convert}}.
All output files will be saved to the working directory.
Allelic richness is calculated by jackknife resampling (1000 replicates), and the sample size for each locus is determined as the smallest number of individuals sampled among all populations for that specific locus. Thus, the sample size may vary between markers.
Pairwise Fst values are calculated according to Wright's formula (Wright 1931; 1951). This option is provided as a data exploration tool. Numerous software allow the computation of Fst indices that integrate sampling effects, such as Weir and Cockerham's theta (1984), and these indices should be preferably reported. See Holsinger and Weir (2009) for more details.
%% ~~ If necessary, more details than the description above ~~
}
\value{Results will be saved in tab-delimited text files in the working directory.
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{Excoffier, L. and H.E.L. Lischer (2010). Arlequin suite ver 3.5: A new series of programs to perform population genetics analyses under Linux and Windows. Mol. Ecol. Res. 10: 564-567.
Holsinger, K.E. and B.S. Weir (2009) Genetics in geographically structured populations: defining, estimating and interpreting Fst. Nat. Rev. Genet. 10:639-650.
Rousset, F. (2008). Genepop'007: a complete reimplementation of the Genepop software for Windows and Linux. Mol. Ecol. Res. 8: 103-106.
Weir, B.S. and C.C. Cockerham (1984) Estimating F-statistics for the analysis of population structure. Evolution 38: 1358-1370.
Wright, S. (1931) Evolution in Mendelian populations. Genetics 16: 97-159.
Wright, S. (1951) The genetical structure of populations. Annals of Eugenics 15: 323-354.
%% ~put references to the literature/web site here ~
}
\author{Sebastien Rioux Paquette
%% ~~who you are~~
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
fc8bbcd1e4b136354faf763eed1a36d8d9d294c7
|
b63f66b2cb6d40f69d6a5ff60ce5b100468110b1
|
/R/lazy-package.r
|
8be247941da3dc952b3234e3dc2542e4af901b04
|
[] |
no_license
|
lionel-/rlang
|
f32fb0130506c3c960a5a9e65fc417b373cec273
|
755a821b760150533009d4445e7c767e721a2361
|
refs/heads/master
| 2021-09-24T07:53:16.608225
| 2014-08-12T14:15:58
| 2014-08-12T14:15:58
| 73,101,742
| 0
| 1
| null | 2016-11-07T17:05:53
| 2016-11-07T17:05:53
| null |
UTF-8
|
R
| false
| false
| 51
|
r
|
lazy-package.r
|
#' lazy.
#'
#' Package-level documentation stub; the NULL below is the conventional
#' roxygen2 target for package docs.
#'
#' @name lazy
#' @docType package
NULL
|
51b8c8a667168d1f5b3a5d0c79a19c7024424885
|
c203696d6f02d775f7787623c386e1b59601f56e
|
/Instacart Market Basket Analysis Code/stability_metrics.R
|
d4ba2e05f745244467d34abb0c83bd9495f6089b
|
[] |
no_license
|
ChanningC12/Kaggle
|
4f6fe7e9698931e0bfa039243de0003f4709f958
|
51d8c9db8cf05eb7f766547d36960c8673896150
|
refs/heads/master
| 2020-04-05T11:43:25.087414
| 2017-08-14T01:29:38
| 2017-08-14T01:29:38
| 81,113,342
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,556
|
r
|
stability_metrics.R
|
######################### Stability Variables Creation ##################################
rm(list=ls())
gc()
library(data.table)
library(dplyr)
setwd("~/../Desktop/Kaggle & Coursera/Kaggle Projects/Instacart Market Basket Analysis/Raw Data/")
orderp <- fread("order_products__prior.csv/order_products__prior.csv") # order_id and product_id level
ordert <- fread("order_products__train.csv/order_products__train.csv") # order_id level
orders <- fread("orders.csv/orders.csv") # order_id level
# Map user_id to order train dataset
ordert$user_id <- orders$user_id[match(ordert$order_id, orders$order_id)] # match the user_id for ordert table
# Merge user_id, order information to order_prior dataset, only keep prior orders to build variables
orders_products <- orders %>% inner_join(orderp, by = "order_id")
# Variable 1: For each user, calculate the std of order quantity for each order
order_quantity <- orders_products %>% group_by(user_id, order_id) %>%
dplyr::summarize(order_quantity = max(add_to_cart_order,na.rm = T))
order_quantity_std <- order_quantity %>% group_by(user_id) %>%
dplyr::summarize(user_order_quantity_sd = sd(order_quantity,na.rm = T))
fwrite(order_quantity_std,"../Processed Data/order_quantity_std.csv",row.names = F)
# Variable 2: For each user by product, calculate the std of order quantity for each order containing the product
orders_products <- orders_products %>% group_by(user_id, order_id) %>%
dplyr::mutate(order_quantity = max(add_to_cart_order,na.rm = T))
order_prod_quantity_std <- orders_products %>% group_by(user_id,product_id) %>%
dplyr::summarize(user_prod_order_quantity_sd = sd(order_quantity,na.rm = T))
fwrite(order_prod_quantity_std,"../Processed Data/order_prod_quantity_std.csv",row.names = F)
# Variable 3: Standard deviation of add_to_cart position
# Variable 4: Standard deviation of add_to_cart percentage
user_prod_addToCart_sd <- orders_products %>%
dplyr::mutate(add_to_cart_pct = add_to_cart_order / order_quantity) %>% group_by(user_id,product_id) %>%
dplyr::summarize(user_prod_addToCart_sd = sd(add_to_cart_order,na.rm = T),
user_prod_addToCart_pct_sd = sd(add_to_cart_pct,na.rm = T))
fwrite(user_prod_addToCart_sd,"../Processed Data//user_prod_addToCart_sd.csv",row.names = F)
# Variable 5: Standard deviation of user purchase interval
user_interval_sd <- orders %>% group_by(user_id) %>%
dplyr::summarize(user_interval_sd = sd(days_since_prior_order,na.rm = T))
fwrite(user_interval_sd,"../Processed Data/user_interval_sd.csv",row.names = F)
# Variable 6: Average days interval by user and product since first time ordering the product
orders[is.na(orders$days_since_prior_order),]$days_since_prior_order <- 0
orders <- orders %>% arrange(user_id,order_number) %>% group_by(user_id) %>%
dplyr::mutate(days_since_prior_order_cum = cumsum(days_since_prior_order))
orders_products <- orders_products %>% left_join(orders %>% select(user_id,order_id,days_since_prior_order_cum),
by=c("user_id","order_id"))
orders_products <- orders_products %>% arrange(user_id,product_id,order_number)
user_prod_interval <- orders_products %>% group_by(user_id,product_id) %>%
dplyr::summarize(up_avg_interval = (max(days_since_prior_order_cum) - min(days_since_prior_order_cum)) / n())
fwrite(user_prod_interval,"../Processed Data/user_prod_interval.csv",row.names = F)
# Variable 7: Standard deviation of user product purchase interval
orders_products <- orders_products %>% select(user_id,product_id,order_number,days_since_prior_order_cum)
orders_products <- orders_products %>% group_by(user_id,product_id) %>%
dplyr::mutate(days_since_prior_order_cum_lag = dplyr::lag(days_since_prior_order_cum,n=1,default = NA))
orders_products$up_days_interval <- orders_products$days_since_prior_order_cum - orders_products$days_since_prior_order_cum_lag
up_days_interval_sd <- orders_products %>% group_by(user_id,product_id) %>%
dplyr::summarize(up_days_interval_sd = sd(up_days_interval,na.rm = T))
fwrite(up_days_interval_sd,"../Processed Data/up_days_interval_sd.csv",row.names = F)
##################### Compile all metrics ##########################
rm(list=ls())
gc()
order_quantity_std <- fread("../Processed Data/order_quantity_std.csv")
user_interval_sd <- fread("../Processed Data/user_interval_sd.csv")
order_prod_quantity_std <- fread("../Processed Data/order_prod_quantity_std.csv")
user_prod_addToCart_sd <- fread("../Processed Data//user_prod_addToCart_sd.csv")
user_prod_interval <- fread("../Processed Data/user_prod_interval.csv")
up_days_interval_sd <- fread("../Processed Data/up_days_interval_sd.csv")
# user_prod_order_quantity_sd, if only one order made by a user for a product, sd is missing, impute them as separate group
order_prod_quantity_std$up_order_quantity_sd_grp <-
ifelse(order_prod_quantity_std$user_prod_order_quantity_sd < quantile(order_prod_quantity_std$user_prod_order_quantity_sd,na.rm = T,probs = 0.25),1,
ifelse(order_prod_quantity_std$user_prod_order_quantity_sd < quantile(order_prod_quantity_std$user_prod_order_quantity_sd,na.rm = T,probs = 0.5),2,
ifelse(order_prod_quantity_std$user_prod_order_quantity_sd < quantile(order_prod_quantity_std$user_prod_order_quantity_sd,na.rm = T,probs = 0.75),3,4)))
order_prod_quantity_std[is.na(order_prod_quantity_std$up_order_quantity_sd_grp),]$up_order_quantity_sd_grp <- 5
# user_prod_addToCart_sd, if only one order made by a user for a product, sd is missing, impute them as separate group
# NOTE(review): the quartile cuts for up_addToCart_sd_grp below use
# user_prod_addToCart_pct_sd, although the group name (and the pct variant
# that follows) suggest this block was meant to bucket the non-percentage
# column user_prod_addToCart_sd -- likely a copy-paste error; confirm.
user_prod_addToCart_sd$up_addToCart_sd_grp <-
ifelse(user_prod_addToCart_sd$user_prod_addToCart_pct_sd < quantile(user_prod_addToCart_sd$user_prod_addToCart_pct_sd,na.rm = T,probs = 0.25),1,
ifelse(user_prod_addToCart_sd$user_prod_addToCart_pct_sd < quantile(user_prod_addToCart_sd$user_prod_addToCart_pct_sd,na.rm = T,probs = 0.5),2,
ifelse(user_prod_addToCart_sd$user_prod_addToCart_pct_sd < quantile(user_prod_addToCart_sd$user_prod_addToCart_pct_sd,na.rm = T,probs = 0.75),3,4)))
# Missing sd (single observation) becomes its own group, 5.
user_prod_addToCart_sd$up_addToCart_sd_grp <- ifelse(is.na(user_prod_addToCart_sd$up_addToCart_sd_grp),5,
user_prod_addToCart_sd$up_addToCart_sd_grp)
# Same quartile bucketing for the percentage-based add-to-cart sd.
user_prod_addToCart_sd$up_addToCart_pct_sd_grp <-
ifelse(user_prod_addToCart_sd$user_prod_addToCart_pct_sd < quantile(user_prod_addToCart_sd$user_prod_addToCart_pct_sd,na.rm = T,probs = 0.25),1,
ifelse(user_prod_addToCart_sd$user_prod_addToCart_pct_sd < quantile(user_prod_addToCart_sd$user_prod_addToCart_pct_sd,na.rm = T,probs = 0.5),2,
ifelse(user_prod_addToCart_sd$user_prod_addToCart_pct_sd < quantile(user_prod_addToCart_sd$user_prod_addToCart_pct_sd,na.rm = T,probs = 0.75),3,4)))
user_prod_addToCart_sd$up_addToCart_pct_sd_grp <- ifelse(is.na(user_prod_addToCart_sd$up_addToCart_pct_sd_grp),5,
user_prod_addToCart_sd$up_addToCart_pct_sd_grp)
# up_days_interval_sd, if only two orders containing the products being made by user, the average interval sd is missing, impute as separate group
up_days_interval_sd$up_days_interval_sd_grp <-
ifelse(up_days_interval_sd$up_days_interval_sd < quantile(up_days_interval_sd$up_days_interval_sd,na.rm = T,probs = 0.25),1,
ifelse(up_days_interval_sd$up_days_interval_sd < quantile(up_days_interval_sd$up_days_interval_sd,na.rm = T,probs = 0.5),2,
ifelse(up_days_interval_sd$up_days_interval_sd < quantile(up_days_interval_sd$up_days_interval_sd,na.rm = T,probs = 0.75),3,4)))
up_days_interval_sd$up_days_interval_sd_grp <- ifelse(is.na(up_days_interval_sd$up_days_interval_sd_grp),5,
up_days_interval_sd$up_days_interval_sd_grp)
# merge all together
stability <- order_prod_quantity_std %>% inner_join(up_days_interval_sd,by=c("user_id","product_id")) %>%
inner_join(user_prod_addToCart_sd,by=c("user_id","product_id")) %>%
inner_join(user_prod_interval,by=c("user_id","product_id")) %>%
left_join(order_quantity_std,by="user_id") %>%
left_join(user_interval_sd,by="user_id")
stability <- stability %>% select(-user_prod_order_quantity_sd,-up_days_interval_sd,-user_prod_addToCart_sd,-user_prod_addToCart_pct_sd)
summary(stability)
fwrite(stability,"../Processed Data/stability_metrics.csv",row.names = F)
|
0eb0afa8d416ce0b312dc3ab288a44f772d93119
|
ed28855491d9b74da69b3b3841472ba63edbae82
|
/Heart Attact Analysis.R
|
fe82631d9827be4c399999a19fb48fcf458f26a4
|
[] |
no_license
|
Kush-Trivedi/Logistic-Regression-K-NN-for-Heart-Attack
|
50b9a776c3836b4c8b79e1cea23fa0f68e39250a
|
1ac86c9ed8053977179608d358e0c0ba0f810418
|
refs/heads/master
| 2023-08-04T19:03:00.847019
| 2021-09-23T04:16:44
| 2021-09-23T04:16:44
| 409,444,026
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,834
|
r
|
Heart Attact Analysis.R
|
# According to the World Health Organization, a terminal sickness is one that has a steady
# progression towards Heart disease and is currently the world's deadliest disease.
#According to data, 68 percent of deaths were caused by chronic long-term diseases, with
#heart disease being the leading cause.
# As a Entry Level data scientist, I have use various predictor factors to try to generate a
#forecast about heart disease patients.
# Also,I have use Logistic regression and K-Nearest Neighbor to develop a model to predict whether
# the patients have heart disease or not for the analysis.
# Import Library
library(dplyr) # Data Manipulation
library(gtools) # Handling R Packages
library(gmodels) # Mean,bi-normal proportion & probability
library(ggplot2) # Data visualization
library(class) # Multidimensional Scaling for prediction,confusion matrix & K-N-N
library(tidyr) # Data Sorting
library(lattice) # Data visualization
library(caret) # Regression
library(e1071) # If you install caret sometimes R gives warring so better install it
library(rmdformats) # Ready to use HTML output
# Import Heart Data
# NOTE(review): hard-coded, user-specific working directory — the script
# only runs on this machine; prefer a relative/project-root path.
getwd()
setwd("/Users/kushtrivedi/Downloads")
heartData <- read.csv("heart.csv")
# Explore Overview of Data (interactive sanity checks; output goes to console)
summary(heartData)
head(heartData)
tail(heartData)
glimpse(heartData)
# Note: data dictionary for the columns
# age: Age of Patient
# sex: 0 = Female & 1 = Male
# cp: Levels of Chest Pain (0-3)
# trtbps: Blood Pressure at Resting (mm HG)
# fbs: blood sugar:- 1 means above 120 mg/dl & 0 means below 120 mg/dl
# restecg: result of electrocardiograph
# thalachh: Maximum Heartbeat Rate
# exng: exercised include angina:- 1 means YES & 0 means NO
# oldpeak: Exercise Relative to Rest
# slp: Slope
# caa: Number of the most blood vessel
# thall: Form of thalassemia
# output: 0 means NO-SICKNESS & 1 means Sickness
# Data Wrangling: convert the coded columns to factors with readable labels
# so glm()/caret treat them as categorical rather than numeric.
heartData <- heartData %>%
  mutate(cp = as.factor(cp),
         restecg = as.factor(restecg),
         slp = as.factor(slp),
         caa = as.factor(caa),
         thall = as.factor(thall),
         sex = factor(sex, levels = c(0,1), labels = c("female", "male")),
         fbs = factor(fbs, levels = c(0,1), labels = c("False", "True")),
         exng = factor(exng, levels = c(0,1), labels = c("No", "Yes")),
         output = factor(output, levels = c(0,1), labels = c("Health", "Not Health")))
# Cross verify the conversions took effect
head(heartData)
glimpse(heartData)
# Check for Missing Value (in this dataset there are none)
colSums(is.na(heartData))
# Data Pre-Processing: check class balance before modelling
prop.table(table(heartData$sex)) # Proportion
table(heartData$sex) # Total (96 + 207) = 303 Cross- Verification
prop.table(table(heartData$output)) # Proportion
table(heartData$output) # Total (138 + 165) = 303 Cross- Verification
# Cross Validation: 70/30 train/test split of the rows
set.seed(101)
index <- sample(nrow(heartData),nrow(heartData) * 0.7)
# Data Try (training set)
try_Heart_Data <- heartData[index,]
# Data Test (hold-out set)
test_Heart_Data <- heartData[-index,]
# Check whether the split preserves the outcome proportions; if not,
# the cross-validation split should be redone.
prop.table(table(heartData$output)) # Without Cross validation
prop.table(table(try_Heart_Data$output)) # With Cross validation
# Data Modelling -------------------------------------------------------------
# First pass: logistic regression on a hand-picked subset of predictors.
heart_Model_1 <- glm(formula = output ~ sex + cp + fbs + thall,
                     family = "binomial", data = try_Heart_Data)
# "binomial" family: glm reports deviance, coefficients and dispersion.
summary(heart_Model_1)
# Some of those variables are not useful, so build the two endpoints for
# stepwise selection instead of picking predictors by hand.
# Intercept-only model (no predictors).
heart_Model_NoPredictor <- glm(output ~ 1, family = "binomial", data = try_Heart_Data)
# Model with ALL predictors (the original comment mislabelled this one).
heart_Model_AllPredictor <- glm(output ~ ., family = "binomial", data = try_Heart_Data)
# Step-wise regression: backward, forward, and both directions.
heart_Model_Backward <- step(object = heart_Model_AllPredictor,
                             direction = "backward", trace = FALSE)
heart_Model_Forward <- step(object = heart_Model_AllPredictor,
                            scope = list(lower = heart_Model_NoPredictor,
                                         upper = heart_Model_AllPredictor),
                            direction = "forward", trace = FALSE)
heart_Model_BackwardForward <- step(object = heart_Model_AllPredictor,
                                    scope = list(lower = heart_Model_NoPredictor,
                                                 upper = heart_Model_AllPredictor),
                                    direction = "both", trace = FALSE)
# Backward and backward-forward give Residual Deviance = 122.59, AIC = 156.59;
# forward gives RD = 120.23, AIC = 166.23.
summary(heart_Model_Backward)
summary(heart_Model_Forward)
summary(heart_Model_BackwardForward)
# Prediction: score the held-out TEST data with the backward-forward model.
test_Heart_Data$prediction <- predict(heart_Model_BackwardForward,
                                      type = "response",
                                      newdata = test_Heart_Data)
# Density of the predicted probabilities.
test_Heart_Data %>%
  ggplot(aes(x = prediction)) +
  geom_density() +
  labs(title = "Prediction Data Probabilities") +
  theme_gray()
# The distribution leans toward 1, i.e. the "Not Health" class.
# Turn probabilities into class labels with a 0.5 cut-off.
prediction_DataFrame <- predict(heart_Model_BackwardForward,
                                type = "response",
                                newdata = test_Heart_Data)
result_Prediction_DataFrame <- ifelse(prediction_DataFrame >= 0.5,
                                      "Not Health", "Health")
# Overwrite the probability column with the predicted labels.
test_Heart_Data$prediction <- result_Prediction_DataFrame
# Quick visual comparison of actual vs predicted labels.
test_Heart_Data %>%
  select(output, prediction) %>%
  head(12)
# Model Evaluation: confusion matrix (sensitivity/recall & specificity/precision).
confusion_Matrix <- confusionMatrix(as.factor(result_Prediction_DataFrame),
                                    reference = test_Heart_Data$output,
                                    positive = "Not Health")
confusion_Matrix
# Derive the metrics from the confusion-matrix table itself instead of
# hard-coding counts copied from one run's print-out. This also fixes a
# bug in the original: precision was computed as 42/(42+11) although the
# false-positive count was 9, i.e. it should have been 42/(42+9).
cm_table <- confusion_Matrix$table          # rows = prediction, cols = reference
tp <- cm_table["Not Health", "Not Health"]  # true positives  (42 in the original run)
fn <- cm_table["Health", "Not Health"]      # false negatives (5)
tn <- cm_table["Health", "Health"]          # true negatives  (35)
fp <- cm_table["Not Health", "Health"]      # false positives (9)
recall <- round(tp / (tp + fn), 3)
specificity <- round(tn / (tn + fp), 3)
precision <- round(tp / (tp + fp), 3)
accuracy <- round((tp + tn) / (tp + tn + fp + fn), 3)
matrix <- cbind.data.frame(accuracy, recall, specificity, precision)
# More Clear Result
matrix
# Model Interpretation:
# Transform the log-odds coefficients into positive-class probabilities.
heart_Model_BackwardForward$coefficients %>%
  inv.logit() %>%  # inverse-logit: log-odds -> probability
  data.frame()
# Prediction 1: Males have a 18.8 percent chance of being diagnosed with heart disease.
# Prediction 2: People with a high level of severe pain (cp = 3) have a 90 percent chance of
# developing heart disease.
# K - Nearest Neighbor
# Data Wrangling: K-NN needs numeric inputs, so build a data frame of
# dummy (one-hot) variables for the categorical predictors.
dummy_DataFrame <- dummyVars("~output + sex + cp + trtbps + chol + fbs + restecg + thalachh +
                             exng + oldpeak + slp + caa + thall", data = heartData)
# Create new data frame of the expanded dummy columns
dummy_DataFrame <- data.frame(predict(dummy_DataFrame,newdata = heartData))
# Let's check structure of dummy data frame
str(dummy_DataFrame)
# Drop one dummy column per binary factor (the reference level) so the
# remaining columns are not perfectly collinear.
dummy_DataFrame$output.Health <- NULL
dummy_DataFrame$sex.female <- NULL
dummy_DataFrame$fbs.False <- NULL
dummy_DataFrame$exng.No <- NULL
head(dummy_DataFrame)
# Cross Validation: K-NN splits predictors (X) and outcome (Y) separately
# into train/test, reusing the same `index` as the logistic-regression split.
set.seed(101)
# Predictor matrices (all columns except the outcome in column 1)
try_Predictor_Knn_Dummy_Heart_Data_X <- dummy_DataFrame[index, -1]
test_Predictor_Knn_Dummy_Heart_Data_X <- dummy_DataFrame[-index, -1]
# Output vectors (column 1 = output.Not.Health dummy)
try_Predictor_Knn_Dummy_Heart_Data_Y <- dummy_DataFrame[index, 1]
test_Predictor_Knn_Dummy_Heart_Data_Y <- dummy_DataFrame[-index, 1]
# Choose k by a common heuristic: square root of the training-row count
sqrt(nrow(try_Predictor_Knn_Dummy_Heart_Data_X))
# K will be 14.56044 = 14 and will use for prediction in next step
# Fit/predict with K-NN (class::knn predicts the test labels directly)
prediction_Knn <- knn(train = try_Predictor_Knn_Dummy_Heart_Data_X,
                      test = test_Predictor_Knn_Dummy_Heart_Data_X,
                      cl = try_Predictor_Knn_Dummy_Heart_Data_Y,
                      k = 14)
# We will transform the K-NN predictions into a data frame and relabel
# 0/1 back to the original Health / Not Health labels
prediction_Knn <- prediction_Knn %>%
  as.data.frame() %>%
  mutate(prediction_Knn = factor(prediction_Knn,
                                 levels = c(0,1),
                                 labels = c("Health", "Not Health")))%>%select(prediction_Knn)
# Same relabelling for the reference labels used by the confusion matrix
test_Predictor_Knn_Dummy_Heart_Data_Y <- test_Predictor_Knn_Dummy_Heart_Data_Y %>%
  as.data.frame() %>%
  mutate(output = factor(test_Predictor_Knn_Dummy_Heart_Data_Y,
                         levels = c(0,1),
                         labels = c("Health", "Not Health")))%>%select(output)
# Create Confusion Matrix
confusion_Matrix_Knn <- confusionMatrix(prediction_Knn$prediction_Knn,
                                        reference = test_Predictor_Knn_Dummy_Heart_Data_Y$output,
                                        positive = "Not Health")
confusion_Matrix_Knn
# Counts observed in the original run:
# Not Health of Not Health = 37
# Health of Not Health = 10
# Health of Health = 29
#Not Health of Health = 15
# Derive the K-NN metrics from the confusion-matrix table rather than
# hard-coding counts transcribed from one run's console output (those go
# stale as soon as the seed, the split or k changes).
cm_table_knn <- confusion_Matrix_Knn$table           # rows = prediction, cols = reference
tp_knn <- cm_table_knn["Not Health", "Not Health"]   # true positives  (37 in the original run)
fn_knn <- cm_table_knn["Health", "Not Health"]       # false negatives (10)
tn_knn <- cm_table_knn["Health", "Health"]           # true negatives  (29)
fp_knn <- cm_table_knn["Not Health", "Health"]       # false positives (15)
recall_Knn <- round(tp_knn / (tp_knn + fn_knn), 3)
specificity_Knn <- round(tn_knn / (tn_knn + fp_knn), 3)
precision_Knn <- round(tp_knn / (tp_knn + fp_knn), 3)
accuracy_Knn <- round((tp_knn + tn_knn) / (tp_knn + tn_knn + fp_knn + fn_knn), 3)
matrix_Knn <- cbind.data.frame(accuracy_Knn, recall_Knn, specificity_Knn, precision_Knn)
# More Clear Result
matrix_Knn
# Overall Prediction of K-N-N Model is 72.5 % accuracy
# Not Health person are 78 %
# Health person are 65.9 %
# Precision for "Not Health" is 71.2 % from our K-N-N Prediction
# Compare Logistic Regression and K-N-N side by side
# Logistic regression metrics:
matrix
# K-N-N metrics:
matrix_Knn
# We can see that K-N-N has better specificity and precision in compare to Logistic Regression
# Logistic Regression has better accuracy and recall
#Conclusion:
# If a doctor has to chose treat people with heart disease differently, we would go with
# better precision.
# if a doctor merely wanted to diagnose as many people as possible with heart disease while
# ignoring the incorrect categorization, then we will use best recall.
# Q-Plot scatter: age vs cholesterol for patients with cholesterol > 200
heart_Cholesterol_DataFrame_Above_200 <- heartData[heartData$chol > 200,]
qplot(data = heart_Cholesterol_DataFrame_Above_200, x = age, y = chol, colour = age, size = I(5), alpha = I(0.7), main = "Age of people where Cholesterol > 200", xlab = "Age", ylab = "Cholesterol")
# Q-Plot scatter: age vs cholesterol for patients with cholesterol < 200
# BUG FIX: the title previously said "Cholesterol > 200" although this
# subset is the below-200 group.
heart_Cholesterol_DataFrame_Below_200 <- heartData[heartData$chol < 200,]
qplot(data = heart_Cholesterol_DataFrame_Below_200, x = age, y = chol, colour = age, size = I(5), alpha = I(0.7), main = "Age of people where Cholesterol < 200", xlab = "Age", ylab = "Cholesterol")
# Q-Plot box plot: chest-pain level for patients under age 50
heart_CestPain_Data_Age_Below_50 <- heartData[heartData$age < 50,]
qplot(data = heart_CestPain_Data_Age_Below_50, x = cp, y = age, colour = cp, size = I(1), alpha = I(0.7), geom = "boxplot", main = "Chest Pain level of people under age of 50.", xlab = "Chest Pain", ylab = "Age")
# Q-Plot box plot: chest-pain level for patients above age 50
heart_CestPain_Data_Age_Above_50 <- heartData[heartData$age > 50,]
qplot(data = heart_CestPain_Data_Age_Above_50, x = cp, y = age, colour = cp, size = I(1), alpha = I(0.7), geom = "boxplot", main = "Chest Pain level of people above age of 50.", xlab = "Chest Pain", ylab = "Age")
|
1aff8a0cc19deababddf61c95d8f682404987e23
|
7f823e306008025c5697dc5a88f5a9b1f54eaa30
|
/man/clean.Rd
|
f0570806c5442ea234171daf89733037a9c1acbe
|
[] |
no_license
|
renlund/proh
|
0a0aced92d6755ea1aada721f68be76997d62278
|
041f68b7cb9ad730b2fd47d8d5e7e2e001afe803
|
refs/heads/master
| 2023-04-14T01:43:06.382209
| 2023-03-23T14:05:46
| 2023-03-23T14:05:46
| 21,493,531
| 0
| 0
| null | 2015-08-20T07:26:25
| 2014-07-04T10:16:31
|
R
|
UTF-8
|
R
| false
| true
| 275
|
rd
|
clean.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clean.r
\name{clean}
\alias{clean}
\title{Clean LaTeX files}
\usage{
clean()
}
\description{
Remove unnecessary LaTeX files
}
\details{
Sometimes LaTeX leaves a mess...
}
\author{
Henrik Renlund
}
|
f26c5f0a3be954ad0de840db96aa1849f193d982
|
61045b2f3c92394920f394fff7c5fbfbccb97d01
|
/pipeline/secondary/inspect.R
|
42d80a3f0d60af8184ee816657869d5b1f8da4b8
|
[
"Apache-2.0"
] |
permissive
|
arnaudceol/htsflow
|
d82f8519b352277c2aa1ead21673474456bf2729
|
e4fdd5ba9d4457ef56fc331df9acad754cb42530
|
refs/heads/master
| 2020-05-21T15:16:12.231028
| 2016-07-13T12:19:32
| 2016-07-13T12:19:32
| 48,240,725
| 3
| 3
| null | 2016-04-28T09:01:29
| 2015-12-18T14:55:42
|
JavaScript
|
UTF-8
|
R
| false
| false
| 11,730
|
r
|
inspect.R
|
# Copyright 2015-2016 Fondazione Istituto Italiano di Tecnologia.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Run the INSPEcT secondary analysis for one secondary-analysis id.
#
# Loads the analysis configuration from the DB, quantifies exon/intron
# RPKMs and counts from the 4sU and RNA-total BAM files, estimates RNA
# synthesis / processing / degradation rates with INSPEcT, and writes the
# results as .rds files into the per-analysis output folder. Two designs
# are supported: "time_course" and a two-condition steady-state comparison.
inspect <- function(IDsec) {

  method <- "inspect"
  initSpecies()

  loginfo("Start INSPEcT")

  values <- getSecondaryData(IDsec, method)

  # Analysis type: "time_course" or (anything else) steady-state comparison.
  sqlType <- paste( "SELECT type FROM inspect WHERE secondary_id=",IDsec," LIMIT 1;")
  tmp <- dbQuery(sqlType)
  inspectType <- tmp[2]

  # Reference genome -> matching TxDb annotation package.
  sqlGenome <- paste( "SELECT ref_genome FROM inspect, primary_analysis, sample WHERE primary_id = primary_analysis.id AND sample.id = sample_id AND secondary_id=",IDsec," LIMIT 1;")
  tmp <- dbQuery(sqlGenome)
  genome <- tmp[2]
  txdbLib <- htsflowGenomeVersionsToTxdbLib[[genome]]

  library(INSPEcT)
  library(txdbLib, character.only = TRUE)
  txdb <- get(txdbLib)

  # Analysis parameters are stored one value per sample row; take the first.
  labeling_time <- as.numeric(values$labeling_time[1])
  degDuringPulse <- sapply(values$deg_during_pulse, function(x) if (x[1] == "0") { FALSE } else { TRUE })[1]
  modeling_rates <- sapply(values$modeling_rates, function(x) if (x[1] == "0") { FALSE } else { TRUE })[1]
  counts_filtering <- as.numeric(values$counts_filtering[1])

  bamFolder <- getHTSFlowPath("HTSFLOW_ALN")

  if (inspectType == "time_course") {

    # timepoints, fourSuBams and rnaTotalBams must all have the same length.
    fourSuBams <- sapply(seq_along(values$primary_id), function(x) paste(bamFolder, values$primary_id[x], ".bam", sep=""))
    # BUG FIX: the RNA-total BAM paths were previously built from
    # values$primary_id, silently duplicating the 4sU files. They must come
    # from values$rnatotal_id, exactly as the steady-state branch below does.
    rnaTotalBams <- sapply(seq_along(values$rnatotal_id), function(x) paste(bamFolder, values$rnatotal_id[x], ".bam", sep=""))
    timepoints <- as.numeric(values$timepoint)

    # Quantify intron and exon features for each BAM file.
    quantifyExInt <- makeRPKMs(txdb, fourSuBams, rnaTotalBams)
    rpkms <- quantifyExInt$rpkms
    counts <- quantifyExInt$counts

    # Keep only features whose raw counts reach the threshold in every
    # sample, in both the 4sU and the total fractions.
    # NOTE(review): this branch filters with ">=" while the steady-state
    # branch uses ">" — confirm whether that off-by-one is intentional.
    exonFeatures <- rownames(rpkms$foursu_exons)
    intronFeatures <- rownames(rpkms$foursu_introns)
    exon_filt <- exonFeatures[apply(counts$foursu$exonCounts >= counts_filtering, 1, all) &
                                apply(counts$total$exonCounts >= counts_filtering, 1, all)]
    intron_filt <- intronFeatures[apply(counts$foursu$intronCounts >= counts_filtering, 1, all) &
                                    apply(counts$total$intronCounts >= counts_filtering, 1, all)]
    intron_filt <- intersect(intron_filt, exon_filt)

    rpkms_foursu_exons <- rpkms$foursu_exons[exon_filt, , drop = FALSE]
    rpkms_total_exons <- rpkms$total_exons[exon_filt, , drop = FALSE]
    rpkms_foursu_introns <- rpkms$foursu_introns[intron_filt, , drop = FALSE]
    rpkms_total_introns <- rpkms$total_introns[intron_filt, , drop = FALSE]
    # NOTE(review): the filtered count matrices below are never used in this
    # branch (only the RPKMs feed newINSPEcT); kept for parity with the
    # steady-state branch.
    counts_foursu_exons <- counts$foursu$exonCounts[exon_filt, , drop = FALSE]
    counts_total_exons <- counts$total$exonCounts[exon_filt, , drop = FALSE]
    counts_foursu_introns <- counts$foursu$intronCounts[intron_filt, , drop = FALSE]
    counts_total_introns <- counts$total$intronCounts[intron_filt, , drop = FALSE]

    # First-guess quantification of the rates.
    inspectIds <- newINSPEcT(
      timepoints, labeling_time,
      rpkms_foursu_exons, rpkms_total_exons,
      rpkms_foursu_introns, rpkms_total_introns,
      degDuringPulse = degDuringPulse, BPPARAM = SerialParam()
    )

    # Optional modelling of the rates over time.
    if (modeling_rates) {
      inspectIds_mod <- modelRates(inspectIds, seed = 1)
    }

    # INSPEcT-specific output: matrices with row=genes, columns=unique(timepoints).
    sythesis <- ratesFirstGuess(inspectIds, 'synthesis')
    degradation <- ratesFirstGuess(inspectIds, 'degradation')
    processing <- ratesFirstGuess(inspectIds, 'processing')
    pre_mRNA <- ratesFirstGuess(inspectIds, 'preMRNA')
    total_mRNA <- ratesFirstGuess(inspectIds, 'total')

    if (modeling_rates) {
      # NOTE(review): these read from the unmodelled `inspectIds` object (not
      # `inspectIds_mod`) and are never saved — confirm the original intent.
      modeled_sythesis <- viewModelRates(inspectIds, 'synthesis')
      modeled_degradation <- viewModelRates(inspectIds, 'degradation')
      modeled_processing <- viewModelRates(inspectIds, 'processing')
      modeled_pre_mRNA <- viewModelRates(inspectIds, 'preMRNA')
      modeled_total_mRNA <- viewModelRates(inspectIds, 'total')
    }

    # Create the output folder and persist the first-guess rates.
    outFolder <- paste(getHTSFlowPath("HTSFLOW_SECONDARY"), IDsec, "/", sep = "")
    loginfo(paste("Create output folder ", outFolder, " --", sep = ""))
    createDir(outFolder, recursive = TRUE)
    setwd(outFolder)
    saveRDS(sythesis, "synthesis.rds")
    saveRDS(degradation, "degradation.rds")
    saveRDS(processing, "processing.rds")
    saveRDS(pre_mRNA, "pre_mRNA.rds")
    saveRDS(total_mRNA, "total_mRNA.rds")

    # Mark the secondary analysis as complete in the DB.
    setSecondaryStatus(IDsec = IDsec, status = 'completed', endTime = TRUE, outFolder = TRUE)

  } else {

    # Steady-state comparison between the first two conditions in values$cond.
    condition1 <- unique(values$cond)[1]
    condition2 <- unique(values$cond)[2]

    # Assign each sample's 4sU / RNA-total BAM paths to its condition.
    bamfiles_4sU_cond1 <- c()
    bamfiles_RNAtotal_cond1 <- c()
    bamfiles_4sU_cond2 <- c()
    bamfiles_RNAtotal_cond2 <- c()
    for (x in seq_along(values$primary_id)) {
      if (values$cond[x] == condition1) {
        bamfiles_4sU_cond1 <- c(bamfiles_4sU_cond1, paste(bamFolder, values$primary_id[x], ".bam", sep=""))
        bamfiles_RNAtotal_cond1 <- c(bamfiles_RNAtotal_cond1, paste(bamFolder, values$rnatotal_id[x], ".bam", sep=""))
      } else {
        bamfiles_4sU_cond2 <- c(bamfiles_4sU_cond2, paste(bamFolder, values$primary_id[x], ".bam", sep=""))
        bamfiles_RNAtotal_cond2 <- c(bamfiles_RNAtotal_cond2, paste(bamFolder, values$rnatotal_id[x], ".bam", sep=""))
      }
    }

    quantifyExInt_cond1 <- makeRPKMs(txdb, bamfiles_4sU_cond1, bamfiles_RNAtotal_cond1)
    quantifyExInt_cond2 <- makeRPKMs(txdb, bamfiles_4sU_cond2, bamfiles_RNAtotal_cond2)
    rpkms_cond1 <- quantifyExInt_cond1$rpkms
    counts_cond1 <- quantifyExInt_cond1$counts
    rpkms_cond2 <- quantifyExInt_cond2$rpkms
    counts_cond2 <- quantifyExInt_cond2$counts

    # Filter on counts: the threshold must hold in every sample of BOTH
    # conditions and both fractions.
    exonFeatures <- rownames(rpkms_cond1$foursu_exons)
    intronFeatures <- rownames(rpkms_cond1$foursu_introns)
    exon_filt <- exonFeatures[apply(counts_cond1$foursu$exonCounts > counts_filtering, 1, all) &
                                apply(counts_cond1$total$exonCounts > counts_filtering, 1, all) &
                                apply(counts_cond2$foursu$exonCounts > counts_filtering, 1, all) &
                                apply(counts_cond2$total$exonCounts > counts_filtering, 1, all)]
    intron_filt <- intronFeatures[apply(counts_cond1$foursu$intronCounts > counts_filtering, 1, all) &
                                    apply(counts_cond1$total$intronCounts > counts_filtering, 1, all) &
                                    apply(counts_cond2$foursu$intronCounts > counts_filtering, 1, all) &
                                    apply(counts_cond2$total$intronCounts > counts_filtering, 1, all)]
    intron_filt <- intersect(intron_filt, exon_filt)

    # cond 1
    rpkms_cond1_foursu_exons <- rpkms_cond1$foursu_exons[exon_filt, , drop = FALSE]
    rpkms_cond1_total_exons <- rpkms_cond1$total_exons[exon_filt, , drop = FALSE]
    rpkms_cond1_foursu_introns <- rpkms_cond1$foursu_introns[intron_filt, , drop = FALSE]
    rpkms_cond1_total_introns <- rpkms_cond1$total_introns[intron_filt, , drop = FALSE]
    counts_cond1_foursu_exons <- counts_cond1$foursu$exonCounts[exon_filt, , drop = FALSE]
    counts_cond1_total_exons <- counts_cond1$total$exonCounts[exon_filt, , drop = FALSE]
    counts_cond1_foursu_introns <- counts_cond1$foursu$intronCounts[intron_filt, , drop = FALSE]
    counts_cond1_total_introns <- counts_cond1$total$intronCounts[intron_filt, , drop = FALSE]
    # cond 2
    rpkms_cond2_foursu_exons <- rpkms_cond2$foursu_exons[exon_filt, , drop = FALSE]
    rpkms_cond2_total_exons <- rpkms_cond2$total_exons[exon_filt, , drop = FALSE]
    rpkms_cond2_foursu_introns <- rpkms_cond2$foursu_introns[intron_filt, , drop = FALSE]
    rpkms_cond2_total_introns <- rpkms_cond2$total_introns[intron_filt, , drop = FALSE]
    counts_cond2_foursu_exons <- counts_cond2$foursu$exonCounts[exon_filt, , drop = FALSE]
    counts_cond2_total_exons <- counts_cond2$total$exonCounts[exon_filt, , drop = FALSE]
    counts_cond2_foursu_introns <- counts_cond2$foursu$intronCounts[intron_filt, , drop = FALSE]
    counts_cond2_total_introns <- counts_cond2$total$intronCounts[intron_filt, , drop = FALSE]

    # Quantification of rates; steady state means all timepoints are 0.
    timepoints <- rep(0, length(bamfiles_4sU_cond1))
    inspectIds1 <- newINSPEcT(
      timepoints, labeling_time,
      rpkms_cond1_foursu_exons, rpkms_cond1_total_exons,
      rpkms_cond1_foursu_introns, rpkms_cond1_total_introns,
      degDuringPulse = degDuringPulse, BPPARAM = SerialParam()
    )
    timepoints <- rep(0, length(bamfiles_4sU_cond2))
    inspectIds2 <- newINSPEcT(
      timepoints, labeling_time,
      rpkms_cond2_foursu_exons, rpkms_cond2_total_exons,
      rpkms_cond2_foursu_introns, rpkms_cond2_total_introns,
      degDuringPulse = degDuringPulse, BPPARAM = SerialParam()
    )

    # Differential analysis between the two conditions.
    diffrates <- compareSteady(inspectIds1, inspectIds2)
    diff_synthesis <- synthesis(diffrates)
    diff_processing <- processing(diffrates)
    diff_degradation <- degradation(diffrates)

    # Per-condition RPKM / count vectors (first column of each matrix).
    all_rpkms_cond1 <- list(
      foursu_exons = rpkms_cond1_foursu_exons[, 1],
      total_exons = rpkms_cond1_total_exons[, 1],
      foursu_introns = rpkms_cond1_foursu_introns[, 1],
      total_introns = rpkms_cond1_total_introns[, 1]
    )
    all_counts_cond1 <- list(
      foursu_exons = counts_cond1_foursu_exons[, 1],
      total_exons = counts_cond1_total_exons[, 1],
      foursu_introns = counts_cond1_foursu_introns[, 1],
      total_introns = counts_cond1_total_introns[, 1]
    )
    all_rpkms_cond2 <- list(
      foursu_exons = rpkms_cond2_foursu_exons[, 1],
      total_exons = rpkms_cond2_total_exons[, 1],
      foursu_introns = rpkms_cond2_foursu_introns[, 1],
      total_introns = rpkms_cond2_total_introns[, 1]
    )
    all_counts_cond2 <- list(
      foursu_exons = counts_cond2_foursu_exons[, 1],
      total_exons = counts_cond2_total_exons[, 1],
      foursu_introns = counts_cond2_foursu_introns[, 1],
      total_introns = counts_cond2_total_introns[, 1]
    )

    # Create the output folder and persist the results.
    outFolder <- paste(getHTSFlowPath("HTSFLOW_SECONDARY"), IDsec, "/", sep = "")
    loginfo(paste("Create output folder ", outFolder, " --", sep = ""))
    createDir(outFolder, recursive = TRUE)
    setwd(outFolder)
    saveRDS(diffrates, "diffrates.rds")
    saveRDS(diff_synthesis, "diff_synthesis.rds")
    saveRDS(diff_processing, "diff_processing.rds")
    saveRDS(diff_degradation, "diff_degradation.rds")
    saveRDS(all_rpkms_cond1, "all_rpkms_cond1.rds")
    saveRDS(all_rpkms_cond2, "all_rpkms_cond2.rds")
    saveRDS(all_counts_cond1, "all_counts_cond1.rds")
    # NOTE(review): "pre_all_counts_cond2.rds" looks like a typo for
    # "all_counts_cond2.rds"; kept as-is because downstream readers may
    # depend on the existing file name — confirm before renaming.
    saveRDS(all_counts_cond2, "pre_all_counts_cond2.rds")

    # Mark the secondary analysis as complete in the DB.
    setSecondaryStatus(IDsec = IDsec, status = 'completed', endTime = TRUE, outFolder = TRUE)
  }
}
|
012e7ebbb366deb48f4f3780ea1fb7729cdb7f94
|
e3f8cf57114e8918c6df4e38ac494a4ba817aacb
|
/tests/testthat/test_read_messages.R
|
d778caf1b8949927a4e9d6853933262e5ce7276a
|
[] |
no_license
|
MangoTheCat/mailman
|
c11dfebef49da305910bf7c497d18051daf5dfcb
|
30cfacbfc48931c8f5d3b4c3ff92607d5d8ed46a
|
refs/heads/master
| 2020-03-21T05:17:00.647316
| 2018-06-21T10:09:29
| 2018-06-21T10:09:29
| 138,153,015
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 712
|
r
|
test_read_messages.R
|
context("Reading messages")

test_that("read_messages read and parses messages from a mailbox", {
  # The parser delegates to Python's mailbox module via reticulate.
  if (!reticulate::py_module_available("mailbox"))
    skip("mailbox not available for testing")

  messages <- read_messages("data/test_mailbox.mbox", type = "mbox")

  # One row per message; a missing From header comes through as NA.
  expected <- tibble::tibble(
    From = c("foo@bar.com", NA, "foo@bar.com"),
    To = c("spam@eggs.co.uk", "spam@eggs.co.uk", "spam@eggs.co.uk"),
    Date = c("2018-01-01 12:00", "2018-01-01 12:00", "2018-01-01 12:00"),
    Body = c("This is a test\n", "This is a second test\n",
             "This is a fourth test")
  )

  expect_equal(expected, messages)
})
|
e711c07c74ae9ee5ffda60ce294c6e5a5086d5aa
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/NISTunits/examples/NISTmillimeterTOpointComputer.Rd.R
|
92ebfbf10c31ba2cd9dc48c2d2c03a9585c00091
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 224
|
r
|
NISTmillimeterTOpointComputer.Rd.R
|
library(NISTunits)
### Name: NISTmillimeterTOpointComputer
### Title: Convert millimeter to point
### Aliases: NISTmillimeterTOpointComputer
### Keywords: programming

### ** Examples

# Example: convert 10 millimeters to computer points.
NISTmillimeterTOpointComputer(10)
|
922d5b125516792aabf57b2555aa8e648e905254
|
8d4dfa8b6c11e319fb44e578f756f0fa6aef4051
|
/man/getNbRolledUpFeatures.Rd
|
9b11dcb461427295b80f25a1d38c206622f05312
|
[] |
no_license
|
eahrne/SafeQuant
|
ce2ace309936b5fc2b076b3daf5d17b3168227db
|
01d8e2912864f73606feeea15d01ffe1a4a9812e
|
refs/heads/master
| 2021-06-13T02:10:58.866232
| 2020-04-14T10:01:43
| 2020-04-14T10:01:43
| 4,616,125
| 4
| 4
| null | 2015-11-03T20:12:03
| 2012-06-10T15:35:25
|
R
|
UTF-8
|
R
| false
| true
| 496
|
rd
|
getNbRolledUpFeatures.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ExpressionAnalysis.R
\name{getNbRolledUpFeatures}
\alias{getNbRolledUpFeatures}
\title{Get number rolled up features per row}
\usage{
getNbRolledUpFeatures(eset, method = "vector")
}
\arguments{
\item{eset}{ExpressionSet}
\item{method}{c("vector","matrix") default vector}
}
\value{
matrix
}
\description{
Get number rolled up features per row
}
\note{
No note
}
\examples{
print("No examples")
}
\seealso{
No note
}
|
e2a916786fecb22b12140eee3f99a90233391900
|
7dfd5181131882a16d24c4830f0cca24d1057d4f
|
/R/cleanNZGridEA.R
|
85505286c74be4c48d2ccdabb0bcd2bdff3d72b0
|
[
"Apache-2.0"
] |
permissive
|
CfSOtago/gridCarbon
|
b4239182a67e453dba541e81571f5b7f5b731c48
|
25d433054dca935aa434e4d89de0fc826373e2fc
|
refs/heads/master
| 2022-04-30T18:18:22.389263
| 2022-04-01T11:14:03
| 2022-04-01T11:14:03
| 157,133,798
| 2
| 2
|
Apache-2.0
| 2022-04-01T11:14:04
| 2018-11-12T00:05:06
|
HTML
|
UTF-8
|
R
| false
| false
| 1,275
|
r
|
cleanNZGridEA.R
|
#' \code{cleanNZGridEA} cleans up the raw grid gen file from the NZ EA data website
#'
#' The raw files are not in a pretty form so we clean them up to a long-form
#' file and fix the dateTimes.
#'
#' Note that rDateTime will be NA for the DST breaks which equate to TP49/TP50.
#' We really dislike DST breaks.
#'
#' @param dt the data.table to clean up
#'
#' @import data.table
#' @import lubridate
#' @author Ben Anderson, \email{b.anderson@@soton.ac.uk} (original)
#' @export
#' @family data
#' @family grid
#' @family NZ
#'
cleanNZGridEA <- function(dt){
  # cleans & returns a long-form data.table
  dtl <- gridCarbon::reshapeGenDT(dt) # make long
  dtl <- gridCarbon::setGridGenTimePeriod(dtl) # set time periods to something intelligible as rTime
  dtl[, rDate := as.Date(Trading_date)] # fix the dates so R knows what they are
  # Parsing failures here are the DST-break half-hours (TP49/TP50).
  dtl[, rDateTime := lubridate::ymd_hms(paste0(rDate, rTime))] # set full dateTime
  # don't do this here - do it on data load (saves space)
  #dtl[, rDateTimeNZT := lubridate::force_tz(rDateTime,
  #                                          tzone = "Pacific/Auckland")] # for safety in case run in another tz!
  # NOTE(review): the table() below is computed but neither printed nor
  # returned, so inside a function it has no visible effect; wrap it in
  # print()/message() if the TP49/TP50 tally should be reported.
  table(dtl[is.na(rDateTime)]$Time_Period)
  return(dtl)
}
|
74990d6f16a857ddb54cca8f8dc20facbc743325
|
749f4c1b663c44a623b8e76747980701af51c8de
|
/code/lib/cleanup.R
|
1abd6a7e1db78b66fba26a9ea3497a85d62afbd0
|
[] |
no_license
|
fusioncl/SueldosGovScrape
|
ee7c68d73e9740a91e7f8946d0287225a9becb48
|
eb0bb55f6b857c54b794c573d07dc41538818e2b
|
refs/heads/master
| 2020-03-21T11:46:32.890393
| 2018-06-24T19:04:58
| 2018-06-24T19:04:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,288
|
r
|
cleanup.R
|
library(tidyverse)
library(stringr)
#### Extract ----
# Get list of files in directories
# NOTE(review): rstudioapi ties this script to an interactive RStudio
# session; it will fail when run via Rscript.
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
files.output <- list.files(path = '../output', recursive = TRUE)
# Re-prefix with the relative folder so paths resolve from the script's dir.
files.output <- paste0('../output/', files.output)
# Process and put into a data.table
# Read every file whose path matches the pattern `file.text` and return the
# parsed data frames as a list (one element per matching file, in order).
#
# file.list: character vector of file paths to consider
# file.text: pattern passed to grepl() to select the files to read
# returns:   unnamed list of data frames (read.csv, no header row)
combine.csv <- function(file.list, file.text){
  files.to.get <- file.list[grepl(file.text, file.list)]
  # lapply replaces the original counter-driven loop that grew the list one
  # element at a time; same result, no manual bookkeeping or reallocation.
  lapply(files.to.get, function(f) read.csv(f, header = FALSE))
}
# Read every file whose path matches `file.text`, parse all columns as
# character, tag each row with its source path, and stack everything into
# one data frame via dplyr::bind_rows.
combine.csv.2 <- function(file.list, file.text){
  files.to.get <- file.list[grepl(file.text, file.list)]
  frames <- lapply(files.to.get, function(path) {
    parsed <- read.csv(path, header = FALSE, colClasses = 'character')
    parsed$filename <- path
    parsed
  })
  do.call(bind_rows, frames)
}
# Get the list of data frames
# Stack every "scraped_data" CSV under ../output into one frame, with each
# row tagged by its source filename.
scraped.data <- combine.csv.2(files.output, 'scraped_data')
# Ad-hoc interactive exploration of the combined data.
head(scraped.data)
unique(scraped.data$V1)
scraped.data[scraped.data$V1 == 'CORREA',]
unique(scraped.data$filename)
scraped.data[scraped.data$V1 == "HN JUN 2016: 526112, HN JUL 2016: 394584, HN AGO 2016: 426664, HN SEP 2016: 375336."
,]
|
151dc3993a2789203484ea47b4332f93208301d5
|
02633b4c1267a18ee0ac86e26a0af42da91c61e5
|
/server.R
|
a421660d6f278896ff6c1aad24a34e1234b7025e
|
[
"MIT"
] |
permissive
|
voltek62/titanicShiny
|
7c2786fcb55a2e15d6553473c5449180393046a4
|
dafd9a23460aa646c8f8787d16020cabab2a3af1
|
refs/heads/master
| 2016-09-05T17:38:17.212489
| 2015-06-14T20:38:53
| 2015-06-14T20:38:53
| 37,426,425
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,089
|
r
|
server.R
|
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
library(ggplot2)
library(randomForest)
library(C50)
library(dplyr)
# Fix the RNG seed so the random-forest fit is reproducible across sessions.
set.seed(1)

# Titanic training data; stringsAsFactors=FALSE keeps character columns as-is.
train <- read.csv("train.csv", stringsAsFactors=FALSE)
# Select the modelling columns from the raw Titanic frame (including the
# Survived label) and clean them: missing Age -> -1 sentinel, missing Fare
# -> median fare, blank Embarked -> "S", Sex/Embarked coerced to factors.
extractFeatures <- function(data) {
  wanted <- c("Pclass", "Age", "Sex", "Parch", "SibSp",
              "Fare", "Embarked", "Survived")
  out <- data[, wanted]
  out$Age[is.na(out$Age)] <- -1
  out$Fare[is.na(out$Fare)] <- median(out$Fare, na.rm = TRUE)
  out$Embarked[out$Embarked == ""] <- "S"
  out$Sex <- as.factor(out$Sex)
  out$Embarked <- as.factor(out$Embarked)
  out
}
# Return only the predictor columns (no Survived label), in the fixed order
# the models expect.
getFeatures <- function(data) {
  data[, c("Pclass", "Age", "Sex", "Parch", "SibSp", "Fare", "Embarked")]
}
# Server logic: renders a feature-importance bar chart that reacts to the
# UI filters (age range, fare range, passenger class, sex) and the chosen
# algorithm (random forest or C5.0 decision tree).
shinyServer(function(input, output) {

  output$distPlot <- renderPlot({

    train2 <- extractFeatures(train)

    # Age filter: keep rows strictly inside the selected slider range
    train2 <- train2[train2$Age>(input$age[[1]]) & train2$Age<(input$age[[2]]),]
    # Fare filter: same strict-inside semantics
    train2 <- train2[train2$Fare>(input$fare[[1]]) & train2$Fare<(input$fare[[2]]),]
    # Pclass filter ("0" means "all classes")
    if (input$pclass!="0") {
      train2 <- train2[train2$Pclass==input$pclass,]
    }
    # Sex filter ("0" means "both")
    if (input$sex!="0") {
      train2 <- train2[train2$Sex==input$sex,]
    }
    #print(dim(train2))

    if (input$algo=="rf") {
      # Guard: skip fitting (and render nothing) when the filtered data
      # has too few rows for a meaningful forest.
      if(dim(train2)[1]>10)
      {
        rf <- randomForest(getFeatures(train2), as.factor(train2$Survived), importance=TRUE)
        # type=1 importance — presumably mean decrease in accuracy; confirm
        # against the randomForest documentation before relying on it.
        imp_rf <- importance(rf, type=1)
        featureImportance_rf <- data.frame(Feature=row.names(imp_rf), Importance=imp_rf[,1])

        ggplot(featureImportance_rf, aes(x=reorder(Feature, Importance), y=Importance)) +
          geom_bar(stat="identity", fill="#53cfff") +
          coord_flip() +
          theme_light(base_size=20) +
          xlab("") +
          ylab("Importance") +
          ggtitle("Random Forest - Feature Importance\n") +
          theme(plot.title=element_text(size=18))
      }
    }
    else {
      # C5.0 path; the higher row threshold (30) guards the tree fit.
      if(dim(train2)[1]>30)
      {
        tm <- C5.0(x = getFeatures(train2), y = as.factor(train2$Survived),
                   control = C5.0Control(winnow = TRUE))
        featureImportance_tm <-C5imp(tm, metric = "usage", pct = FALSE)
        featureImportance_tm <- cbind(Feature = rownames(featureImportance_tm), featureImportance_tm)

        ggplot(featureImportance_tm, aes(x=reorder(Feature, Overall), y=Overall)) +
          geom_bar(stat="identity", fill="#53cccf") +
          coord_flip() +
          theme_light(base_size=20) +
          xlab("") +
          ylab("Importance") +
          ggtitle("C5.0 Classifier - Feature Importance\n") +
          theme(plot.title=element_text(size=18))
      }
    }
  })
})
|
b7580aefd583b8fc762534bc154ee187d141eaf3
|
af5f1ed04b3d150beff264ea35e8393220c28466
|
/R/statements.r
|
cf34ac54ca9d38f5aba7f088c17aade226cfe349
|
[
"MIT"
] |
permissive
|
zamorarr/rlox
|
a98752171c9765653807e4075db06f60942e0aa7
|
cf11b213d072a7a3c9ccd46921640c8123ec6c91
|
refs/heads/main
| 2023-07-22T08:09:21.513147
| 2021-08-27T02:39:38
| 2021-08-27T02:39:38
| 391,798,133
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,098
|
r
|
statements.r
|
#' Statements
stmt <- function(x, subclass) {
  # Tag a list with the lox statement class hierarchy: the concrete subclass
  # first (for dispatch), then the common "lox_stmt" marker.
  class(x) <- c(subclass, "lox_stmt", class(x))
  x
}
# TRUE when x carries the "lox_stmt" class tag set by stmt().
is_stmt <- function(x) {
  inherits(x, "lox_stmt")
}
#' @param statements list of statements
#' @rdname stmt
stmt_block <- function(statements) {
  # A block statement is just the list of statements it encloses.
  stmt(list(statements = statements), "lox_stmt_block")
}
#' @param expression expression
#' @rdname stmt
stmt_expression <- function(expression) {
  # Wrap a bare expression so it participates in statement dispatch.
  stmt(list(expression = expression), "lox_stmt_expression")
}
#' Function Declaration
#' @param name token
#' @param params list of tokens
#' @param body list of statements
stmt_function <- function(name, params, body) {
  # Function declaration node: name token, parameter tokens, body statements.
  stmt(list(name = name, params = params, body = body), "lox_stmt_function")
}
#' If Statement
#' @param condition expression
#' @param then_branch statement
#' @param else_branch statement
#' @rdname stmt
stmt_if <- function(condition, then_branch, else_branch = NULL) {
  # else_branch stays NULL when the source has no `else` clause.
  fields <- list(condition = condition,
                 then_branch = then_branch,
                 else_branch = else_branch)
  stmt(fields, "lox_stmt_if")
}
#' Print Statement
#' @param expression expression
#' @rdname stmt
stmt_print <- function(expression) {
  # Print statement node carrying the expression to evaluate and display.
  stmt(list(expression = expression), "lox_stmt_print")
}
#' Return Statement
#' @param keyword token
#' @param value expression
stmt_return <- function(keyword, value) {
  # Keep the `return` keyword token for error reporting alongside the value.
  stmt(list(keyword = keyword, value = value), "lox_stmt_return")
}
#' Variable Statement
#' @param name token
#' @param initializer expression
#' @rdname stmt
stmt_variable <- function(name, initializer = NULL) {
  # Variable declaration; initializer is NULL for a bare `var x;`.
  stmt(list(name = name, initializer = initializer), "lox_stmt_variable")
}
#' @param condition expression
#' @param body statement
#' @rdname stmt
stmt_while <- function(condition, body) {
  # While-loop node: loop condition expression plus the body statement.
  stmt(list(condition = condition, body = body), "lox_stmt_while")
}
#' @export
print.lox_stmt <- function(x, ...) {
  # Delegate to the subclass-specific format() method and emit the tree.
  cat(format(x, ...), "\n")
}
#' @export
format.lox_stmt_block <- function(x, pad = 0, ...) {
  # Render a `{` header at the current depth, then every enclosed statement
  # one level deeper. `pad` controls the number of leading dashes.
  dash <- paste(rep("-", pad), collapse = "")
  s <- sprintf("%s||-`{`", dash)
  for (stmt in x$statements) {
    s <- paste0(s, sprintf("\n%s|-%s", dash, format(stmt, pad = pad + 1L)))
  }
  s
}
#' @export
format.lox_stmt_expression <- function(x, pad = 0, ...) {
  # An expression statement renders as just its wrapped expression.
  format(x$expression, pad = pad)
}
#' @export
format.lox_stmt_function <- function(x, pad = 0, ...) {
  # Render the `fun <name>` header, then each parameter token, then the body
  # block, all one level deeper than the header.
  dash <- paste(rep("-", pad), collapse = "")
  s <- sprintf("%s||-fun %s", dash, format(x$name$lexeme, pad = pad))
  # params
  for (param in x$params) {
    s <- paste0(s, sprintf("\n%s|-%s", dash, format(param, pad = pad + 1L)))
  }
  # block
  s <- paste0(s, sprintf("\n%s|-%s", dash, format(x$body, pad = pad + 1L)))
  s
}
#' @export
format.lox_stmt_if <- function(x, pad = 0, ...) {
  # `if` keyword, condition and then-branch one level deeper; the optional
  # else-branch is appended only when present. token_symbol is a package-level
  # lookup table defined elsewhere in this file.
  dash <- paste(rep("-", pad), collapse = "")
  s <- sprintf("%s||-`%s`\n%s\n%s",
    dash, token_symbol$IF,
    format(x$condition, pad = pad + 1L),
    format(x$then_branch, pad = pad + 1L)
  )
  if (!is.null(x$else_branch)) {
    s2 <- sprintf("\n%s|--`%s`\n%s",
      dash, token_symbol$ELSE,
      format(x$else_branch, pad = pad + 1L))
    s <- paste0(s, s2)
  }
  s
}
# NOTE(review): every sibling format method carries #' @export; added here for
# consistency so this S3 method is registered like the others.
#' @export
format.lox_stmt_print <- function(x, pad = 0, ...) {
  # `print` keyword header followed by the expression at the same depth.
  dash <- paste(rep("-", pad), collapse = "")
  sprintf("%s||-`%s`\n%s",
    dash, token_symbol$PRINT,
    format(x$expression, pad = pad))
}
#' @export
format.lox_stmt_variable <- function(x, pad = 0, ...) {
  # `var` keyword plus the declared name; the initializer expression is only
  # rendered when the declaration has one.
  dash <- paste(rep("-", pad), collapse = "")
  s <- sprintf("%s||-`%s`\n%s|-- %s",
    dash, token_symbol$VAR,
    dash, x$name$lexeme)
  # show initialization if there is one
  if (!is.null(x$initializer)) {
    s2 <- sprintf("\n%s", format(x$initializer, pad = pad))
    s <- paste0(s, s2)
  }
  s
}
#' @export
format.lox_stmt_while <- function(x, pad = 0, ...) {
  # `while` keyword, then condition and body one level deeper.
  # NOTE(review): the final assignment makes the function return `s`
  # invisibly; callers (print.lox_stmt) still receive the string, so this is
  # harmless, but dropping the `s <-` would make the return explicit.
  dash <- paste(rep("-", pad), collapse = "")
  s <- sprintf("%s||-`%s`\n%s\n%s",
    dash, token_symbol$WHILE,
    format(x$condition, pad = pad + 1L),
    format(x$body, pad = pad + 1L)
  )
}
|
1652c6b57c336b5f1d5faf571f1253b7c9ba2b0c
|
e97886d0cae8a5f0b563c52e4a6dd8a3765bea2e
|
/R/numbers.R
|
87b6ded61cfdc123ffa818a699c3afbee8a2a74e
|
[
"MIT"
] |
permissive
|
Vaniza/funModeling
|
e6aac3f082b833e5c39250147c66ca327af93e71
|
7033adb501dddf6eb4dca86e8c253c24b7d9ddd7
|
refs/heads/master
| 2021-07-20T00:03:29.059793
| 2017-10-23T01:08:53
| 2017-10-23T01:08:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,836
|
r
|
numbers.R
|
#' @title Plotting numerical data
#' @description
#' One plot containing all the histograms for numerical variables. NA values will not be displayed.
#' @param data data frame
#' @param bins number of bars (bins) to plot each histogram, 10 by default
#' @examples
#' plot_num(mtcars)
#' @return plot containing all numerical variables
#' @export
plot_num <- function(data, bins=10)
{
  ## The concept of 'wide' and 'long' is crucial to understand how to pass the correct data to ggplot.
  # The official documentation is quite clear about it: http://seananderson.ca/2013/10/19/reshape.html
  # melt() stacks every numeric column into (variable, value) pairs; the
  # message about the implicit id-variable choice is suppressed.
  wide_data=suppressMessages(melt(data))
  # One facetted histogram per variable, each panel with its own x scale and
  # its own fill colour; the redundant legend is hidden.
  p=ggplot(data = wide_data, mapping = aes(x = value)) +
    geom_histogram(bins = bins, na.rm=T) + facet_wrap(~variable, scales = 'free_x') + aes(fill = variable) + guides(fill=FALSE)
  p
}
#' @title Profiling numerical data
#' @description
#' Get a metric table with many indicators for all numerical variables, automatically skipping the non-numerical variables. Current metrics are:
#' mean, std_dev: standard deviation, all the p_XX: percentile at XX number, skewness, kurtosis, iqr: inter quartile range, variation_coef: the ratio of sd/mean, range_98 is the limit for which the 98% of fall, range_80 similar to range_98 but with 80%. All NA values will be skipped from calculations.
#' @param data data frame
#' @param print_results prints the result, TRUE by default.
#' @param digits the number of digits to show the result, 2 by default.
#' @examples
#' profiling_num(mtcars)
#' @return metrics table
#' @export
profiling_num <- function(data, print_results=TRUE, digits=2)
{
  # Set the number of printed digits for this call only. The original called
  # options(digits=digits) without restoring it, leaking the setting into the
  # caller's session; save and restore on exit instead.
  old_opts <- options(digits = digits)
  on.exit(options(old_opts), add = TRUE)

  ## A bare atomic vector is wrapped into a one-column data frame named 'var'.
  if(mode(data) %in% c("logical","numeric","complex","character"))
  {
    data <- data.frame(var=data)
    str_input <- "var"
  }

  ## Keep only the numeric-like columns; factors/characters are skipped.
  status <- df_status(data, print_results = FALSE)
  vars_num <- filter(status, type %in% c("numeric", "integer", "logical")) %>% .$variable

  if(length(vars_num)==0)
    stop("None of the input variables are numeric, integer nor logical")

  data_num <- select(data, one_of(vars_num))

  # One row per variable: central tendency, spread, tail percentiles and
  # shape statistics, all computed with NA values removed.
  df_res <- data.frame(
    mean=sapply(data_num, function(x) mean(x, na.rm=TRUE)),
    std_dev=sapply(data_num, function(x) sd(x, na.rm=TRUE)),
    p_01=sapply(data_num, function(x) quantile(x, probs = 0.01, na.rm=TRUE)),
    p_05=sapply(data_num, function(x) quantile(x, probs = 0.05, na.rm=TRUE)),
    p_10=sapply(data_num, function(x) quantile(x, probs = 0.10, na.rm=TRUE)),
    p_25=sapply(data_num, function(x) quantile(x, probs = 0.25, na.rm=TRUE)),
    p_50=sapply(data_num, function(x) quantile(x, probs = 0.50, na.rm=TRUE)),
    p_75=sapply(data_num, function(x) quantile(x, probs = 0.75, na.rm=TRUE)),
    p_90=sapply(data_num, function(x) quantile(x, probs = 0.90, na.rm=TRUE)),
    p_95=sapply(data_num, function(x) quantile(x, probs = 0.95, na.rm=TRUE)),
    p_99=sapply(data_num, function(x) quantile(x, probs = 0.99, na.rm=TRUE)),
    skewness=sapply(data_num, function(x) skewness(x, na.rm=TRUE)),
    kurtosis=sapply(data_num, function(x) kurtosis(x, na.rm=TRUE)),
    iqr=sapply(data_num, function(x) IQR(x, na.rm=TRUE))
  )

  # Derived metrics: relative dispersion plus the 98% and 80% value ranges.
  df_res$variation_coef <- df_res$std_dev/df_res$mean
  df_res$range_98 <- sprintf("[%s, %s]", round(df_res$p_01, digits), round(df_res$p_99, digits))
  df_res$range_80 <- sprintf("[%s, %s]", round(df_res$p_10, digits), round(df_res$p_90, digits))
  df_res <- select(df_res, -p_10, -p_90)

  ## Promote the row names to a proper 'variable' column and reorder columns.
  df_res$variable <- rownames(df_res)
  rownames(df_res) <- NULL
  df_res <- select(df_res, variable, mean, std_dev, variation_coef, everything(), iqr, skewness, kurtosis)

  ## Print or return results. (The redundant `if(missing(print_results))`
  ## branch was removed: the default value already covers that case.)
  if(print_results) print(df_res) else return(df_res)
}
#' @title Compare two vectors
#' @description Obtaing coincident and not coincident elements between two vectors.
#' @param vector_x 1st vector to compare
#' @param vector_y 2nd vector to compare
#' @examples
#' v1=c("height","weight","age")
#' v2=c("height","weight","location","q_visits")
#' res=v_compare(vector_x=v1, vector_y=v2)
#' # Print the keys that didn't match
#' res
#' # Accessing the keys not present in
#' @return Correlation index for all data input variable
#' @export
v_compare <- function(vector_x, vector_y)
{
  # Mark each input's values with a presence flag, then full-outer-join on the
  # values so every distinct key shows whether it occurs in X, Y, or both.
  df_x <- data.frame(vector_x=vector_x, flag_x=1)
  df_y <- data.frame(vector_y=vector_y, flag_y=1)

  df_x$vector_x <- as.character(df_x$vector_x)
  df_y$vector_y <- as.character(df_y$vector_y)

  merge_all <- merge(df_x, df_y, by.x='vector_x', by.y='vector_y', all=TRUE)
  names(merge_all)[1] <- "key"

  # A missing flag after the join means the key was absent from that side.
  merge_all_nona <- merge_all[!is.na(merge_all$flag_x) & !is.na(merge_all$flag_y),]
  not_in_x <- merge_all[is.na(merge_all$flag_x),]
  not_in_y <- merge_all[is.na(merge_all$flag_y),]

  print(sprintf("Coincident in both: %s", nrow(merge_all_nona)))
  print(sprintf("Rows not present in X: %s", nrow(not_in_x)))
  print(sprintf("Rows not present in Y: %s", nrow(not_in_y)))

  # (The unused `list_diff` accumulator from the original was removed.)
  res <- list(
    present_in_both=merge_all_nona$key,
    rows_not_in_X=not_in_x$key,
    rows_not_in_Y=not_in_y$key
  )

  return(res)
}
#' @title Get correlation against target variable
#' @description Obtain correlation table for all variables against target variable. Only numeric variables are analyzed (factor/character are skippted automatically).
#' @param data data frame
#' @param str_target string variable to predict
#' @examples
#' correlation_table(data=heart_disease, str_target="has_heart_disease")
#' @return Correlation index for all data input variable
#' @export
correlation_table <- function(data, str_target)
{
  data=as.data.frame(data)
  # Coerce the target to numeric so it can enter the Pearson correlation matrix.
  data[, str_target]=as.numeric(data[, str_target])
  # give_me_num_vars() is a package-internal helper defined elsewhere;
  # presumably it returns the numeric column names excluding the target -- confirm.
  data=data[, c(give_me_num_vars(data, str_target), str_target)]
  # Pairwise-complete correlations, rounded to two decimals.
  df_cor=as.data.frame(round(cor(data, use="complete.obs" ),2))
  df_cor$Variable = rownames(df_cor)
  # Keep only the target's correlation column plus the variable names,
  # reorder so Variable comes first, then sort descending by correlation.
  df_cor=df_cor[, names(df_cor) %in% c(str_target, "Variable")]
  df_cor=df_cor[, c(2,1)]
  df_cor_final=df_cor[order(-df_cor[,2]) , ]
  row.names(df_cor_final) = NULL
  return(df_cor_final)
}
#' @title Transform a variable into the [0-1] range
#' @description Range a variable into [0-1], assigning 0 to the min and 1 to the max of the input variable. All NA values will be removed.
#' @param var numeric input vector
#' @examples
#' range01(mtcars$cyl)
#' @return vector with the values scaled into the 0 to 1 range
#' @export
range01 <- function(var)
{
  # Shift by the minimum and divide by the span, so min -> 0 and max -> 1.
  # NA values are ignored when computing the bounds but propagate in the output.
  lo <- min(var, na.rm = TRUE)
  hi <- max(var, na.rm = TRUE)
  (var - lo) / (hi - lo)
}
#' @title Frequency table for categorical variables
#' @description Retrieves the frequency and percentage for str_input
#' @param data input data containing the variable to describe
#' @param str_input string input variable (if empty, it runs for all numeric variable), it can take a single character value or a character vector.
#' @param plot flag indicating if the plot is desired, TRUE by default
#' @param na.rm flag indicating if NA values must be included in the analysis, FALSE by default
#' @param path_out path directory, if it has a value the plot is saved
#' @examples
#' freq(data=heart_disease$thal)
#' freq(data=heart_disease, str_input = c('thal','chest_pain'))
#' @return vector with the values scaled into the 0 to 1 range
#' @export
freq <- function(data, str_input=NA, plot=TRUE, na.rm=FALSE, path_out)
{
  if(missing(path_out)) path_out=NA
  ## If str_input is NA then it runs for all variables in case it is not a single vector.
  # NOTE(review): the intended test is likely sum(is.na(str_input)) > 0; as
  # written it is sum(is.na(str_input) > 0), which happens to give the same
  # truthiness because TRUE > 0 is TRUE -- behaviour is unchanged.
  if(sum(is.na(str_input)>0))
  {
    # True if it is a single vector: wrap it as a one-column frame named 'var'.
    if(mode(data) %in% c("logical","numeric","complex","character"))
    {
      data=data.frame(var=data)
      str_input="var"
    } else {
      ## Keeping all categorical variables (factor/character columns).
      data=data.frame(data)
      status=df_status(data, print_results = F)
      str_input=status[status$type %in% c("factor", "character"), 'variable']
      if(length(str_input)==0)
        stop("None of the input variables are factor nor character")
    }
  }
  ## Iterator: a single variable returns its table; several variables print
  ## each table and return a summary string instead.
  tot_vars=length(str_input)
  if(tot_vars==1)
  {
    res=freq_logic(data = data, str_input=str_input, plot, na.rm, path_out = path_out)
    return(res)
  } else {
    for(i in 1:tot_vars)
    {
      res=freq_logic(data = data, str_input=str_input[i], plot, na.rm, path_out = path_out)
      print(res)
      cat("", sep="\n")
    }
    return(sprintf("Variables processed: %s", paste(str_input, collapse = ", ")))
  }
}
# Internal worker for freq(): tabulates one variable, optionally plots a
# horizontal frequency bar chart, and returns the frequency table.
freq_logic <- function(data, str_input, plot, na.rm, path_out)
{
  if(!na.rm) {
    # if exclude = NULL then it adds the NA cases as their own category
    tbl=data.frame(table(factor(data[,str_input], exclude = NULL)))
  } else {
    tbl=data.frame(table(data[,str_input]))
  }

  # Frequency, percentage and cumulative percentage, sorted descending.
  tbl=rename(tbl, category=Var1, frequency=Freq) %>% arrange(-frequency)
  tbl$percentage=round(100*tbl$frequency/sum(tbl$frequency),2)
  tbl$cumulative_perc=cumsum(tbl$percentage)
  # Force the last cumulative value to exactly 100 (rounding can drift).
  tbl$cumulative_perc[length(tbl$cumulative_perc)]=100.00

  ## calculating best font size: fewer categories -> larger labels
  uq=nrow(tbl)
  if(uq<=10)
  {
    letter_size=3
    axis_size=12
  } else if(uq<=20){
    letter_size=2.5
    axis_size=10
  } else {
    letter_size=2
    axis_size=8
  }

  if(plot)
  {
    # Plot: bars ordered by percentage, each labelled "count (pct%)".
    tbl_plot=tbl
    tbl_plot$label=sprintf('%s (%s%%)', tbl_plot$frequency, tbl_plot$percentage)
    tbl_plot$category=factor(tbl_plot$category, levels = tbl_plot$category[order(tbl_plot$percentage)])

    # High-cardinality variables (>= 200 categories) are not plotted.
    if(nrow(tbl_plot)<200)
    {
      p=ggplot(tbl_plot,aes(x=tbl_plot$category,y=tbl_plot$frequency,fill=tbl_plot$category, label=label)) +
        geom_bar(stat='identity') + coord_flip() + theme_bw() +
        theme(
          panel.grid.minor=element_blank(),
          panel.grid.major =element_blank(),
          legend.title=element_blank(),
          plot.title = element_text(vjust=2),
          axis.ticks.y=element_blank(),
          axis.ticks.x=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_text(size=axis_size),
          axis.title.x=element_text(size=12, margin=margin(10,0,0,0)),
          axis.title.y=element_text(size=14, margin=margin(0,10,0,0))
        ) + ylab("Frequency / (Percentage %)") + xlab(str_input) +
        geom_text( color="#151515", size=letter_size, hjust=-.06) +
        guides(fill=F) +
        scale_y_continuous(expand = c(0,0),limits = c(0, max(tbl_plot$frequency)*1.2))

      ## Save plot to disk when a path is given; otherwise draw to the device.
      if(!is.na(path_out))
      {
        dir.create(path_out, showWarnings = F)

        if(dir.exists(path_out))
        {
          jpeg(sprintf("%s/%s.jpeg", path_out, str_input), width= 12.25, height= 6.25, units="in",res=200, quality = 90)
          plot(p)
          dev.off()
        } else {
          warning(sprintf("The directory '%s' doesn't exists.", path_out))
        }
      } else {
        plot(p)
      }
    } else {
      message_high_card=sprintf("Skipping plot for variable '%s' (more than 200 categories)", str_input)
    }
  }

  # Rename the category column to the variable's own name before returning.
  colnames(tbl)[1]=str_input
  tbl[,str_input]=as.character(tbl[,str_input])

  if(exists("message_high_card")) {warning(message_high_card)}

  return(tbl)
}
|
336a35a561bbb19f78bbdcf2018d1aee14d017fd
|
75228b6212e470735565ae2ed604d5bcee757860
|
/man/run_stacking_child_models.Rd
|
15f2a7aa6e974d0d06fee32d81aeb436b3d1eff7
|
[] |
no_license
|
dahcase/mbgstacking
|
54c34ba5cd393f394c13e6fe0e15450489b32a44
|
7c0f335f680f5a78cc5bb19dda7e2e0de7e855ce
|
refs/heads/develop
| 2020-12-30T12:00:31.533693
| 2019-03-05T00:11:20
| 2019-03-05T00:11:20
| 91,488,490
| 0
| 0
| null | 2017-07-06T06:14:20
| 2017-05-16T17:57:24
|
R
|
UTF-8
|
R
| false
| true
| 530
|
rd
|
run_stacking_child_models.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_stacking_child_models.R
\name{run_stacking_child_models}
\alias{run_stacking_child_models}
\title{Run child models for stacking}
\usage{
run_stacking_child_models(st)
}
\arguments{
\item{st}{stacker governer.}
}
\description{
Fits all models specified in the passed stacking object. M*C*K + M models are run where M is the number of initialized models,
C is the number of columns/iterations for crossfold validation and K is the number of folds.
}
|
aadc06449c80df1bd221c754e848bc39a59a6a5d
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/blorr/inst/testfiles/blr_pairs_cpp/libFuzzer_blr_pairs_cpp/blr_pairs_cpp_valgrind_files/1609956369-test.R
|
9444f14e008134107a7c22a87dbb2be88d8fefa6
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 858
|
r
|
1609956369-test.R
|
testlist <- list(x = -5.78579071015403e+303, y = c(2.36984410282499e-310, 3.22526053605166e-319, 4.02152998100784e-87, 1.26694375386342e-279, 8.28682114717519e+73, 4.89932798908251e-306, -4.80948513475625e+306, 5.48684425442826e-310, 3.28027968637254e-317, 0, NaN, 3.91107305076628e-274, -1.34765550943377e+28, 7.54642307645629e+179, 2.43773964113643e+35, 2.45333072621769e+35, 8.85449540542246e-159, 2.14327978499502e-312, 0, 128978.107570072, 1.59875023337884e-112, 8.98638192407672e-243, -7.03783040029927e-87, -1.303145228681e+28, 9.36372775213109e-97, 8.38205310386356e-309, 7.29111854293147e-304, -1.30736177482179e+28, 1.43267124225504e+214, 2.41745008861872e+35, 2.58996194552223e-307, 3.13178067538529e-115, 3.04553020513337e-115, -5.15273908894498e-36, 9.9268544386519e+247))
result <- do.call(blorr:::blr_pairs_cpp,testlist)
str(result)
|
db3013120ba31063beb3a9d4c21854550663e434
|
e14092f46ac4d514a18577c27f430ad2520d4ad5
|
/transform/impute_na.R
|
eb6dda23e180455a949c20058acfee816d8dc63c
|
[] |
no_license
|
jennyio/forecast
|
b39924e91f1456b237f607742976d353ef9531f3
|
f11e6937f809eae37c819684f34cb189cecc8924
|
refs/heads/main
| 2023-08-24T10:21:16.917391
| 2021-10-24T20:41:16
| 2021-10-24T20:41:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 409
|
r
|
impute_na.R
|
#' impute_na
#'
#' @param x numeric vector, possibly containing NA values
#' @description Impute missing values by 'next observation carried backwards'
#'   (NOCB). If imputation fails (e.g. the vector is all-NA or imputeTS is
#'   unavailable), an all-NA vector of the same length is returned instead of
#'   raising an error.
#' @return numeric vector the same length as `x`
#' @export
impute_na <- function(x) {
  # nocb = next observation carried backwards (the original comment had the
  # option name misspelled as "nobc")
  ret <- try(imputeTS::na_locf(x, option = 'nocb'), silent = TRUE)
  # inherits() is the idiomatic class test; fall back to all-NA on failure.
  if (inherits(ret, 'try-error')) {
    ret <- rep(NA_real_, length(x))
  }
  return(ret)
}
|
d13b56c2cc7a1284e1373410173dfbd0fc1c6dbd
|
eb68489f8c7c86a82cbbe80a37024e75b9e22e67
|
/SampleRCodeForOccurranceAnalysis.R
|
f743754649b857557deed5995a398fa567ed7fdb
|
[] |
no_license
|
kelseyefisher/employing-vhf-radio-telemetry-to-recreate-monarch-flight-paths
|
aa16065f0e14a12971afacf31bd4813c897263d9
|
3e3bcb803b16d8eb5983b15dfc8aad35d57439c8
|
refs/heads/master
| 2022-04-03T16:40:48.601299
| 2020-02-19T16:46:00
| 2020-02-19T16:46:00
| 237,479,055
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,192
|
r
|
SampleRCodeForOccurranceAnalysis.R
|
##################################################################################################
## Monarch VHF-telemetry occurrence analysis (ctmm workflow).
## Reads pre-formatted estimated locations, fits a continuous-time movement
## model to one animal, and builds an occurrence distribution.
##read FORMATTED file
Monarch<-read.csv('RadioTagged_SummarizedEstimatedLocations.csv', header=TRUE)
head(Monarch)
#############################################################################################
library(ctmm)
##turn into a telemetry object (ctmm's native tracking-data class)
Monarch<-as.telemetry(Monarch)
###Plot all monarchs, one colour per animal
plot(Monarch,
     col=rainbow(length(Monarch)))
######################
####2016 Monarch a####
######################
#plot only the one monarch
plot(Monarch$`a`)
#fit a variogram (empirical autocorrelation structure)
vg <- variogram(Monarch$`a`)
#guess the best model fit (non-interactive initial parameter guess)
GUESS <- ctmm.guess(Monarch$`a`,
                    interactive = FALSE)
#include the error estimates in the calculation
GUESS$error<-TRUE
#plot the variogram and the error guess variogram
plot(vg, GUESS)
#see all of the model fit AIC values
FIT<-ctmm.select(Monarch$`a`,GUESS,verbose = TRUE)
summary(FIT)
#Select best model and store the fit model
#NOTE(review): this refits the same candidates as the verbose call above;
#keeping only the lowest-AIC model returned here.
FIT<-ctmm.select(Monarch$`a`,GUESS)
summary(FIT)
#####Occurrence#####
OCCU1<-occurrence(Monarch$`a`,FIT)
plot(OCCU1)
plot(Monarch$`a`, UD=OCCU1)
|
b34aec9927b98c2e758fb86bf245cb6b2ec1e43a
|
da8e7f6767e1c2a3483f10bb6f1371ee208006ca
|
/main.R
|
f824d0f7bb5c836b23d4a69fb2c5757d4bc79411
|
[] |
no_license
|
emptycoder/TikhonovMethod
|
04daae18e4e0fd630ebb4b0da7476330896e3e4a
|
c7cebdbc922d4fdb348799b3eaf93b4114ce9d5e
|
refs/heads/master
| 2023-03-26T10:46:32.009520
| 2021-03-27T08:50:56
| 2021-03-27T08:50:56
| 351,232,242
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,907
|
r
|
main.R
|
# Tikhonov-regularised least squares on two systems read from an Excel file,
# followed by scatter plots of observed vs fitted values.
# library() (not require()) so a missing dependency fails loudly.
library(readxl)
library(functional)
library(ggplot2)
source("methods.R")

data <- read_xlsx("matrixAndVector.xlsx")

# Preparing data: right-hand-side vectors and coefficient matrices; rows with
# non-finite entries are dropped.
vector <- as.vector(data$Vector)
vector2 <- as.vector(data$Vector2)
matrix <- as.matrix(data[,3:6])
matrix <- matrix[apply(matrix, 1, Compose(is.finite, all)),]
matrix2 <- as.matrix(data[,10:13])
matrix2 <- matrix2[apply(matrix2, 1, Compose(is.finite, all)),]

# Matrix generation
#write.csv(matrix(runif(60 * 1, min = 0, max = 1), ncol = 1, nrow = 60), file = "test1.csv")

# Regularisation search window and tolerances.
# BUG FIX: the original referenced alpha1/alpha2 in the sanity check below
# without ever defining them (the readline() prompts were commented out),
# which aborted the script with "object 'alpha1' not found".
alpha1 <- 0.01
alpha2 <- 0.1
delta <- 0.0001
h <- 0.0001
eps <- 0.00001

transponentMatrix <- t(matrix)
# Normal equations: A^T A and A^T b.
MTGmatrix <- transponentMatrix %*% matrix
MTVcoefficients <- transponentMatrix %*% vector

transponentMatrix2 <- t(matrix2)
MTGmatrix2 <- transponentMatrix2 %*% matrix2
MTVcoefficients2 <- transponentMatrix2 %*% vector2

if (alpha1 > alpha2) {
  stop("alpha1 > alpha2")
}

x1 <- TihonovMethod(MTGmatrix, MTVcoefficients, alpha1 = alpha1, alpha2 = alpha2, delta = delta, h = h, eps = eps)
x2 <- TihonovMethod(MTGmatrix2, MTVcoefficients2, alpha1 = alpha1, alpha2 = alpha2, delta = delta, h = h, eps = eps)

graphicsData <- data.frame(vector, matrix %*% x1)
colnames(graphicsData) <- c("x", "y")
linearGraphicsData <- data.frame(vector2, matrix2 %*% x2)
colnames(linearGraphicsData) <- c("x", "y")

# Scatter diagrams. aes() now references the data columns directly instead of
# `df$col`, so ggplot evaluates them in the data context (the idiomatic form).
ggplot(data = graphicsData, mapping = aes(x = x, y = y)) +
  theme_light() +
  geom_point(colour = "black", size = 1) +
  geom_abline()

ggplot(data = linearGraphicsData, mapping = aes(x = x, y = y)) +
  theme_light() +
  geom_point(colour = "red", size = 1) +
  geom_abline()
|
dd74a8f9466e2995c2a139cd9fe59042be998167
|
13f8119d64bfc96127215510f67ae69f1957edd3
|
/man/get_nfl_positions.Rd
|
f61ff51427c36a6114082ff113c8e2a272c0e656
|
[] |
no_license
|
kimjam/dfstoolkit
|
1a8c8642ee8adedcbb4895a91eb63c2fa53e2d65
|
885956efd26c6db8f72ba72e17ab9ef13c44500d
|
refs/heads/master
| 2020-05-31T20:37:41.533738
| 2017-09-04T17:56:38
| 2017-09-04T17:56:38
| 38,501,171
| 0
| 1
| null | 2017-09-04T17:56:39
| 2015-07-03T16:15:58
|
R
|
UTF-8
|
R
| false
| true
| 433
|
rd
|
get_nfl_positions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_boxscore_stats.R
\name{get_nfl_positions}
\alias{get_nfl_positions}
\title{get_nfl_positions}
\usage{
get_nfl_positions(links)
}
\arguments{
\item{links}{Player page urls}
}
\value{
Returns positions.
}
\description{
Get NFL player's position if snap counts table is unavailable.
If player has two listed positions, first one listed will be taken.
}
|
e37fa537522505dd793e6c571853736ab39046d4
|
d3849c224f6976451efb8b274e8fb340884ef9ee
|
/readphase.R
|
0b5dc841957a2c2fc57ed34d6cc14f19397508b3
|
[] |
no_license
|
ijwilson/sharedhaplotypes
|
5588ae67df4ab2caa272bdcf4d0d9e77421a60cd
|
f6b39374e34de20f24d402c46936d3533866880e
|
refs/heads/master
| 2020-05-23T09:04:43.565162
| 2019-10-31T00:00:00
| 2019-10-31T00:00:00
| 80,431,476
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,222
|
r
|
readphase.R
|
# Analysis of PHASE haplotype output for shared-haplotype frequencies.
source("read_phase_functions.R")
# Read the pairs output.  We have columns
pairs <- readpairs("phase1K.out_pairs")
colnames(pairs) <- c("name", "hap1", "hap2", "pprob")
## Split the phased output
splitter(pairs,c(4,8,11))
## The first row that begins HG (boundary between case and control samples)
w = min(grep("HG",pairs[,1]))
## Get the raw haplotype frequencies
## These are the frequencies from the match1K output
raw <- as.matrix(read.table("match1K"))
raw <- t(raw)
b = apply(raw,1,paste,collapse="")
## Case rows: everything before the first HG row.
bcase <- mapply(function(x,y) x[1:(y[1]-1),], b, w, SIMPLIFY=FALSE)
# NOTE(review): casefreq() is called here but its in-file definition appears
# only further down; this only works if read_phase_functions.R already
# defines a casefreq() (which the later definition then shadows) -- confirm.
casefreq(bcase[[1]],3,nsplit=4)
casefreq(bcase[[1]],2,nsplit=4)
## Control rows: from the first HG row to the second boundary.
bcontrol <- mapply(function(x,y){return(x[y[1]:y[2],])},b, w, SIMPLIFY=FALSE)
tb1 = table(c(paste(bcontrol[[1]][,7]),paste(bcontrol[[1]][,11])))
table(c(paste(bcontrol[[1]][,7]),paste(bcontrol[[1]][,12])))
b[[1]][1:10,]
table(c(paste(bcontrol[[1]][,6]),paste(bcontrol[[1]][,14])))
# Sum posterior probabilities (column 4) per family x haplotype combination.
# `cl` selects the haplotype column; rows flagged "A" in column 5 take their
# haplotype from an alternative column block offset by nsplit.
casefreq <- function(bbb,cl=1,nsplit=3) {
  fam <- paste(bbb[,1])
  hap <- paste(bbb[,5+cl])
  hap[bbb[,5]=="A"] <- paste(bbb[bbb[,5]=="A",6+nsplit+cl])
  tapply(bbb[,4],data.frame(fam=fam,hap=hap),sum)
}
# NOTE(review): match1K is re-read and re-processed here, duplicating the
# block above -- presumably leftover from interactive work.
raw <- as.matrix(read.table("match1K"))
raw <- t(raw)
b = apply(raw,1,paste,collapse="")
# Count haplotypes matching the motif ATGTGTCAC at positions 15-23.
u <- substr(paste(b),15,23)=="ATGTGTCAC"
print(sum(u))
|
af1d2fd9b604303a03efd48c7d97bed88cd61f6b
|
5b722b3e230f078b59c232a0554c416150232bbf
|
/R/util-convert_otn_erddap_to_att.r
|
67a2fc641719e975cc982a1b1dc80ff705af947d
|
[] |
no_license
|
jsta/glatos
|
3abdd25c67455cacf04cbb7a9ca53718745ab917
|
f1e27cf63da53b2ae4c46e4b47d338c1f558d004
|
refs/heads/master
| 2022-07-29T00:04:04.783120
| 2022-07-05T18:56:00
| 2022-07-05T18:56:00
| 220,477,927
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,578
|
r
|
util-convert_otn_erddap_to_att.r
|
#' Convert detections, transmitter, receiver, and animal metadata to a format
#' that ATT accepts.
#'
#' Convert \code{glatos_detections} and transmitter, receiver, and animal
#' metadata from the OTN ERDDAP to \code{ATT} format for use in the Animal
#' Tracking Toolbox (\url{https://github.com/vinayudyawer/ATT}).
#'
#' @param detectionObj a data frame from \code{read_glatos_detections}
#'
#' @param erdTags a data frame with tag release data from the OTN ERDDAP
#'
#' @param erdRcv a data frame with receiver station data from the OTN ERDDAP
#'
#' @param erdAni a data frame with animal data from the OTN ERDDAP
#'
#'
#' @details This function takes 4 data frames containing detection, and ERDDAP
#' data from the tags, receivers, and animals tables, and transforms them into
#' 3 \code{tibble::tibble} objects inside of a list. The input that AAT uses
#' to get this data product is located here:
#' https://github.com/vinayudyawer/ATT/blob/master/README.md and our mappings
#' are found here: https://gitlab.oceantrack.org/GreatLakes/glatos/issues/83
#' in a comment by Ryan Gosse. The OTN ERDDAP instance is here:
#' https://members.oceantrack.org/erddap/tabledap/index.html but please note
#' that this only contains public data.
#'
#' @author Ryan Gosse
#'
#' @return a list of 3 tibble::tibbles containing tag dectections, tag metadata,
#' and station metadata, to be ingested by VTrack/ATT
#'
#' @examples
#'
#' #--------------------------------------------------
#' # EXAMPLE #1 - loading from the OTN ERDDAP + vignettes
#'
#' library(glatos)
#'
#' #get path to example files from OTN ERDDAP
#' ani_erd_file <- system.file("extdata", "otn_aat_animals.csv",
#' package = "glatos")
#' animals <- read.csv(ani_erd_file) # load the CSVs from ERDDAP
#'
#' tags_erd_file <- system.file("extdata", "otn_aat_tag_releases.csv",
#' package = "glatos")
#' tags <- read.csv(tags_erd_file)
#'
#' rcv_erd_file <- system.file("extdata", "otn_aat_receivers.csv",
#' package = "glatos")
#' stations <- read.csv(rcv_erd_file)
#'
#' #Remove first row; (blank or metadata about the column)
#' animals <- animals[-1,]
#' tags <- tags[-1,]
#' stations <- stations[-1,]
#'
#' #get blue shark example data
#' shrk_det_file <- system.file("extdata", "blue_shark_detections.csv",
#' package = "glatos")
#' blue_shark_detections <- read_otn_detections(shrk_det_file) # load shark data
#'
#' ATTdata <- convert_otn_erddap_to_att(blue_shark_detections,
#' tags, stations, animals)
#' @export
convert_otn_erddap_to_att <- function(detectionObj, erdTags, erdRcv, erdAni) {
  # Transmitter IDs already containing "-" are assumed fully qualified;
  # otherwise join codespace and id as "codespace-id".
  transmitters <-
    if(all(grepl("-", detectionObj$transmitter_id, fixed=TRUE))){
      detectionObj$transmitter_id
    } else {
      concat_list_strings(detectionObj$transmitter_codespace, detectionObj$transmitter_id)
    }
  tagMetadata <- unique(tibble::tibble( # Start building Tag.Metadata table
    Tag.ID = detectionObj$animal_id,
    Transmitter = as.factor(transmitters),
    Common.Name = as.factor(detectionObj$common_name_e)
  ))
  tagMetadata <- unique(tagMetadata) # Cut out dupes
  nameLookup <- tibble::tibble( # Get all the unique common names
    Common.Name = unique(tagMetadata$Common.Name)
  )
  # One WoRMS web-service lookup per unique common name (network I/O).
  nameLookup <- dplyr::mutate(nameLookup, # Add scinames to the name lookup
    Sci.Name = as.factor(purrr::map(nameLookup$Common.Name, query_worms_common))
  )
  # Apply sci names to frame
  tagMetadata <- dplyr::left_join(tagMetadata, nameLookup)
  # Matching cols that have different names
  colnames(erdTags)[colnames(erdTags) == "tag_device_id"] <- "transmitter_id"
  detectionObj <- dplyr::left_join(detectionObj, erdTags)
  erdRcv <- dplyr::mutate(erdRcv,
    station = as.character(purrr::map(erdRcv$receiver_reference_id,
                                      extract_station))
  )
  # Matching cols that have different names
  colnames(erdAni)[colnames(erdAni) == "animal_reference_id"] <- "animal_id"
  detectionObj <- dplyr::left_join(detectionObj, erdAni)
  # Release metadata pulled from the joined detection rows.
  releaseData <- tibble::tibble( # Get the rest from detectionObj
    Tag.ID = detectionObj$animal_id,
    Tag.Project = as.factor(detectionObj$animal_project_reference),
    Release.Latitude = as.double(detectionObj$latitude),
    Release.Longitude = as.double(detectionObj$longitude),
    Release.Date = as.Date(detectionObj$time),
    Sex = as.factor(detectionObj$sex)
  )
  releaseData <- dplyr::mutate(releaseData,
    # Convert sex text and null missing columns (ATT expects these fields
    # even when the ERDDAP source has no data for them).
    Sex = as.factor(purrr::map(Sex, convert_sex)),
    Tag.Life = as.integer(NA),
    Tag.Status = as.factor(NA),
    Bio = as.factor(NA)
  )
  # Final version of Tag.Metadata
  tagMetadata <- unique(dplyr::left_join(tagMetadata, releaseData))
  # Cross-join detections to receiver deployments via the `dummy` column, then
  # keep only detections falling inside a deployment/recovery window.
  detectionObj <- detectionObj %>%
    dplyr::mutate(dummy=TRUE) %>%
    dplyr::left_join(dplyr::select(erdRcv %>% dplyr::mutate(dummy = TRUE),
      rcv_latitude = latitude,
      rcv_longitude = longitude,
      station,
      receiver_model,
      receiver_serial_number,
      dummy,
      deploy_datetime_utc = time,
      recovery_datetime_utc)) %>%
    dplyr::mutate(deploy_datetime_utc = as.POSIXct(deploy_datetime_utc,
                                                   format = "%Y-%m-%dT%H:%M:%OS"),
                  recovery_datetime_utc = as.POSIXct(recovery_datetime_utc,
                                                     format="%Y-%m-%dT%H:%M:%OS")) %>%
    dplyr::filter(detection_timestamp_utc >= deploy_datetime_utc,
                  detection_timestamp_utc <= recovery_datetime_utc) %>%
    dplyr::mutate(ReceiverFull = concat_list_strings(receiver_model,
                                                     receiver_serial_number)) %>%
    dplyr::select(-dummy)
  # The three tibbles the ATT format requires.
  detections <- tibble::tibble(
    Date.Time = detectionObj$detection_timestamp_utc,
    Transmitter = as.factor(detectionObj$transmitter_id),
    Station.Name = as.factor(detectionObj$station),
    Receiver = as.factor(detectionObj$ReceiverFull),
    Latitude = detectionObj$deploy_lat,
    Longitude = detectionObj$deploy_long,
    Sensor.Value = as.integer(detectionObj$sensorvalue),
    Sensor.Unit = as.factor(detectionObj$sensorunit)
  )
  stations <- unique(tibble::tibble(
    Station.Name = as.factor(detectionObj$station),
    Receiver = as.factor(detectionObj$ReceiverFull),
    Installation = as.factor(NA),
    Receiver.Project = as.factor(detectionObj$collectioncode),
    Deployment.Date = detectionObj$deploy_datetime_utc,
    Recovery.Date = detectionObj$recovery_datetime_utc,
    Station.Latitude = as.double(detectionObj$deploy_lat),
    Station.Longitude = as.double(detectionObj$deploy_long),
    Receiver.Status = as.factor(NA)
  ))
  att_obj <- list(
    Tag.Detections = detections,
    Tag.Metadata = tagMetadata,
    Station.Information = stations
  )
  class(att_obj) <- "ATT"
  return(att_obj)
}
# Pairwise-join two equal-length vectors/lists of strings, element by element,
# using `sep` between the pair (e.g. receiver model + serial number).
# Stops with an informative error when the lengths differ.
concat_list_strings <- function(list1, list2, sep = "-") {
  n_left <- length(list1)
  n_right <- length(list2)
  if (n_left != n_right) {
    stop(sprintf("Lists are not the same size. %d != %d.",
                 n_left, n_right))
  }
  paste(list1, list2, sep = sep)
}
# Query WoRMS (World Register of Marine Species) by a common (vernacular)
# name and return the matching scientific name(s).
# NOTE(review): performs a live HTTP request via jsonlite::fromJSON; any
# failure (network error, no match, malformed JSON) is converted into a
# hard stop().  The queried URL is printed as progress/debug output.
query_worms_common <- function(commonName) {
  url <- utils::URLencode(
    sprintf("http://www.marinespecies.org/rest/AphiaRecordsByVernacular/%s",
            commonName))
  tryCatch({
    print(url)  # echo the request URL before fetching
    payload <- jsonlite::fromJSON(url)
    # May be length > 1 if several Aphia records match the vernacular name.
    return(payload$scientificname)
  }, error = function(e){
    print(geterrmessage())
    stop(sprintf('Error in querying WoRMS, %s was probably not found.',
                 commonName))
  })
}
# Normalize a sex code: "F"/"FEMALE" -> "FEMALE", "M"/"MALE" -> "MALE"
# (case-insensitive); any other value is passed through unchanged.
convert_sex <- function(sex) {
  code <- toupper(sex)
  if (code %in% c("F", "FEMALE")) {
    "FEMALE"
  } else if (code %in% c("M", "MALE")) {
    "MALE"
  } else {
    sex
  }
}
# Convert a receiver reference id of the form "<array>_<station>" into the
# station part: split on "_" and drop the leading array name.  If the
# remainder itself contains underscores, all trailing pieces are returned.
extract_station <- function(reciever_ref) {
  ref <- as.character(reciever_ref)
  pieces <- unlist(strsplit(c(ref), c("_")))
  pieces[-1]
}
|
23aa12454b67e816a21903df9060474261338511
|
7b9cfba80503c32354d36c58f1fddb1c577fd610
|
/functions/judge-lime.R
|
04d808131824cc13e3141b5f6bc39ed1e7ea2e0e
|
[
"MIT"
] |
permissive
|
DanOvando/scrooge
|
5e321c67797268e075273e7f710602e71bd2ef31
|
458f5950b2a029523b89fac20ac8b8e65e822d14
|
refs/heads/master
| 2021-08-22T17:36:05.134247
| 2018-10-22T22:48:23
| 2018-10-22T22:48:23
| 71,493,019
| 0
| 0
|
MIT
| 2018-07-26T22:56:01
| 2016-10-20T18:38:32
|
JavaScript
|
UTF-8
|
R
| false
| false
| 1,477
|
r
|
judge-lime.R
|
# Compare model-estimated values (e.g. LIME fishing mortality) against
# observed values, returning a comparison plot plus RMSE / bias summary.
#
# Args:
#   observed:  data frame of observations with a `year` column and the
#              column named by `observed_variable`.
#   predicted: data frame of estimates with columns `variable`, `year`,
#              `estimate`, `lower`, `upper`.
#   observed_variable:  unquoted column of `observed` to evaluate (default f).
#   predicted_variable: string naming the variable to pull from `predicted`.
#   group_level: unquoted grouping column (currently must be `year`; see NOTE).
#
# Returns: list(comparison_plot = ggplot object,
#               comparison_summary = data frame with `rmse` and `bias`).
judge_lime <-
  function(observed,
           predicted,
           observed_variable = f,
           predicted_variable = "f_t",
           group_level = year) {
    group_level <- enquo(group_level)
    observed_variable <- enquo(observed_variable)

    predicted_values <- predicted %>%
      filter(variable == predicted_variable)

    # Mean observed value per group, re-indexed so the first year is 1 to
    # match the model's internal time index.
    # NOTE(review): `year` is hardcoded here and in the join below, so
    # `group_level` currently only works for `year` (as in the original).
    observed_values <- observed %>%
      dplyr::select(!!group_level, !!observed_variable) %>%
      group_by(!!group_level) %>%
      summarise(observed = mean(!!observed_variable)) %>%
      mutate(year = year - min(year) + 1)

    comparison <- observed_values %>%
      left_join(predicted_values, by = "year") %>%
      ungroup() ## fix this later for full flexibility

    # RMSE and median relative bias over years that have an estimate.
    comparison_summary <- comparison %>%
      filter(!is.na(estimate)) %>%
      summarise(rmse = sqrt(mean((estimate - observed)^2)),
                bias = median((estimate - observed) / observed))

    comparison_plot <- comparison %>%
      ggplot() +
      geom_pointrange(aes(year, estimate, ymin = lower, ymax = upper), alpha = 0.75) +
      geom_line(aes(year, observed), color = "red") +
      geom_point(aes(year, observed), color = "red") +
      labs(
        caption = glue::glue(
          "RMSE = {prettyNum(comparison_summary$rmse, digits = 2)} ; Bias = {prettyNum(comparison_summary$bias, digits = 2)}"
        )
      )

    list(comparison_plot = comparison_plot,
         comparison_summary = comparison_summary)
  }
|
a3201a8777c6583f42c2042fd0059101ce484411
|
9db210febfc62d8eec279ab6b577fd44cfc067f4
|
/R/density_LS.R
|
20ca85b16701d33abc931b56ffc4d0ac5e717f1f
|
[] |
no_license
|
cran/EBCHS
|
b9b35b06efcf5788a57c88df9b1bea64b000cf35
|
c432c0c997145eaeb9316bf610b8d9096d493dcb
|
refs/heads/master
| 2023-05-20T07:59:52.157989
| 2021-06-01T06:20:08
| 2021-06-01T06:20:08
| 372,897,190
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,612
|
r
|
density_LS.R
|
#' log-density derivatives--parametric approach
#'
#' Assuming the log density of the chi-squared statistics admits a parametric
#' form, this function fits the score model by least squares and returns the
#' first through fourth log-density derivatives evaluated at each statistic.
#'
#' @param x a sequence of chi-squared test statistics
#' @return a list: the first-to-fourth log density derivatives
#'
#' @examples
#' p = 1000
#' k = 7
#' # the prior distribution for lambda
#' alpha = 2
#' beta = 10
#' # lambda
#' lambda = rep(0, p)
#' pi_0 = 0.8
#' p_0 = floor(p*pi_0)
#' p_1 = p-p_0
#' lambda[(p_0+1):p] = stats::rgamma(p_1, shape = alpha, rate=1/beta)
#' # Generate a Poisson RV
#' J = sapply(1:p, function(x){rpois(1, lambda[x]/2)})
#' X = sapply(1:p, function(x){rchisq(1, k+2*J[x])})
#' out = density_LS(X)
#'
#' @export
density_LS <- function(x) {
  ones <- rep(1, length(x))
  zeros <- rep(0, length(x))
  # Basis matrices for the score model and its successive derivatives.
  basis0 <- cbind(ones, x)
  basis1 <- cbind(zeros, ones)
  basis2 <- cbind(zeros, zeros)
  basis3 <- cbind(zeros, zeros)
  # Normal equations of the least-squares fit: gram %*% coef = rhs.
  gram <- t(basis0) %*% basis0 / dim(basis0)[1]
  xrep <- matrix(rep(x, dim(basis0)[2]), ncol = dim(basis0)[2])
  rhs <- (-1) * apply(basis0 + basis1 * xrep, 2, mean)
  coef <- solve(gram, rhs)
  # Powers of 1/x, replicated across the basis columns, used to convert the
  # fitted score model into derivatives of the log density.
  inv1 <- matrix(rep(1 / x, dim(basis0)[2]), ncol = dim(basis0)[2])
  inv2 <- matrix(rep(1 / x^2, dim(basis0)[2]), ncol = dim(basis0)[2])
  inv3 <- matrix(rep(1 / x^3, dim(basis0)[2]), ncol = dim(basis0)[2])
  inv4 <- matrix(rep(1 / x^4, dim(basis0)[2]), ncol = dim(basis0)[2])
  # First to fourth log-density derivative estimates.
  l_1 <- (basis0 * inv1) %*% coef
  l_2 <- (basis1 * inv1 - basis0 * inv2) %*% coef
  l_3 <- (basis2 * inv1 - 2 * basis1 * inv2 + 2 * basis0 * inv3) %*% coef
  l_4 <- (basis3 * inv1 - 3 * basis2 * inv2 + 6 * basis1 * inv3 - 6 * basis0 * inv4) %*% coef
  list(l_1 = l_1, l_2 = l_2, l_3 = l_3, l_4 = l_4)
}
|
b790cbeeee4631fc7b85c866ad071245ef149800
|
4b92cdefa377126dfbf2e79a831d82ec10f083b1
|
/R/aop_cytoscape_methods.R
|
03b69844f9fff09075c6cf4f478385a667b8cefe
|
[
"LicenseRef-scancode-us-govt-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
DataSciBurgoon/aop
|
afe589a69a9f5431f59e860a1e0e49fdca1f04ed
|
323b08977970cc7f76eeccb1e299b9844a9e0be7
|
refs/heads/master
| 2020-04-08T15:04:56.472133
| 2015-09-29T15:54:41
| 2015-09-29T15:54:41
| 41,174,739
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,037
|
r
|
aop_cytoscape_methods.R
|
#' @include aopCytoscape_Class.R
NULL
#' Get Node Name from ID
#'
#'
#' Given an id, this method returns an \code{aop_cytoscape} node name.
#'
#' @param theObject is an AOP as an object of class \code{aop_cytoscape}.
#'
#' @param id an object of class \code{character} such as "389".
#'
#' @return the name of the node
#'
#' @export
#' @docType methods
#' @rdname aop_cytoscape-methods
#'
#' @examples
#' library(graph)
#' steatosis_json_file <- system.file("extdata", "steatosis_aop_json.cyjs",
#' package = "aop")
#' steatosis_aop <- convert_cytoscape_to_aop(steatosis_json_file)
#' getAOPNodeName(steatosis_aop, "389")
setGeneric(name="getAOPNodeName",
           def=function(theObject, id){
             # S4 generic: the concrete behaviour is supplied by the
             # setMethod() registration below.
             standardGeneric("getAOPNodeName")
           }
)
#' @rdname aop_cytoscape-methods
setMethod(f="getAOPNodeName",
          signature="aop_cytoscape",
          definition=function(theObject, id){
            # Nodes are held in the @nodes slot as a list keyed by the
            # Cytoscape id string; each entry carries its display name
            # under $data$name.
            return(theObject@nodes[[id]]$data$name)
          }
)
|
0a8473175c08bbb0927857178d6ecc7cb3e2bbdd
|
9f77e2840e495b200ef099608c79dc1c22aaa798
|
/project2.R
|
d17380c2cc09f0d2d981d59f11903fd3d6b8e2a4
|
[] |
no_license
|
tdhock/DeepLearning-
|
7b3ad7f793cd66996cf522f41f9dd0e32e98f5d2
|
a436507fdead6b21506bd3761598c901ca4f9a39
|
refs/heads/master
| 2022-06-19T03:21:43.116788
| 2020-05-02T01:27:25
| 2020-05-02T01:27:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,474
|
r
|
project2.R
|
library(data.table)
library(ggplot2)
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# Majority-class baseline: predict the most frequent training label for every
# test row and return the misclassification rate on the test set.
#
# Args:
#   x_mat, x_test: unused feature arguments, kept so the signature matches
#                  the other learners in this script.
#   y_vec:  training labels (0/1), as a vector or single-column table.
#   y_test: test labels (0/1), as a vector or single-column table.
# Returns: scalar test error rate of the majority-label predictor.
baseline <- function(x_mat, y_vec, x_test, y_test) {
  zeros <- sum(y_vec == 0)
  ones <- sum(y_vec == 1)
  # BUG FIX: the original assigned max(zeros, ones) -- a *count* -- as the
  # predicted label, producing nonsense predictions.  Predict the majority
  # label itself (ties go to 0).
  majority_label <- if (zeros >= ones) 0 else 1
  # BUG FIX: the original compared with `==`, which yields accuracy, while
  # the function's contract is an error rate; use `!=`.
  error <- mean(unlist(y_test) != majority_label)
  return(error)
}
# Convenience wrapper: cross-validated nearest neighbours restricted to a
# single neighbour (k = 1).
nearest_1_neighbors <- function(x_mat, y_vec, x_new, num_folds = 5) {
  nearest_neighbors_cv(x_mat, y_vec, x_new,
                       num_folds, 1)
}
# Generic K-fold evaluation: for each fold, train `compute_predictions` on
# the remaining folds and measure the misclassification rate on the held-out
# fold.
#
# Args:
#   x_mat: feature matrix (rows = observations).
#   y_vec: labels; a plain vector, or a table that subsets row-wise with a
#          logical index (e.g. data.table) as in the original script.
#   compute_predictions: function(x_train, y_train, x_new) -> predictions.
#   fold_vec: integer fold id per row; assumed to take values 1..K so the
#             ids can index error_vec directly (as in the original).
# Returns: numeric vector of per-fold error rates.
k_fold_cv <- function(x_mat, y_vec, compute_predictions, fold_vec) {
  error_vec <- vector(length = length(unique(fold_vec)))
  for (fold in unique(fold_vec)) {
    x_new <- x_mat[fold_vec == fold, ]
    y_new <- y_vec[fold_vec == fold]
    x_train <- x_mat[fold_vec != fold, ]
    y_train <- y_vec[fold_vec != fold]
    pred_new <- compute_predictions(x_train, y_train, x_new)
    # BUG FIX: the original used `==`, which computes *accuracy*, while the
    # function and its callers treat the result as an error rate.  mean()
    # over unlist() also lets plain vectors work (colMeans() required a
    # table and failed on vectors).
    loss <- mean(unlist(y_new) != as.vector(pred_new))
    error_vec[fold] <- loss
  }
  return(error_vec)
}
# Cross-validated k-nearest-neighbours: choose the best k in 1..max_neighbors
# by validation error on random folds, then predict x_new with that k.
#
# Args:
#   x_mat: feature matrix; y_vec: labels (vector or 1-column table);
#   x_new: matrix of observations to predict.
#   num_folds: number of CV folds; max_neighbors: largest k to try.
# Returns: list(best = predictions for x_new with the selected k,
#               mean_error_vec = mean CV error per k,
#               error_mat = fold x k matrix of error rates).
nearest_neighbors_cv <- function(x_mat, y_vec, x_new,
                                 num_folds = 5, max_neighbors = 20) {
  # Random fold assignment, one id per row.  ("length.out" spelled out; the
  # original relied on partial matching via "l".)
  validation_fold_vec <- sample(rep(1:num_folds, length.out = nrow(x_mat)))
  error_mat <- matrix(nrow = num_folds, ncol = max_neighbors)
  y_all <- unlist(y_vec)
  for (num_neighbors in 1:max_neighbors) {
    # BUG FIX: the original always called knn with k = max_neighbors, so
    # every column of error_mat was computed with the same k and the search
    # over k was a no-op.  It also mislabelled accuracy as error, making
    # which.min() select the worst k; we store true error rates here.
    for (fold in 1:num_folds) {
      in_fold <- validation_fold_vec == fold
      pred <- class::knn(x_mat[!in_fold, , drop = FALSE],
                         as.matrix(x_mat[in_fold, , drop = FALSE]),
                         y_all[!in_fold],
                         k = num_neighbors)
      error_mat[fold, num_neighbors] <- mean(y_all[in_fold] != as.vector(pred))
    }
  }
  mean_error_vec <- colMeans(error_mat)
  best_neighbors <- which.min(mean_error_vec)
  best_pred <- class::knn(x_mat, as.matrix(x_new), as.matrix(y_vec),
                          k = best_neighbors)
  results_list <- list(best = best_pred,
                       mean_error_vec = mean_error_vec,
                       error_mat = error_mat)
  return(results_list)
}
# Read in the raw spam dataset (one row per e-mail; column 58 is the 0/1
# label, columns 1-57 are word/character frequency features).
spam_datatable <- data.table::fread("spam.data")
# Split data into usable elements: scaled features and the label column.
x <- spam_datatable[, -58]
x_scale <- scale(x)
y <- spam_datatable[, 58]
# Run cross-validated nearest neighbours once to obtain the mean validation
# error curve over k = 1..20.
# NOTE(review): the third argument is a 1-row dummy "new data" matrix; only
# $mean_error_vec is used below.
result <- nearest_neighbors_cv(x_scale, y, t(vector(length = ncol(x))))
# Plot the mean validation error against k and mark the minimising k.
ggplot() +
  geom_line(aes(
    c(1:20), result$mean_error_vec),
    data = as.data.table(result$mean_error_vec ), size = 3)+
  geom_point( aes( which.min(result$mean_error_vec), min(result$mean_error_vec) ), size = 7)
# Create a new 4-fold assignment vector over all observations.
# NOTE(review): `l =` relies on R's partial matching of `length.out`.
test_fold_vec <- sample(rep(1:4, l = nrow(x_scale)))
# Build a table of the number of 0 and 1 labels in each fold, as a sanity
# check that the random folds are roughly class-balanced.
fold_list<- list()
for (fold in unique(test_fold_vec))
{
  # Labels belonging to this fold.
  fold_vals <- as.vector(y)[ fold == test_fold_vec ]
  zeros <- fold_vals[ as.vector(fold_vals==0) ]
  ones <- fold_vals[ as.vector(fold_vals==1) ]
  fold_list[[fold]] <- ( list( zeros = length(t(zeros)), ones = length(t(ones))) )
}
# One row per fold: counts of zeros and ones.
do.call(rbind, fold_list)
# Compare the CV-tuned kNN against 1-NN and the majority-class baseline on
# each of the 4 test folds, collecting per-fold test error rates.
# BUG FIXES vs. the original:
#   * `compute_predictions = algorithm` passed a character string where a
#     function was required (calling a string errors in R) -- resolved with
#     match.fun();
#   * the wrappers swapped argument order, passing the feature matrix as the
#     label vector;
#   * wrap_baseline called baseline() with a missing argument and a result
#     (an error rate) that is not a prediction vector;
#   * k_fold_cv was given a fold vector sized for the full data while being
#     handed only the training subset.  The outer loop already splits
#     train/test, so each algorithm is now trained on the training folds and
#     scored directly on the held-out fold.
err.dt.list <- list()
for (test.fold in 1:4) {
  ## split into train/test sets.
  x_new <- x_scale[test_fold_vec == test.fold, ]
  y_new <- y[test_fold_vec == test.fold]
  x_train <- x_scale[test_fold_vec != test.fold, ]
  y_train <- y[test_fold_vec != test.fold]
  # Each wrapper: function(x_train, y_train, x_new) -> prediction vector.
  wrap_knncv <- function(x_tr, y_tr, x_nw) {
    nearest_neighbors_cv(x_tr, y_tr, as.matrix(x_nw))$best
  }
  wrap_1nn <- function(x_tr, y_tr, x_nw) {
    nearest_1_neighbors(x_tr, y_tr, as.matrix(x_nw))$best
  }
  wrap_baseline <- function(x_tr, y_tr, x_nw) {
    majority <- if (sum(y_tr == 0) >= sum(y_tr == 1)) 0 else 1
    rep(majority, nrow(as.matrix(x_nw)))
  }
  for (algorithm in c("wrap_knncv", "wrap_1nn", "wrap_baseline")) {
    ## run algorithm and store test error.
    fit <- match.fun(algorithm)
    pred_new <- fit(x_train, y_train, x_new)
    error.percent <- mean(unlist(y_new) != as.vector(pred_new))
    err.dt.list[[paste(test.fold, algorithm)]] <- data.table(
      test.fold, algorithm, error.percent)
  }
}
err.dt <- do.call(rbind, err.dt.list)
|
13f1479f20ba3b702abf2186cf3d811c51fa0737
|
e1b66eb9387ccc2012fe645450aba3fc3247457f
|
/R/gdalControls.R
|
df349e175602bda60f39f052af734bc600ce82dd
|
[
"MIT"
] |
permissive
|
mlampros/MODIS
|
26d0f6e27404300c342b6d1e23e181d41d570437
|
012c3fa9960fdaa57d7abe70bb19ee0307ea800a
|
refs/heads/master
| 2022-12-05T18:24:22.973205
| 2020-06-24T05:58:11
| 2020-06-24T05:58:11
| 289,838,528
| 1
| 0
|
NOASSERTION
| 2020-08-24T05:42:17
| 2020-08-24T05:42:16
| null |
UTF-8
|
R
| false
| false
| 3,506
|
r
|
gdalControls.R
|
### input projection -----
# Native CRS of a MODIS product: tiled products live on the sinusoidal grid,
# everything else (CMG products) uses lat/lon on the Clarke 1866 ellipsoid.
InProj <- function(product) {
  crs_sinusoidal <- "+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +R=6371007.181 +units=m +no_defs"
  crs_latlon <- "+proj=longlat +ellps=clrk66 +no_defs"
  if (product@TYPE[1] == "Tile") crs_sinusoidal else crs_latlon
}
### output projection -----
# Resolve the output CRS: prefer one carried by the target extent object,
# otherwise take it from the combined options; "asIn" falls back to the
# product's native CRS.  Always returns a proj4 string (crs objects are
# unwrapped), echoing the choice to the console as it goes.
OutProj <- function(product, extent, ...) {
  opts <- combineOptions(...)
  cat("########################\n")
  from_extent <- !is.null(extent@target$outProj)
  if (from_extent) {
    outProj <- checkOutProj(extent@target$outProj, tool = "GDAL")
  } else {
    outProj <- checkOutProj(opts$outProj, tool = "GDAL")
  }
  shown <- if (inherits(outProj, "crs")) outProj$proj4string else outProj
  if (from_extent) {
    cat("outProj = ", shown, " (if applicable, derived from Raster*/Spatial*/sf* object)\n")
  } else {
    cat("outProj = ", shown, "\n")
  }
  if (outProj == "asIn") {
    if (product@TYPE[1] == "Tile") {
      outProj <- "+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +R=6371007.181 +units=m +no_defs"
    } else {
      outProj <- "+proj=longlat +ellps=clrk66 +no_defs" # CMG proj
    }
  }
  if (inherits(outProj, "crs")) outProj$proj4string else outProj
}
### output pixel size -----
# Resolve the target pixel size, preferring one derived from the extent
# (e.g. a Raster* template) over the option set.  Returns a length-2
# (x, y) vector, or NULL (empty if) when the size is "asIn".
PixelSize <- function(extent, ...) {
  opts <- combineOptions(...)
  px <- extent@target$pixelSize
  if (is.null(px)) {
    px <- opts$pixelSize
    cat("pixelSize = ", px, "\n")
  } else {
    cat("pixelSize = ", px, " (if applicable, derived from Raster* object)\n")
  }
  if (px[1] != "asIn") {
    if (length(px) == 1) rep(px, 2) else px
  }
}
### resampling type -----
# Validate the requested resampling method against GDAL's supported set,
# echo it to the console, and return the validated value.
ResamplingType <- function(...) {
  opts <- combineOptions(...)
  method <- checkResamplingType(opts$resamplingType, tool = "gdal")
  cat("resamplingType = ", method, "\n")
  method
}
### target extent -----
# Compute the target extent in the output CRS as a character vector of
# bbox values (xmin, ymin, xmax, ymax), or NULL when no extent information
# is available.
# NOTE(review): `extent` is a package-specific S4 object with @target and
# @extent slots -- not a raster::extent -- confirm against the callers.
TargetExtent <- function(extent, outProj) {
  if (!is.null(extent@target$extent)) { # all extents but not tileV/H
    if (is.null(extent@target$outProj)) { # map or list extents (always LatLon)
      # Lat/lon extent: wrap in a raster template and reproject to outProj.
      rx <- raster(extent@target$extent, crs = "+init=epsg:4326")
      rx <- projectExtent(rx, outProj)
      rx <- extent(rx)
    } else {
      # Extent is already expressed in the output CRS; use as-is.
      rx <- extent@target$extent
    }
  }
  if (is.null(extent@target)) {
    if(!is.null(extent@extent)) {
      rx <- raster(extent@extent, crs = "+init=epsg:4326")
      # suppress 'Discarded ... unknown in CRS definition' warning
      rx <- suppressWarnings(projectExtent(rx, outProj))
      rx <- extent(rx)
    }
  }
  # `rx` only exists when one of the branches above assigned it.
  # NOTE(review): exists() also searches enclosing environments; a global
  # `rx` would be picked up here -- a local-scope check would be safer.
  if (exists("rx")) {
    as.character(sf::st_bbox(rx))
  }
}
### block size -----
# Build the GDAL creation option "BLOCKYSIZE=<n>" when a block size was
# requested via the options; returns NULL otherwise.
BlockSize <- function(...) {
  opts <- combineOptions(...)
  size <- opts$blockSize
  if (!is.null(size)) {
    paste0("BLOCKYSIZE=", as.integer(size))
  }
}
### output compression -----
# LZW compression creation options, enabled by default: returned when the
# `compression` option is unset or TRUE, otherwise NULL.
OutputCompression <- function(...) {
  opts <- combineOptions(...)
  compress <- opts$compression
  if (is.null(compress) || isTRUE(compress)) {
    c("compress=lzw", "predictor=2")
  }
}
### quiet output -----
# Return the GDAL "-q" flag when a `quiet` option is present and TRUE;
# otherwise NULL, leaving full console output enabled.
QuietOutput <- function(...) {
  opts <- combineOptions(...)
  has_quiet <- "quiet" %in% names(opts)
  if (has_quiet) {
    if (opts$quiet) "-q"
  }
}
### gdal drivers ----
# All GDAL raster drivers known to the GDAL build linked by sf.
getGdalDrivers <- function() {
  sf::st_drivers(what = "raster")
}
# Subset of the GDAL raster drivers that support writing.
getGdalWriteDrivers <- function() {
  drivers <- getGdalDrivers()
  subset(drivers, write)
}
|
97e010d44880fccb8a9ee14e45faf9fa6e9e09f0
|
ef4eb23543224c14f4cae67190d1f82bd881a4a4
|
/IDESSA/BushEncroachment/GoogleTrainingSites/Valdidation_RF_Model.R
|
6a16f0fc86301a3d74683d9af742cba6ccd4e7aa
|
[] |
no_license
|
environmentalinformatics-marburg/magic
|
33ed410de55a1ba6ff943090207b99b1a852a3ef
|
b45cf66f0f9aa94c7f11e84d2c559040be0a1cfb
|
refs/heads/master
| 2022-05-27T06:40:23.443801
| 2022-05-05T12:55:28
| 2022-05-05T12:55:28
| 9,035,494
| 6
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,432
|
r
|
Valdidation_RF_Model.R
|
################### Control Random Forest
# Validate a fitted random-forest model on a held-out test set and write the
# confusion matrix, summary statistics, a variable-importance plot and a
# reliability boxplot into the output directory.
# NOTE(review): relies on objects loaded from .RData files produced by the
# training script, an `out` path defined upstream, and functions from caret
# (confusionMatrix, varImp), pROC (roc) and classificationStats -- confirm
# which package supplies the latter.
setwd(out)
load("test.RData")
load("results.RData")
# Predicted vs. observed class labels on the test set.
pred_test <- c(predict(results, test))
obs_test <- c(test$class)
cm<-confusionMatrix(pred_test,obs_test)$table
#ROC-Curve: area under the curve, extracted as a plain scalar
AUC=roc(obs_test,pred_test)$auc[1]
Validation<-classificationStats(pred_test, obs_test)
Validation$AUC<-AUC
write.table(cm,"ConfusionMatrix.txt")
write.table(Validation,"Validation.txt",row.names=F)
# Variable importance, relabelled with human-readable predictor names
# (order must match the training feature order).
varimp <- varImp(results)
row.names(varimp$importance)<-c("Red","Green","Blue","VVI","Hue", "Saturation", "Value",
"Red 3x3 mean","Green 3x3 mean","Blue 3x3 mean", "VVI 3x3 mean",
"Hue 3x3 mean", "Saturation 3x3 mean", "Value 3x3 mean",
"Red 3x3 sd","Green 3x3 sd","Blue 3x3 sd", "VVI 3x3 sd",
"Hue 3x3 sd", "Saturation 3x3 sd", "Value 3x3 sd", "Biome")
# Top-15 importance plot.
pdf("varImp.pdf",width=6,height=4.5)
plot(varimp,15,col="black")
dev.off()
# Distribution of per-pixel classification reliability, scaled to percent.
load("All_classified.df.RData")
pdf("Reliability.pdf",width=8,height=3.5)
par(mar=c(0,3,0,3))
boxplot(All_classified.df$reliability*100,horizontal=TRUE,col="grey90", notch=TRUE,pch=8,cex=0.5,bty="n",
xaxt="n",yaxt="n",frame.plot=FALSE)
axis(1, at=c(0,10,20,30,40,50,60,70,80,90,100),labels=c("0","10","20","30","40","50","60","70","80","90","100"),
col.axis="black", las=1,pos=0.75)
text(50, 0.56, "Percent", xpd = TRUE)
dev.off()
|
64a125fd26d29405b434fe862d63f5a96db8a432
|
7ff4a3a085076ee33d34dd0dacb171ac7eb99240
|
/R/atsmeans.R
|
6687d31e0c4406b96f4dd3188ed9dfa75da7c29e
|
[] |
no_license
|
tonizhong/SMARTAR
|
d77ff84835845ab66e62d06816d9bd96248efdaa
|
b0002af226857a0ebbd9134cabe672b2dfe3e377
|
refs/heads/master
| 2023-01-03T11:48:51.774436
| 2020-10-27T13:01:12
| 2020-10-27T13:01:12
| 286,015,943
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,940
|
r
|
atsmeans.R
|
#' Identify adaptive treatment strategy and estimate strategy values
#'
#' Return a message that lists all the
#' adaptive treatment strategy embedded in SMART design.
#' It also gives the estimated strategy values and the
#' variance-covariance matrix of estimated values.
#' @param data Input data frame of the sequential
#' randomized trial (SMART) data used for analysis.
#' The data should include the variables of stage-specific
#' treatments (At; t=1,2,3), intermediate evaluation
#' (Ot; t=1,2,3) and final primary outcome (Y),
#' where t represent the number of stages embedded in design.
#' If stage-1 treatment (A1) takes into account the information
#' of baseline evaluation, O1 needed to be include in data,
#' otherwise not.
#' @param family A character string to specify the
#' type of final primary outcome. The default is
#' family=“gaussian”, which refers to the continuous
#' primary outcome. If family=”binomial” then the
#' primary outcome will be treated as binary variable.
#' @param method A character string to specify the
#' method of estimation. If method="Gest" then
#' G-computation method is used. If method="IPW" then
#' Inversed Probability Weighting method is used.
#' @param digits An integer indicating the number
#' of decimal places for sequence-specific mean and variance.
#' Default is digits=NULL.
#' @param common If common=TRUE, the pooled variance across
#' all the treatment sequences are used in estimation.
#' Otherwise use the sequence-specific variance. The default is common=FALSE.
#' @param conf If conf=TRUE, output confidence intervals
#' for estimate strategy values. The default is conf=TRUE.
#' @param alpha Type I error rate control for confidence
#' interval. The default is alpha=0.05.
#' @param plot If plot=TRUE, output the graphs of
#' treatment effects with CIs. The default is plot=TRUE.
#' @param title Characters indicating the title of the graphs.
#' Default is “Strategy values with confidence intervals”.
#' @param color Characters indicating the color of the graphs.
#' Default is color=“forestgreen”.
#' @param ylab Characters to specify the label of the
#' vertical axis of the output figure.
#' Default is “Strategy value”.
#' @param xlab characters to specify the label of the horizontal
#' axis of the output figure.
#' @param xtext Specification for the text of the horizontal axis of the graphs.
#' @param pch An integer to specify the shape of points in the graphs.
#' The default is pch=15.
#' @param cex An integer to specify the amount by which plotting symbols
#' should be magnified. The default is cex=2.
#' @param lwd An integer to specify the line width,
#' The lines refer to the width of the confidence interval.
#' The default is lwd=1.
#' @param ylim Integers to specify the maximum and minimum value of
#' y axis.
#' @param mar A numerical vector of the form c(bottom, left, top, right)
#' which gives the number of lines of margin to be specified
#' on the four sides of the plot.
#' @param cex.axis The magnification to be used for the horizontal axis
#' annotation relative to the current setting of cex.
#' @param line Specifying a value for line overrides
#' the default placement of label
#' of the horizontal axis of the graphs.
#' @return
#'
#' An object of ``value” is return, which contain
#' the index of all the adaptive treatment strategies,
#' strategy-specific sample sizes and estimated values
#' with standardized errors.
##' \itemize{
##' \item ATS: Index of adaptive treatment strategy
##' from 1 to G, where G is total number of strategies
##' defined in SMART
##' \item ds: Stage-specific decision makings given
##' certain histories corresponding to each strategy.
##' The number of columns of ``ds'' is defined by strategy
##' and details are shown in the output.
##' \item N: Number of subjects following a strategy.
##' \item value: Estimated strategy values.
##' \item se: standard errors of estimation
##' \item lower.CI: Lower bound of (1-alpha) level confidence
##' interval for strategy values
##' \item upper.CI: Upper bound of (1-alpha) level confidence
##' interval for strategy values
##' }
#' An object of ``vmat'' is return, which is variance-covariance matrix of
#' estimated strategy values
#'
#' @references Lavori P.W. and Dawson R. (2007). Improving the
#' efficiency of estimation in randomization trials of adaptive
#' treatment strategies. \emph{Clinical Trials}, 4: 297-308.
#' @references Ko and Wahed A.S. (2015). Design of sequentially
#' randomization trials for testing adaptive treatment strategies.
#' \emph{Statistics in Medicine}, 31, 812-830.
#'
#' @examples
#'
#' atsmeans(data=codiacs,family="gaussian",method="Gest",
#' conf=TRUE,common=TRUE,alpha=0.05,plot=TRUE,pch=12,lwd=2)
#'
#' @export
#atsmeans=function(data,family="normal",method="Gest",
#common=FALSE,conf=TRUE,alpha=0.05,plot=FALSE){
# Estimate the value of every adaptive treatment strategy embedded in the
# SMART data set and, optionally, plot them with confidence intervals.
# Returns a list with the value table ($value) and the variance-covariance
# matrix of the estimates ($vmat).  Full argument documentation is in the
# roxygen block above.
# NOTE(review): depends on package-internal helpers nstage(), em(), evcmat(),
# atsci() and atsciplot() defined elsewhere in the package.
atsmeans<-function(data,family=c("gaussian","binomial")[1],
method=c("Gest","IPW")[1],
digits=NULL,common=FALSE,conf=TRUE,
alpha=0.05,plot=FALSE,
title="Strategy values with confidence interval",color="forestgreen",
ylab="Strategy value",
xlab=NULL,xtext=NULL,pch=15,cex=2,lwd=3,
ylim=NULL,mar=NULL,cex.axis=1,line=NULL){
# Short aliases for the main inputs.
D<-as.data.frame(data)
FA<-family
Ma<-method
Com<-common
Al<-alpha
# Base is 1 when stage-1 treatment uses a baseline evaluation O1, else 0.
if (is.null(D$O1)) {Base<-0} else {Base<-1}
# Number of stages embedded in the design (1, 2 or 3).
Nstage<-nstage(data=D)
# Strategy index, sample sizes and estimated values (G-computation or IPW).
Umat<-em(data=D,method=Ma)
Val<-Umat$value
# Variance-covariance matrix of the estimated strategy values.
Vmat<-evcmat(data=D,family=FA,
method=Ma,common=Com)
se<-sqrt(diag(Vmat))
# (1 - alpha)-level confidence intervals for the strategy values.
CIs<-atsci(eumat=Umat,evmat=Vmat,alpha=Al)
# Attach standard errors, plus confidence bounds when conf = TRUE.
if (conf==FALSE) {Umat<-data.frame(Umat,se)} else
if (conf==TRUE) {Umat<-data.frame(Umat,se,CIs)}
message(paste("$value: estimated strategy values
(with confidence intervals)",
"$vmat: variance-covariance matrix
of estimated strategy values \n",
sep="\n"))
# Explain the strategy notation for the detected design type; the bottom
# plot margin grows with the length of the notation, and par() settings
# are restored on exit.
if (Nstage==1 && Base==0) {
message("A strategy is
defined as a single-stage
decision making (d0) for A1 at baseline")
opar<-par(mar=c(4,4,4,3))
on.exit(par(opar))} else
if (Nstage==1 && Base==1) {
message(paste("A strategy is defined as a vector of
single-stage decision makings (d0,d1),",
"each of which corresponds to a
possible outcome of baseline evulation (O1). \n",
"d0 is the stage-1 decision making
for A1, conditioning on O1=0",
"d1 is the stage-1 decision making
for A1, conditioning on O1=1",
sep="\n"))
opar<-par(mar=c(5,4,4,3))
on.exit(par(opar))
} else
if (Nstage==2 && Base==0) {
message(paste("A strategy is defined as a vector of
decision makings (d0;d00,d01) for 2 stages \n",
"d0 is the stage-1 decision making for A1",
"d00 is the stage-2 decision making for A2,
conditioning on A1=d0 and O2=0",
"d01 is the stage-2 decision making for A2,
conditioning on A1=d0 and O2=0",
sep="\n"))
opar<-par(mar=c(6,4,4,3))
on.exit(par(opar))
} else
if (Nstage==2 && Base==1) {
message(paste("A strategy is defined as a vector of decision
makings (d0,d1;d00,d01,d10,d11) \n",
"d0 is the stage-1 decision making conditioning on O1=0",
"d1 is the stage-1 decision making conditioning on O1=1",
"d00 is the stage-2 decision making conditioning
on A1=d0 and O2=0",
"d01 is the stage-2 decision making conditioning
on A1=d0 and O2=0",
"d00 is the stage-2 decision making conditioning
on A1=d1 and O2=0",
"d01 is the stage-2 decision making conditioning
on A1=d1 and O2=0",
sep="\n"))
opar<-par(mar=c(7,4,4,3))
on.exit(par(opar))
} else
if (Nstage==3 && Base==0) {
message(paste("A strategy is defined
as a vector of decision makings
(d0;d00,d01;d000,d001,d010,d111) \n",
"d0 is the stage-1 decision making",
"d00 is the stage-2 decision making conditioning on
A1=d0 and O2=0",
"d01 is the stage-2 decision making conditioning on
A1=d0 and O2=0",
"d000 is the stage-3 decision making conditioning on
A1=d0, O2=0, A3=d00 and O3=0",
"d001 is the stage-3 decision making conditioning on
A1=d0, O2=0, A3=d00 and O3=1",
"d010 is the stage-3 decision making conditioning on
A1=d0, O2=1, A3=d01 and O3=0",
"d011 is the stage-3 decision making conditioning on
A1=d0, O2=1, A3=d01 and O3=1",
sep="\n"))
opar<-par(mar=c(8,4,4,3))
on.exit(par(opar))
}
# Optionally draw the strategy values with their confidence intervals.
if (plot==TRUE) {atsciplot(uimat=Umat,
nstage=Nstage,baseline=Base,title=title,
col=color,ylab=ylab,xlab=xlab,
pch=pch,cex=cex,lwd=lwd,xtext=xtext,
lim=ylim,mar=mar,cex.axis=cex.axis,
line=line)}
# Return the estimates, rounded to `digits` decimals when requested; the
# result is tagged with a lightweight S3 class for custom printing.
if (is.null(digits)) {
outcome<-list(value=Umat,vmat=Vmat)
attr(outcome,'class') <- c('myclass','list')
return(outcome)}
else {
outcome<-list(value=round(Umat,digits),
vmat=round(Vmat,digits))
attr(outcome,'class') <- c('myclass','list')
return(outcome)}
}
|
a337f31b17001ec95d35215fb4dbcd4fac91368f
|
c944ad97280ca8f6987283fd9f0d392f376f8d75
|
/man/get_city_issues.Rd
|
aa033c40e374c51fcf29f5ddbd2f5f62703e802d
|
[] |
no_license
|
clemp/seeclickfixr
|
f86eeb24496b5797db640c340c59d2945f02ce16
|
58618f084a60340799bd98e277d9542b94c819ee
|
refs/heads/master
| 2021-01-22T10:18:29.936844
| 2015-09-18T20:51:08
| 2015-09-18T20:51:08
| 17,094,970
| 1
| 2
| null | 2015-09-18T20:51:08
| 2014-02-22T21:18:12
|
R
|
UTF-8
|
R
| false
| false
| 3,212
|
rd
|
get_city_issues.Rd
|
\name{get_city_issues}
\alias{get_city_issues}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Query all issues in a given location
}
\description{
Returns all issues in the specified location, given either by coordinates or by name/address.
}
\usage{
get_city_issues(city, status = "open,acknowledged,closed,archived", limit = 100)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{city}{
A written description of the location for which issue types should be returned. If city is specified, lat/long should not be.
}
\item{lat}{
Latitude of coordinates, specified instead of city.
}
\item{long}{
Longitude of coordinates, specified instead of city.
}
\item{status}{
Which status types of issues should be returned. Separate statuses should be separated by commas without spaces. Available options are open, acknowledged, closed, and archived. Default is all.
}
\item{limit}{
Number of items to return. Defaults to 100.
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
\item{issue_id}{Unique ID number for the given issue}
\item{status}{Status of the issue (open/closed)}
\item{summary}{Summary of the status for the issue}
\item{description}{Text description of the issue as reported}
\item{rating}{Importance rating of the issue}
\item{lat}{Latitude of the issue as reported}
\item{lng}{Longitude of the issue as reported}
\item{issue_address}{Address of the issue as reported}
\item{created_at}{Date and time when issue report was created}
\item{acknowledged_at}{Date and time when issue report was acknowledged by city}
\item{closed_at}{Date and time when issue report was closed by city}
\item{reopened_at}{Date and time when issue report was reopened, if it was}
\item{updated_at}{Date and time when issue report was last updated}
\item{shortened_url}{Shortened URL of the issue report}
\item{video_url}{URL for the video of the issue, if provided}
\item{image_full}{Image of the issue as reported}
\item{image_square_100x100}{Square version of the image of the issue}
\item{representative_image_url}{A representative image of the issue, if no actual image was submitted}
\item{issue_type}{Type of issue}
\item{url}{URL to the report of the issue}
\item{html_url}{URL to the report of the issue in HTML format}
\item{comment_url}{URL to the comments on the issue}
\item{flag_url}{URL to the flag for the issue}
\item{close_url}{URL to the closing report of the issue}
\item{open_url}{URL to the opening report of the issue}
\item{reporter_id}{Issue reporter's unique ID number}
\item{reporter_name}{Name of the issue reporter}
\item{reporter_wittytitle}{Username/witty name of the issue reporter}
\item{reporter_role}{Issue reporter's role in the city, if any}
\item{reporter_civicpoints}{Number of civic points the issue reporter has, if any}
\item{reporter_avatar_full}{Chosen avatar of the issue reporter}
\item{reporter_avatar_square}{Square version of the avatar}
}
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
f1208d36bb2f9b89edd84034c91081489828b64a
|
cf2e5981c5aab9cce49718bf01616e172ffb57d2
|
/R/perguntas.R
|
8d20d8270e00c748031e63de8b84eddc4c3bd904
|
[] |
no_license
|
victorh1705/World-Bank
|
f8966e1cdbddf823f6caabc19d121375f66ea739
|
493d18045db340824f7f9e7b24b6716a43c54120
|
refs/heads/master
| 2020-03-12T11:55:42.651296
| 2018-06-21T21:18:38
| 2018-06-21T21:18:38
| 130,607,227
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 28,283
|
r
|
perguntas.R
|
warnings()
# install.packages('devtools')
# install.packages('plotly')
# install_github('EL-BID/Libreria-R-Numeros-para-el-Desarrollo')
# install_github("arcuellar88/govdata360R")
# install_github('EL-BID/Agregador-de-indicadores')
library(iadbstats)
library(devtools)
library(agregadorindicadores)
library(ggplot2)
library(plotly)
library(RSQLite)
source("R/indicadores.R")
# PARA VER COMO A BIBLIOTECA FUNCIONA, ACESSAR:
# https://rdrr.io/github/EL-BID/Agregador-de-indicadores/f/README.md
# SE.XPD.TOTL.GD.ZS: Government expenditure on education, total (% of GDP)
# SE.XPD.TOTL.GB.ZS: Expenditure on education as % of total government expenditure (%)
# SE.XPD.TERT.ZS, Expenditure on tertiary as % of government expenditure on education (%)
# SE.XPD.TERT.PC.ZS, Government expenditure per tertiary student as % of GDP per capita (%)
# SE.XPD.SECO.ZS, Expenditure on secondary as % of government expenditure on education (%)
# SE.XPD.SECO.PC.ZS, Government expenditure per student, secondary (% of GDP per capita)
# SE.XPD.PRIM.ZS, Expenditure on primary as % of government expenditure on education (%)
# SE.XPD.PRIM.PC.ZS, Government expenditure per student, primary (% of GDP per capita)
# SE.XPD.CTOT.ZS, Current education expenditure, total (% of total expenditure in public institutions)
# SE.XPD.CTER.ZS, Current education expenditure, tertiary (% of total expenditure in tertiary public institutions)
# SE.XPD.CSEC.ZS, Current education expenditure, secondary (% of total expenditure in secondary public institutions)
# SE.XPD.CPRM.ZS, Current education expenditure, primary (% of total expenditure in primary public institutions)
# GB.XPD.RSDV.GD.ZS,Research and development expenditure (% of GDP)
# NY.GDP.PCAP.KD.ZG,GDP per capita growth (annual %)
# NY.GDP.MKTP.KD.ZG,GDP growth (annual %)
#########################################
#########################################
#########################################
# Question 1: let the user pick an indicator flagged for analysis, a date
# range and a number of clusters; the clustering itself is still TODO.
# NOTE(review): depends on the `indicadores` data frame plus menu(),
# menuData() and leNumero() sourced from R/indicadores.R.
primeiraPergunta <- function() {
  cat("NOME PERGUNTA: primeiraPergunta\n")
  cat("DADOS ANALISADOS: \n")
  cat("RESULTADO ESPERADO: \n")
  # (commented-out draft: select the year-column range from a wide table)
  #dt_inicio <- grep(2000, names(dados))
  #dt_final <- grep(2017, names(dados))
  #colunas <- c(1:4,dt_inicio:dt_final)
  # Rows of `indicadores` whose ANALYSIS column is marked with "x".
  campos <- grep("x", indicadores$ANALYSIS)
  campos_nomes <- indicadores[campos, 1:2]
  # Interactive choices: indicator, date range and cluster count.
  codigo_indicador <- menu(campos_nomes)
  dt_analise <- menuData()
  n_cluster <- leNumero("Digite a quantidade de clusteres: ")
  ## TODO: create the clustering functions (criar funcoes de clusterizacao)
}
#########################################
#########################################
#########################################
## Question 2: is there a pattern linking R&D investment (% of GDP) to GDP
## growth in Brazil? Investment in year t is paired with GDP growth in year
## t + 1, and a "pattern" (`padrao`) is flagged when the year-over-year
## differences of both series move in the same strict direction.
##
## Side effects: prints the pairing table and four plotly charts.
## NOTE(review): both the data fetch (`ai()`) and the pairing assume the
## returned data frame has the year in column 3 and the value in column 5
## (plus a `value` column) -- confirm against the `ai()` result layout.
segundaPergunta <- function() {
  cat(
    "\nNOME PERGUNTA: \nHa um padrao que relaciona os investimentos em P&D e o aumento do PIB?"
  )
  cat("\nINDICADORES ANALISADOS: GB.XPD.RSDV.GD.ZS e NY.GDP.MKTP.KD.ZG")
  cat(
    "\nRESULTADO ESPERADO: Responder se o crescimento do PIB esta diretamente relacionado ao aumento dos investimentos em P&D, ou seja, confirmar se eh de fato um padrao.\n\n"
  )
  print("######################################")
  cat("NY.GDP.MKTP.KD.ZG: GDP growth (annual %)\n")
  cat("GB.XPD.RSDV.GD.ZS: Research and development expenditure (% of GDP)\n")
  # Fetch both series (2000-2017) and put them in ascending-year order
  # (ai() returns them newest-first).
  investimentosArray <-
    ai(indicator = c("GB.XPD.RSDV.GD.ZS"), country = c("BR"),
       startdate = 2000, enddate = 2017)
  investimentosArray <- investimentosArray[dim(investimentosArray)[1]:1, ]
  pibArray <-
    ai(indicator = c("NY.GDP.MKTP.KD.ZG"), country = c("BR"),
       startdate = 2000, enddate = 2017)
  pibArray <- pibArray[dim(pibArray)[1]:1, ]
  # Advances `i` until rows i and i + 1 of `df` both have non-NA year
  # (column 3) and value (column 5) entries. This loop was copy-pasted six
  # times in the original implementation.
  avancarAteValido <- function(df, i) {
    while (is.na(df[i, 3]) | is.na(df[i + 1, 3])
           | is.na(df[i, 5]) | is.na(df[i + 1, 5])) {
      i <- i + 1
    }
    i
  }
  cont <- 1
  contPibArray <- 1
  contInvArray <- 1
  # One entry per matched (investment year t, GDP year t + 1) pair.
  resultados <- list(
    anoInvestimento = c(),
    investimentoNoPrimeiroAno = c(),
    investimentoNoSegundoAno = c(),
    diferencaInvestimento = c(),
    anoPIB = c(),
    PIBNoPrimeiroAno = c(),
    PIBNoSegundoAno = c(),
    diferencaPIB = c(),
    padrao = c()
  )
  while (contInvArray <= nrow(investimentosArray) ||
         contPibArray <= nrow(pibArray)) {
    contInvArray <- avancarAteValido(investimentosArray, contInvArray)
    contPibArray <- avancarAteValido(pibArray, contPibArray)
    anoInvestimento <- investimentosArray[contInvArray, 3]
    anoPIB <- pibArray[contPibArray, 3]
    # Walk the two cursors until the investment year immediately precedes
    # the GDP year (t vs t + 1).
    while (anoInvestimento != anoPIB - 1) {
      if (anoInvestimento < anoPIB) {
        contInvArray <- avancarAteValido(investimentosArray, contInvArray + 1)
      } else if (anoInvestimento >= anoPIB) {
        contPibArray <- avancarAteValido(pibArray, contPibArray + 1)
      }
      anoInvestimento <- investimentosArray[contInvArray, 3]
      anoPIB <- pibArray[contPibArray, 3]
    }
    # Record the paired observation and its year-over-year differences.
    resultados$anoInvestimento[cont] <- anoInvestimento
    resultados$investimentoNoPrimeiroAno[cont] <-
      investimentosArray$value[contInvArray]
    resultados$investimentoNoSegundoAno[cont] <-
      investimentosArray$value[contInvArray + 1]
    resultados$diferencaInvestimento[cont] <-
      resultados$investimentoNoSegundoAno[cont] -
      resultados$investimentoNoPrimeiroAno[cont]
    resultados$anoPIB[cont] <- anoPIB
    resultados$PIBNoPrimeiroAno[cont] <- pibArray$value[contPibArray]
    resultados$PIBNoSegundoAno[cont] <- pibArray$value[contPibArray + 1]
    resultados$diferencaPIB[cont] <-
      resultados$PIBNoSegundoAno[cont] - resultados$PIBNoPrimeiroAno[cont]
    # "Pattern" = both differences are strictly positive or both strictly
    # negative; zero differences count as no pattern.
    resultados$padrao[cont] <-
      (resultados$diferencaInvestimento[cont] > 0 &
         resultados$diferencaPIB[cont] > 0) ||
      (resultados$diferencaInvestimento[cont] < 0 &
         resultados$diferencaPIB[cont] < 0)
    cont <- cont + 1
    contInvArray <- contInvArray + 1
    contPibArray <- contPibArray + 1
    if (contInvArray == nrow(investimentosArray) |
        contPibArray == nrow(pibArray)) {
      break
    }
  }
  print(resultados)
  print("######################################")
  # Plots the investment series against the GDP series; when `segundoEixo`
  # is TRUE the second series goes on a secondary right-hand y axis.
  plotarComparacao <- function(yInv, nomeInv, yPib, nomePib, titulo,
                               segundoEixo) {
    p <- plot_ly() %>%
      add_lines(x = resultados$anoInvestimento, y = yInv, name = nomeInv)
    if (segundoEixo) {
      eixo2 <- list(tickfont = list(color = "red"), overlaying = "y",
                    side = "right", title = "% do PIB")
      p <- p %>%
        add_lines(x = resultados$anoPIB, y = yPib, name = nomePib,
                  yaxis = "y2") %>%
        layout(title = titulo, yaxis2 = eixo2, xaxis = list(title = "Ano"))
    } else {
      p <- p %>%
        add_lines(x = resultados$anoPIB, y = yPib, name = nomePib) %>%
        layout(title = titulo, xaxis = list(title = "Ano"))
    }
    print(p)
  }
  # Raw values: shared axis, then secondary axis.
  plotarComparacao(resultados$investimentoNoPrimeiroAno,
                   "Research and development expenditure (% of GDP)",
                   resultados$PIBNoPrimeiroAno,
                   "GDP growth (annual %)",
                   "Comparacao dos indicadores", FALSE)
  print("######################################")
  plotarComparacao(resultados$investimentoNoPrimeiroAno,
                   "Research and development expenditure (% of GDP)",
                   resultados$PIBNoPrimeiroAno,
                   "GDP growth (annual %)",
                   "Comparacao dos indicadores", TRUE)
  print("######################################")
  # Year-over-year differences: secondary axis, then shared axis.
  plotarComparacao(resultados$diferencaInvestimento, "Diferenca investimento",
                   resultados$diferencaPIB, "Diferenca PIB",
                   "Comparacao da variacao dos indicadores", TRUE)
  print("######################################")
  plotarComparacao(resultados$diferencaInvestimento, "Diferenca investimento",
                   resultados$diferencaPIB, "Diferenca PIB",
                   "Comparacao da variacao dos indicadores", FALSE)
  print("######################################")
}
#########################################
#########################################
#########################################
## Question 3: loads a set of education/GDP indicators for Brazil into the
## global list `listaDeDadosBR`, plots them in several combinations and
## runs the pattern-detection routine.
##
## Side effects: updates the globals `listaDeDadosBR` and
## `listaPadroesDeDadosBR1x1` (via `<<-`) and prints several plotly charts.
terceiraPergunta <- function() {
  print("###############################################")
  print("Adicionando e tratando os dados dos Indicadores")
  print("###############################################")
  # Indicator specs: c(code, English title, Portuguese title); all for "BR".
  # The list order defines the indices used by the plots and the pattern
  # checks below (1 = GDP growth, 2 = R&D spending, ...).
  especificacoes <- list(
    c("NY.GDP.MKTP.KD.ZG",
      "GDP growth (annual %)",
      "Crescrimento do PIB (% anual)"),
    c("GB.XPD.RSDV.GD.ZS",
      "Research and development expenditure (% of GDP)",
      "Gastos com pesquisa e desenvolvimento (% do PIB)"),
    c("SE.XPD.TOTL.GD.ZS",
      "Government expenditure on education, total (% of GDP)",
      "Gasto governamental em educacao, Total (% do PIB)"),
    c("SE.XPD.TERT.ZS",
      "Expenditure on tertiary as % of government expenditure on education (%)",
      "Gasto em educacao terciaria (superior) como porcentagem do gasto governamental em educacao"),
    c("SE.XPD.SECO.ZS",
      "Expenditure on secondary as % of government expenditure on education (%)",
      "Gasto em educacao secundaria como porcentagem do gasto governamental em educacao"),
    c("SE.XPD.CTOT.ZS",
      "Current education expenditure, total (% of total expenditure in public institutions)",
      "Porcentagem do total de gastos em instituicoes publicas (TOTAL)"),
    c("SE.XPD.CTER.ZS",
      "Current education expenditure, tertiary (% of total expenditure in tertiary public institutions)",
      "Porcentagem do total de gastos em instituicoes publicas (TERCIARIO/SUPERIOR)"),
    c("SE.XPD.CSEC.ZS",
      "Current education expenditure, secondary (% of total expenditure in secondary public institutions)",
      "Porcentagem do total de gastos em instituicoes publicas (SECUNDARIO/MEDIO)")
  )
  for (i in seq_along(especificacoes)) {
    espec <- especificacoes[[i]]
    listaDeDadosBR <<-
      adicionarIndicador(listaDeDadosBR, espec[1], "BR", espec[2], espec[3])
    # Progress marker kept from the original implementation (it printed
    # this after loading the second indicator).
    if (i == 2) print("!!!!!!!!!")
  }
  print("###############################################")
  print("Plotando graficos")
  print(" > Dados brutos")
  print(" > Diferenca entre 2 anos consecutivos")
  print(" > Media entre 2 anos consecutivos")
  print("###############################################")
  # Builds and prints one plotly chart with one line per indicator index;
  # `campo` selects which per-year series of `dadosTratados` to plot
  # ("indicadorNoPrimeiroAno", "diferencaIndicador" or "mediaIndicador").
  plotarIndicadores <- function(indices, campo, titulo) {
    p <- plot_ly()
    for (i in indices) {
      dados <- listaDeDadosBR$dadosTratados[[i]]
      p <- add_lines(p,
                     x = dados$primeiroAno,
                     y = dados[[campo]],
                     name = listaDeDadosBR$tituloPortugues[[i]])
    }
    p <- layout(p, title = titulo, xaxis = list(title = "Ano"))
    print(p)
  }
  # All eight indicators: raw values, then differences, then means.
  plotarIndicadores(1:8, "indicadorNoPrimeiroAno",
                    "Comparacao dos indicadores (VALOR BRUTO)")
  plotarIndicadores(1:8, "diferencaIndicador",
                    "Comparacao dos indicadores (DIFERENcA ENTRE ANOS CONSECUTIVOS)")
  plotarIndicadores(1:8, "mediaIndicador",
                    "Comparacao dos indicadores (MÉDIA ENTRE A DIFERENcA DE DOIS ANOS)")
  # Pairwise comparisons of GDP growth (index 1) against selected indicators.
  plotarIndicadores(c(1, 2), "diferencaIndicador",
                    "Crescimento do PIB x Gastos com pesquisa e desenvolvimento (% do PIB)")
  plotarIndicadores(c(1, 3), "diferencaIndicador",
                    "Crescimento do PIB x Gasto governamental em educacao, TOTAL")
  plotarIndicadores(c(1, 4), "diferencaIndicador",
                    "Crescimento do PIB x Gasto em educacao terciaria")
  plotarIndicadores(c(1, 6), "diferencaIndicador",
                    "Crescimento do PIB x Gastos em educacao em instituicoes públicas (TOTAL)")
  plotarIndicadores(c(1, 7), "diferencaIndicador",
                    "Crescimento do PIB x Gastos em educacao terciaria (superior) em instituicoes públicas")
  print("###############################################")
  print("Analisando Padroes")
  print("###############################################")
  # Compare indicator 1 (GDP growth) against all the others, for both
  # modes (0 and 1) of verificarPadrao1x1().
  listaPadroesDeDadosBR1x1 <<-
    verificarPadrao1x1(listaDeDadosBR, list(1), list(2, 3, 4, 5, 6, 7, 8),
                       listaPadroesDeDadosBR1x1, 0)
  listaPadroesDeDadosBR1x1 <<-
    verificarPadrao1x1(listaDeDadosBR, list(1), list(2, 3, 4, 5, 6, 7, 8),
                       listaPadroesDeDadosBR1x1, 1)
}
#########################################
#########################################
#########################################
## Question 4: relationship between women smoking during pregnancy,
## neonatal deaths and prenatal care coverage in Brazil.
##
## Side effects: prints the three fetched tables and one plotly chart with
## the neonatal-death and prenatal-care series on a secondary y axis.
quartaPergunta <- function() {
  cat("\nNOME PERGUNTA: \nRelação entre o crescimento de mulheres fumando durante a gravidez com o número de mortes neonatais")
  cat("\nDADOS ANALISADOS: ")
  cat("\nRESULTADO ESPERADO: \n\n")
  print("######################################")
  # Women smoking during pregnancy (SH.PRV.SMOK.FE).
  fumante <- ai(indicator = c("SH.PRV.SMOK.FE"), country = c("BR"),
                startdate = 2000, enddate = 2015)
  print(fumante[, 1:6])
  # Neonatal mortality rate (SH.DYN.NMRT).
  neonatalDeath <- ai(indicator = c("SH.DYN.NMRT"), country = c("BR"),
                      startdate = 2000, enddate = 2015)
  print(neonatalDeath[, 1:6])
  # Prenatal care coverage (SH.STA.ANVC.ZS).
  prenatal <- ai(indicator = c("SH.STA.ANVC.ZS"), country = c("BR"),
                 startdate = 2000, enddate = 2015)
  # BUG FIX: the original printed `neonatalDeath` a second time here
  # instead of the freshly fetched `prenatal` table.
  print(prenatal[, 1:6])
  print("######################################")
  # Fetch the three series together for the combined chart.
  df <- ai(indicator = c("SH.PRV.SMOK.FE", "SH.DYN.NMRT", "SH.STA.ANVC.ZS"),
           country = c("BR"), startdate = 2000)
  # Secondary right-hand axis for the two percentage series.
  ay <- list(
    tickfont = list(color = "red"),
    overlaying = "y",
    side = "right",
    title = "% de mulheres que fumam durante gravidez"
  )
  p <- plot_ly() %>%
    add_lines(x = df[df$src_id_ind == "SH.PRV.SMOK.FE", ]$year,
              y = df[df$src_id_ind == "SH.PRV.SMOK.FE", ]$value,
              name = "Mulheres que fumam durante gravidez") %>%
    add_lines(x = df[df$src_id_ind == "SH.DYN.NMRT", ]$year,
              y = df[df$src_id_ind == "SH.DYN.NMRT", ]$value,
              name = "% de mortes neonatais (a cada 1.000)",
              yaxis = "y2") %>%
    add_lines(x = df[df$src_id_ind == "SH.STA.ANVC.ZS", ]$year,
              y = df[df$src_id_ind == "SH.STA.ANVC.ZS", ]$value,
              name = "% de mulheres que receberam cuidado prenatal",
              yaxis = "y2") %>%
    layout(title = "Comparacao dos indicadores", yaxis2 = ay,
           xaxis = list(title = "Ano"))
  print(p)
}
#########################################
#########################################
#########################################
## Question 5: compares GDP growth with industry growth (% of GDP) for
## Brazil. Loads both indicators into the global `listaDeDadosBR2` and
## plots the year-over-year differences of the two series.
##
## Side effects: updates the global `listaDeDadosBR2` (via `<<-`) and
## prints one plotly chart.
quintaPergunta <- function() {
  # Newlines added so the header lines no longer run together (matches the
  # sibling pergunta functions).
  cat("NOME PERGUNTA: quintaPergunta\n")
  cat("DADOS ANALISADOS: \n")
  cat("RESULTADO ESPERADO: \n")
  listaDeDadosBR2 <<-
    adicionarIndicador(
      listaDeDadosBR2,
      "NY.GDP.MKTP.KD.ZG",
      "BR",
      "GDP growth (annual %)",
      "Crescimento do PIB (% anual)"  # typo fix: was "Crescrimento"
    )
  listaDeDadosBR2 <<-
    adicionarIndicador(
      listaDeDadosBR2,
      "NV.IND.TOTL.ZS",
      "BR",
      "% Crescimento industria (% of GDP)",  # typo fix: was "indrustria"
      "Crescimento industria (% of GDP)"
    )
  # Plot the year-over-year differences of both indicators.
  p <- plot_ly() %>%
    add_lines(
      x = listaDeDadosBR2$dadosTratados[[1]]$primeiroAno,
      y = listaDeDadosBR2$dadosTratados[[1]]$diferencaIndicador,
      name = listaDeDadosBR2$tituloPortugues[[1]]
    ) %>%
    add_lines(
      x = listaDeDadosBR2$dadosTratados[[2]]$primeiroAno,
      y = listaDeDadosBR2$dadosTratados[[2]]$diferencaIndicador,
      name = listaDeDadosBR2$tituloPortugues[[2]]
    ) %>%
    layout(title = "Comparacao entre Crescimento do PIB e Crescimento Industria",
           xaxis = list(title = "Ano"))
  print(p)
}
|
068033e531e77fc6613451f2f1ed9e57d7a0da0c
|
6c324098a05774faec4d3aac4409fc15b62e54f1
|
/man/cellsceek_test.Rd
|
67cb5b453640bba851ba7a5921cdee41a8370b10
|
[] |
no_license
|
EugOT/trendsceek
|
20d020549ec3b51251ee87db6412b4f1bf768131
|
2688bf4fcd3635aeebfed7cbb74cb66b9379f58a
|
refs/heads/master
| 2020-08-02T20:25:20.315353
| 2019-09-28T23:19:06
| 2019-09-28T23:19:06
| 211,497,148
| 0
| 0
| null | 2019-09-28T12:29:26
| 2019-09-28T12:29:26
| null |
UTF-8
|
R
| false
| true
| 1,489
|
rd
|
cellsceek_test.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trendsceek.R
\name{cellsceek_test}
\alias{cellsceek_test}
\title{Identify cells located in regions exceeding random background expression level}
\usage{
cellsceek_test(pp, nrand = 100, cell_alpha = 0.05, h = NA)
}
\arguments{
\item{pp}{A point pattern with one or more mark distributions.}
\item{nrand}{An integer specifying the number of random resamplings of the mark distribution as to create the null-distribution.}
\item{cell_alpha}{A numeric specifying a significance level which is used to flag whether a cell has significantly higher expression than expected by random for a particular gene.}
\item{h}{A numeric vector of length 2 specifying the bandwidth of the two-dimensional Gaussian kernel (x, y).}
}
\value{
A list containing statistics for all cells for each gene.
}
\description{
\code{cellsceek_test} identifies cells located in regions with higher expression level than expected by random. The spatial distribution is presumed to be fixed and conditioned on that, the test assesses whether cells are in a region with high expression levels. The background expression 2-dimensional null-distribution is generated by random resampling of the mark distribution followed by 2-dimensional kernel density estimate for each resampling.
}
\examples{
pp = sim_pois(100)
low_expr = c(10, 10)
high_expr = c(15, 20)
pp = add_markdist_streak(pp, low_expr, high_expr)
cellpeaks = cellsceek_test(pp)
}
|
ac5de57145544ef9578f10ad5b6692c0d07716db
|
63e1231faa30a4cea6dd9f25e87c2372383aa2f4
|
/man/calcMean.Rd
|
60e6dfdc51c555d3dce83bc6eda33c32bdfcc432
|
[] |
no_license
|
cran/MSEtool
|
35e4f802f1078412d5ebc2efc3149c46fc6d13a5
|
6b060d381adf2007becf5605bc295cca62f26770
|
refs/heads/master
| 2023-08-03T06:51:58.080968
| 2023-07-19T22:10:23
| 2023-07-20T01:47:18
| 145,912,213
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 327
|
rd
|
calcMean.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PMobjects.R
\name{calcMean}
\alias{calcMean}
\title{Calculate Mean Probability}
\usage{
calcMean(Prob)
}
\arguments{
\item{Prob}{Prob slot from an object of class PMobj}
}
\description{
Calculate Mean Probability
}
\keyword{internal}
|
f46abfa345858a18e0398f391a5225a1860719e3
|
6e515ec790d899c0cca6da400cf8f8a5790a5eeb
|
/cachematrix.R
|
e4055e7ecc8398b914ec84e796eb7ec380517165
|
[] |
no_license
|
pickletime/ProgrammingAssignment2
|
5a2ce52a0e2fbc78e39cd633b358d5200ba066b4
|
66a332bb44670434b4add8903596e4416a4d4830
|
refs/heads/master
| 2021-01-21T06:30:48.290577
| 2017-09-01T02:38:42
| 2017-09-01T02:38:42
| 101,943,359
| 0
| 0
| null | 2017-08-31T00:59:48
| 2017-08-31T00:59:48
| null |
UTF-8
|
R
| false
| false
| 1,443
|
r
|
cachematrix.R
|
##This function creates a special "matrix" object that can cache its inverse
##Then able to return the inverse of aforementioned special "matrix" from cache
## Builds a cache-aware wrapper around a matrix: the returned list of
## closures stores the matrix `x` together with a memoised inverse, so the
## inverse only has to be computed once per matrix (see cacheSolve()).
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL until set.inverse() is called
  # Replace the wrapped matrix and drop any stale cached inverse.
  set <- function(y) {
    # `<<-` writes into this closure's environment, not the caller's.
    x <<- y
    inv <<- NULL
  }
  # Read back the wrapped matrix.
  get <- function() x
  # Store / read back the cached inverse.
  set.inverse <- function(matrix.inversed) inv <<- matrix.inversed
  get.inverse <- function() inv
  # Expose the four closures by name.
  list(
    set = set,
    get = get,
    set.inverse = set.inverse,
    get.inverse = get.inverse
  )
}
#This function computes the inverse of the special "matrix" returned by
#makeCacheMatrix above. If the inverse has already been calculated
#(and the matrix has not changed), then cacheSolve should retrieve the inverse
#from the cache.
## Returns the inverse of the special "matrix" produced by
## makeCacheMatrix(). The inverse is computed with solve() on the first
## call and served from the wrapper's cache afterwards; the cache is
## invalidated whenever makeCacheMatrix()'s set() replaces the matrix.
##
## x   -- list of closures produced by makeCacheMatrix()
## ... -- additional arguments forwarded to solve()
cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x'
  m <- x$get.inverse()
  if (!is.null(m)) {
    # Cache hit: skip the (potentially expensive) solve().
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  # BUG FIX: '...' was accepted by the signature but never forwarded,
  # so e.g. cacheSolve(x, b) silently ignored b.
  m <- solve(data, ...)
  x$set.inverse(m)
  m
}
|
7014beae0abbd182c7e84ba5b93a4c7ed7b6b5b5
|
4459eb5432916b4ad6c5c5d911b50c9d2fec1ad5
|
/sandbox/pulkit/man/CDaR.Rd
|
bdb7a786b3f579a195d85d2dbc067a61c1a14998
|
[] |
no_license
|
braverock/PerformanceAnalytics
|
057af55b0a4ddeb4befcc02e36a85f582406b95c
|
49a93f1ed6e2e159b63bf346672575f3634ed370
|
refs/heads/master
| 2023-08-03T10:18:27.115592
| 2023-03-29T09:23:17
| 2023-03-29T09:23:17
| 58,736,268
| 209
| 113
| null | 2023-05-23T17:46:08
| 2016-05-13T12:02:42
|
R
|
UTF-8
|
R
| false
| false
| 1,449
|
rd
|
CDaR.Rd
|
\name{CDaR}
\alias{CDaR}
\alias{CDD}
\title{Calculate Uryasev's proposed Conditional Drawdown at Risk (CDD or CDaR)
measure}
\usage{
CDaR(R, weights = NULL, geometric = TRUE, invert = TRUE,
p = 0.95, ...)
}
\arguments{
\item{R}{an xts, vector, matrix, data frame, timeSeries
or zoo object of asset returns}
\item{weights}{portfolio weighting vector, default NULL,
see Details}
\item{geometric}{utilize geometric chaining (TRUE) or
simple/arithmetic chaining (FALSE) to aggregate returns,
default TRUE}
\item{invert}{TRUE/FALSE whether to invert the drawdown
measure. see Details.}
\item{p}{confidence level for calculation, default
p=0.95}
\item{\dots}{any other passthru parameters}
}
\description{
For some confidence level \eqn{p}, the conditional
drawdown is the mean of the worst \eqn{p\%}
drawdowns.
}
\examples{
library(lpSolve)
data(edhec)
t(round(CDaR(edhec),4))
}
\author{
Brian G. Peterson
}
\references{
Chekhlov, A., Uryasev, S., and M. Zabarankin. Portfolio
Optimization With Drawdown Constraints. B. Scherer (Ed.)
Asset and Liability Management Tools, Risk Books, London,
2003 http://www.ise.ufl.edu/uryasev/drawdown.pdf
}
\seealso{
\code{\link{ES}} \code{\link{maxDrawdown}}
\code{\link{CdarMultiPath}} \code{\link{AlphaDrawdown}}
\code{\link{MultiBetaDrawdown}}
\code{\link{BetaDrawdown}}
}
\keyword{distribution}
\keyword{models}
\keyword{multivariate}
\keyword{ts}
|
9f26688e3a90afa0e5fd57bc8ee4cdf8c02c06f2
|
5b3e98e1db69b06192b063005e80c8fd417520cc
|
/Siwei_analysis/code_R/plotting/code_30Aug2021_plot_Fig_S8_merged.R
|
ef9c9ed7a3a69a815e64661846b52c3222aced07
|
[] |
no_license
|
endeneon/VPS45_repo
|
b2a48291bc209a058918af9038cdf4283dd9ae9c
|
344d54c9ced3cee055cb5e0f5856dd67893977bd
|
refs/heads/main
| 2023-08-08T00:23:24.599726
| 2023-07-24T19:37:56
| 2023-07-24T19:37:56
| 351,939,958
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,368
|
r
|
code_30Aug2021_plot_Fig_S8_merged.R
|
# Siwei 27 Aug 2021
# Re-plot Fig S8 with merged data
# init
library(ggplot2)
library(RColorBrewer)
library(readxl)
# > RColorBrewer::brewer.pal(3, "Dark2")
# [1] "#1B9E77" "#D95F02" "#7570B3"
# load data ----
# Each sheet of Fig_S8_rev_data.xlsx holds one MEA metric in long format
# (columns Day, Genotype, Value). Every panel uses identical line+point
# styling, so the panel construction is factored into one helper instead
# of six copy-pasted blocks.

# Builds and prints one Fig. S8 panel from the given worksheet.
#   sheet      -- sheet name or index passed to read_excel()
#   y_label    -- y-axis label
#   plot_title -- panel title
plot_fig_s8_panel <- function(sheet, y_label, plot_title) {
  df_to_plot <- read_excel("Fig_S8_rev_data.xlsx",
                           sheet = sheet)
  p <- ggplot(data = df_to_plot,
              aes(x = Day,
                  colour = Genotype,
                  y = Value)) +
    geom_line(size = 1) +
    geom_point(shape = 21,
               fill = "white") +
    scale_color_discrete(type = brewer.pal(3, "Dark2")) +
    ylab(y_label) +
    theme_classic() +
    ggtitle(plot_title) +
    theme(axis.text = element_text(size = 14),
          axis.title = element_text(size = 14))
  print(p)
}

# weighted mean firing rate
plot_fig_s8_panel("Weighted_MFR", "Hz", "Weighted Mean Firing Rate (Hz)")
# No_of_bursts
plot_fig_s8_panel(2, "# of events/10 min", "Number of Bursts")
# Burst Duration
plot_fig_s8_panel(3, "Duration (s)", "Burst Duration")
# Network Burst Frequency
plot_fig_s8_panel(4, "Hz", "Network Burst Frequency")
# Network Burst Duration
plot_fig_s8_panel(5, "Duration (s)", "Network Burst Duration")
# Synchrony Index
# NOTE(review): the original labelled this panel's y axis "Duration (s)",
# which looks copy-pasted from the previous panel; kept as-is -- confirm
# the intended unit for the synchrony index.
plot_fig_s8_panel(6, "Duration (s)", "Index")
|
db9ee1e0ae5a221072223b522ee927a6123fdcfd
|
bbd73e22684497a51ec61013277dcce89d417485
|
/src/regression.R
|
74d976816f4b77da092866d096df71fefd6e6e75
|
[] |
no_license
|
tlaepple/paleolibrary
|
a432c6405388474ca3295fdc2aadd65f9f6edbac
|
daed039f08854dcbb99b8194fd99ce7fec662842
|
refs/heads/master
| 2021-01-23T08:34:51.242448
| 2011-10-31T18:27:44
| 2011-10-31T18:27:44
| 2,659,270
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,508
|
r
|
regression.R
|
#http://www.rsc.org/images/brief10_tcm18-25920.pdf
#http://finzi.psych.upenn.edu/Rhelp10/2010-February/227865.html
## implemented by Thierry Themenau
# BD Ripley and M Thompson, Regression techniques for the detection
#of analytical bias, Analyst 112:377-383, 1987.
## Deming regression: fit y = a + b*x when both x and y carry known
## measurement error.
## Reference: BD Ripley and M Thompson, "Regression techniques for the
## detection of analytical bias", Analyst 112:377-383, 1987.
##
## Arguments:
##   x, y       paired measurements (must be the same length)
##   xstd, ystd per-observation standard deviations; either one value per
##              observation, or a length-2 vector c(s0, s1) meaning
##              std = s0 + s1 * value
##   jackknife  if TRUE, compute the leave-one-out variance of (a, b)
##   dfbeta     if TRUE (with jackknife), also return per-case deltas
##   scale      TRUE to estimate the error scale from the data, or a
##              numeric value whose square is used as the scale
## Returns an object of class "deming": coefficients, tests for slope == 1
## (test1) and intercept == 0 (test0), scale, scaled residuals err1/err2,
## imputed "true" x values u, and optionally the jackknife variance.
deming <- function(x, y, xstd, ystd, jackknife = TRUE, dfbeta = FALSE,
                   scale = TRUE) {
  Call <- match.call()
  n <- length(x)
  if (length(y) != n) stop("x and y must be the same length")
  if (length(xstd) != length(ystd))
    stop("xstd and ystd must be the same length")

  # Missing-value processing, honouring the session's na.action option
  nafun <- get(options()$na.action)
  if (length(xstd) == n) {
    tdata <- nafun(data.frame(x = x, y = y, xstd = xstd, ystd = ystd))
    x <- tdata$x
    y <- tdata$y
    xstd <- tdata$xstd
    ystd <- tdata$ystd
  }
  else {
    tdata <- nafun(data.frame(x = x, y = y))
    x <- tdata$x
    y <- tdata$y
    if (length(xstd) != 2) stop("Wrong length for std specification")
    xstd <- xstd[1] + xstd[2] * x
    ystd <- ystd[1] + ystd[2] * y
  }
  if (any(xstd <= 0) || any(ystd <= 0)) stop("Std must be positive")

  # Weighted residual SS for a candidate slope; the optimal intercept for a
  # given slope has the closed form computed in afun().
  minfun <- function(beta, x, y, xv, yv) {
    w <- 1 / (yv + beta^2 * xv)
    alphahat <- sum(w * (y - beta * x)) / sum(w)
    sum(w * (y - (alphahat + beta * x))^2)
  }
  # Same objective with the intercept constrained to zero
  minfun0 <- function(beta, x, y, xv, yv) {
    w <- 1 / (yv + beta^2 * xv)
    alphahat <- 0  # constrain to zero
    sum(w * (y - (alphahat + beta * x))^2)
  }
  # Optimal intercept for a given slope
  afun <- function(beta, x, y, xv, yv) {
    w <- 1 / (yv + beta^2 * xv)
    sum(w * (y - beta * x)) / sum(w)
  }

  fit <- optimize(minfun, c(.1, 10), x = x, y = y, xv = xstd^2, yv = ystd^2)
  coef <- c(intercept = afun(fit$minimum, x, y, xstd^2, ystd^2),
            slope = fit$minimum)
  # Zero-intercept fit, retained from the original; its result is
  # currently unused by the return value.
  fit0 <- optimize(minfun0, coef[2] * c(.5, 1.5), x = x, y = y,
                   xv = xstd^2, yv = ystd^2)

  w <- 1 / (ystd^2 + (coef[2] * xstd)^2)  # weights
  u <- w * (ystd^2 * x + xstd^2 * coef[2] * (y - coef[1]))  # imputed "true" value

  # Scaled residuals in x and y.
  # BUG FIX: these were previously computed only inside the
  # `is.logical(scale) && scale` branch, yet the return list always
  # referenced them, so deming(..., scale = <number>) failed with
  # "object 'err1' not found". They are now computed unconditionally.
  err1 <- (x - u) / xstd
  err2 <- (y - (coef[1] + coef[2] * u)) / ystd
  if (is.logical(scale) && scale) {
    sigma <- sum(err1^2 + err2^2) / (n - 2)
    # Ripley's paper has err = [y - (a + b*x)] * sqrt(w); gives the same SS
  }
  else sigma <- scale^2

  test1 <- (coef[2] - 1) * sqrt(sum(w * (x - u)^2) / sigma)       # test for beta=1
  test2 <- coef[1] * sqrt(sum(w * x^2) / sum(w * (x - u)^2) / sigma)  # test for a=0
  rlist <- list(coefficient = coef, test1 = test1, test0 = test2, scale = sigma,
                err1 = err1, err2 = err2, u = u)

  if (jackknife) {
    # Leave-one-out refits: delta[i, ] is the change in (a, b) when case i
    # is dropped; t(delta) %*% delta estimates the coefficient variance.
    delta <- matrix(0., nrow = n, ncol = 2)
    for (i in seq_len(n)) {
      fit <- optimize(minfun, c(.5, 1.5) * coef[2],
                      x = x[-i], y = y[-i], xv = xstd[-i]^2, yv = ystd[-i]^2)
      ahat <- afun(fit$minimum, x[-i], y[-i], xstd[-i]^2, ystd[-i]^2)
      delta[i, ] <- coef - c(ahat, fit$minimum)
    }
    rlist$variance <- t(delta) %*% delta
    if (dfbeta) rlist$dfbeta <- delta
  }
  rlist$call <- Call
  class(rlist) <- 'deming'
  rlist
}
### Monte Carlo Comparison of WLSQ and OLSBC
#r1<-r2<-vector()
#for (i in 1:100)
#{
#sx<-50
#sy<-10
#x<-1:100+rnorm(100)*sx
#y<-1:100+rnorm(100)*sy
#kappa<-1/((1-sx^2/var(x)))
#r1[i]<-lm(y~x)$coeff[2]*kappa
#r2[i]<-deming(x,y,rep(sx,100),rep(sy,100))$coef[2]
#}
|
29a204b7bf6df8f9ae0925b5487eed8040729e33
|
96ad58e95c8d7c78b86cde1c8843078dbc09c526
|
/SingleCellSignalR/man/visualize.Rd
|
25d9502ca458079f6b7e837bbac867446041bc53
|
[] |
no_license
|
MaximilianLombardo/SingleCellSignalR_v1
|
d66a42f745caff337824fb5206fbc4d529cecaae
|
feecb30e9ed05b55d672cd4efaebccdbf8260ad0
|
refs/heads/master
| 2022-12-06T06:59:24.638530
| 2020-08-16T15:51:57
| 2020-08-16T15:51:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,174
|
rd
|
visualize.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualize.R
\name{visualize}
\alias{visualize}
\title{Visualize}
\usage{
visualize(
inter,
show.in = NULL,
write.in = NULL,
write.out = FALSE,
method = "default",
limit = 30
)
}
\arguments{
\item{inter}{a list of data frames resulting from the **cell_signaling()** function}
\item{show.in}{a vector of which elements of ```inter``` must be shown}
\item{write.in}{a vector of which elements of ```inter``` must be written}
\item{write.out}{a logical}
\item{method}{a string (usually relative to the experiment)}
\item{limit}{a value between 1 and number of interactions}
}
\value{
The function returns images in the plot window of Rstudio and images in the pdf format in the *images* folder.
}
\description{
Creates chord diagrams from the interactions tables.
}
\examples{
int.1 = matrix(c("gene 1","gene 1", "gene 2", "gene 3"),ncol=2)
colnames(int.1) = c("cluster 1","cluster 2" )
int.2 = matrix(c("gene 1","gene 4","gene 4","gene 2","gene 3","gene 3"),ncol=2)
colnames(int.2) = c("cluster 1","cluster 3" )
inter = list(int.1,int.2)
names(inter) = c("1-2","1-3")
visualize(inter)
}
|
09d6a1d3619dc807ee08e66c0f99048866d4c429
|
aa979eb405e4ab209e337a001b42721fcc384526
|
/scripts_mutsig_to_muts/deconstructSigs.r
|
7e7e6da127548d461289bf072080af50d523a3fc
|
[] |
no_license
|
msecrier/assign_mutsigs_with_mutations
|
d545bd36f13747264884581747a3b40bbbb0459e
|
d175ab40b95e5c15b5bd1b7b0a19531f15489606
|
refs/heads/master
| 2023-03-20T19:14:31.158181
| 2021-03-26T13:44:24
| 2021-03-26T13:44:24
| 274,712,177
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,291
|
r
|
deconstructSigs.r
|
## Assign COSMIC mutational-signature weights to each sample with
## deconstructSigs and write the per-sample weight table to a TSV file.
##
## Command-line arguments:
##   1: input mutation table (columns: Sample, chr, pos, ref, alt, mutation)
##   2: output TSV path
##   3: sequencing type ("wes" selects exome-to-genome trinucleotide scaling)
##   4: path to an .RData file providing `signatures.cosmic`
args <- commandArgs(trailingOnly = TRUE)

## load libraries
library(deconstructSigs)
library(gplots)

# read the cancer type name
input_file  <- args[1]
output_file <- args[2]
seq_type    <- args[3]
path_cosmic <- args[4]
print(input_file)
load(path_cosmic)

# define which type of count we should be using!
if (seq_type == 'wes') {
  counts_method <- 'exome2genome'
} else {
  counts_method <- 'default'
}

x <- read.table(input_file, header = TRUE, sep = "\t")
names(x) <- c("Sample", "chr", "pos", "ref", "alt", "mutation")

print("mut.to.sigs.input step")
# Convert to deconstructSigs input: the per-sample trinucleotide-context
# count matrix the package expects
sigs.input <- mut.to.sigs.input(mut.ref = x,
                                sample.id = "Sample",
                                chr = "chr",
                                pos = "pos",
                                ref = "ref",
                                alt = "alt")

# Remove samples with fewer than 50 mutations
samples_to_save <- rownames(sigs.input[apply(sigs.input, 1, sum) >= 50, ])
list_samples <- intersect(samples_to_save, unique(x$Sample))
sigs.input <- sigs.input[rownames(sigs.input) %in% list_samples, ]
print(dim(sigs.input))
print(dim(signatures.cosmic))
print(counts_method)

# Run deconstructSigs for each remaining sample. Rows are accumulated in a
# pre-allocated list and bound once at the end: the original grew `total`
# with rbind() inside the loop behind a `flag` sentinel, a quadratic-time
# pattern that the list removes.
rows <- vector("list", length(list_samples))
for (k in seq_along(list_samples)) {
  sample <- list_samples[k]
  print(sample)
  test <- whichSignatures(tumor.ref = sigs.input,
                          signatures.ref = as.data.frame(t(signatures.cosmic)),
                          sample.id = sample,
                          contexts.needed = TRUE,
                          tri.counts.method = counts_method,
                          signature.cutoff = 0)
  a <- test$weights                                            # weight for each signature
  a['SSE'] <- round(sqrt(sum(test$diff * test$diff)), digits = 3)  # reconstruction error
  a['mutation_count'] <- nrow(x[which(x$Sample == sample), ])  # number of mutations
  rows[[k]] <- a
}
total <- do.call(rbind, rows)

# prepare CSV file: sample ids become an explicit first column
myDF <- cbind(sample_id = rownames(total), total)
rownames(myDF) <- NULL
print("write output")
# write the output to a file
write.table(myDF, file = output_file, sep = "\t", col.names = TRUE, row.names = FALSE)
|
a4855e5a3662b2917c65b26ffd1334b33755cb67
|
de90e57188cdc9c7f140230e610aed02e3ffd14a
|
/ui.R
|
afbeee4462f37cf1862e2c40784b3a2800b8ff3c
|
[] |
no_license
|
patrickfonti/Ltal_Project
|
8571935f3cc59e537ee377dff11c12acc8bd7657
|
764ab840d8291e673e8b05c7ea1a344ceaee68c4
|
refs/heads/main
| 2023-03-06T15:37:53.938964
| 2021-02-14T16:32:09
| 2021-02-14T16:32:09
| 332,077,793
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,839
|
r
|
ui.R
|
#
# This is to plot data from the Lötschental project
#
# add bslib thematic
# Dark Bootstrap theme for the whole app: Material-style accent colours on
# a near-black background, with every font family set to Google's
# "Indie Flower". The commented hex/value alternatives on the right were
# kept from earlier experiments.
material <- bs_theme(
  bg = "#202123",
  fg = "#B8BCC2", #'white',
  primary = "#EA80FC",
  secondary = "#00DAC6",
  success = "#4F9B29",
  info = "#28B3ED",
  warning = "#FD7424",
  danger = "#F7367E",
  base_font = font_google("Indie Flower"), #"Open Sans"
  heading_font = font_google("Indie Flower"), #"Proza Libre"
  code_font = font_google("Indie Flower") #"Fira Code"
)
# Interactive theme tweaking, disabled in production:
# bs_theme_preview(material, with_themer = TRUE)
# Add thematic for ggplot
# Makes base/ggplot2 plots pick up the same dark background and fonts
thematic_on(bg = '#202123', fg = 'white', accent = 'auto', font = font_spec("Indie Flower", scale = 2))
######### SHINY.UI #########
## Shiny UI for the Lötschental monitoring dashboard: a themed page with
## three tabs (Home, Campbell logger data, Dendrometers). Relies on
## objects built earlier in the file: `material` (theme), `DATA`,
## `Setting`, `DENDRO` and the helper `extract2`.
ui <- fluidPage(
  theme = material,
  tabsetPanel(
    id = 'tspan',

    # Home ----------------------------------------------------------------
    # Static overview of the research setting with a clickable plotly map
    # and a link to the project page.
    tabPanel(
      'Home', icon = icon("home", lib = "font-awesome"), value = '#home',
      titlePanel("Research setting"),
      sidebarLayout(
        sidebarPanel(),
        mainPanel(
          plotlyOutput(outputId = "ggplot.setting", height = "300px"),
          verbatimTextOutput("plotly_click"),
          tags$a(href = "https://www.wsl.ch/en/tree-ring-research/the-loetschental-tree-growth-monitoring-transect.html",
                 "Source: Lötschental transect", target = "_blank")
        )
      )
    ),

    # Campbell ------------------------------------------------------------
    # Logger data per site over a selectable date range: battery, air
    # temperature, dendrometer and sap-flow panels.
    tabPanel(
      'Campbell', icon = icon("phone-alt", lib = "font-awesome"), value = '#campbell',
      titlePanel("Campbell data from Lötschental sites"),
      sidebarLayout(
        sidebarPanel(
          # Which site's logger to show
          selectizeInput(inputId = "site", label = strong("Site"),
                         choices = unlist(lapply(names(DATA), FUN = extract2)),
                         selected = "N08b"),
          # Date window to plot
          dateRangeInput("date", strong("Date range"),
                         start = "2020-01-01", end = Sys.Date(),
                         min = "2020-01-01", max = Sys.Date())
        ),
        mainPanel(
          plotOutput(outputId = "ggplot.Batt", height = "150px"),
          plotOutput(outputId = "ggplot.Temp", height = "150px"),
          plotOutput(outputId = "ggplot.Dendro", height = "250px"),
          plotOutput(outputId = "ggplot.Sapflow", height = "250px")
        )
      )
    ),

    # Dendrometers --------------------------------------------------------
    # Long-term dendrometer series with optional TWD and daily-statistics
    # panels; tree and year choosers are populated server-side (choices = "").
    tabPanel(
      'Dendrometers', icon = icon("chart-line", lib = "font-awesome"),
      titlePanel("Dendrometer data 2007-2020"),
      sidebarLayout(
        sidebarPanel(
          # Site order below reshuffles Setting$Site for display
          selectizeInput(inputId = "siteD", label = strong("Site"),
                         choices = c(Setting$Site)[c(10, 7, 8, 3, 2, 1, 11, 12, 14, 9, 4, 5, 6, 13)],
                         selected = "N08"),
          radioButtons("species", strong("Select species"), choices = c("L", "S", "both"), selected = "L"),
          radioButtons("type", strong("Select type"), choices = c("p", "c", "both"), selected = "p"),
          checkboxInput(inputId = "showTWD", label = strong("Show TWD"), value = FALSE),
          checkboxInput(inputId = "showdaily", label = strong("Show Daily values"), value = FALSE),
          # Extra controls shown only when daily values are requested
          conditionalPanel(
            condition = "input.showdaily == true",
            selectizeInput(inputId = "Parameter", label = strong("Select daily parameter"),
                           choices = c("amplitude", "time_min", "time_max", "min", "max"),
                           selected = "amplitude"),
            checkboxGroupInput(inputId = "Sensor", label = strong("Select Tree"), choices = ""),
            conditionalPanel(
              condition = "input.showdaily == true",
              checkboxGroupInput(inputId = "Year", label = strong("Select year(s)"), choices = "")
            )
          )
        ),
        mainPanel(
          # Month-level time slider spanning the DENDRO index
          shinyWidgets::sliderTextInput(inputId = "slider", label = "Time",
                                        choices = unique(format(DENDRO$Index, format = "%b%Y")),
                                        selected = c("Jan2018", max(format(DENDRO$Index, format = "%b%Y"))),
                                        grid = FALSE, width = "100%"),
          plotOutput(outputId = "ggplot.DENDRO", height = "500px"),
          # Shown only when TWD is requested
          conditionalPanel(condition = "input.showTWD == true",
                           plotOutput(outputId = "ggplot.TWD", height = "300px")),
          plotOutput(outputId = "ggplot.CYCLE", height = "300px"),
          # Shown only when daily values are requested
          conditionalPanel(condition = "input.showdaily == true",
                           plotOutput(outputId = "ggplot.AMPLITUDE", height = "300px"))
        )
      )
    ) # END TAB PANEL
  ) # END TABSET
) # END UI
|
d3fedde72890a08f90a459f27633fc50f30e7078
|
02e2122532096c2d548665f6d6c771cd7cea7930
|
/benchmarking/R_code.R
|
cc5df7307025f0ee4e694843822872309596dc47
|
[
"MIT"
] |
permissive
|
Psy-Fer/deeplexicon
|
d7c67f1e9f4fb310110353a279ce5c78c10a3768
|
de177db383a109b31692c93d1c7119d9a1159569
|
refs/heads/master
| 2021-11-16T00:10:31.833367
| 2021-08-31T10:12:56
| 2021-08-31T10:12:56
| 207,097,317
| 26
| 7
|
MIT
| 2021-08-31T10:12:58
| 2019-09-08T10:39:30
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 10,986
|
r
|
R_code.R
|
library(ROCit)
# Get sensitivity for a given specificity.
# Prints the index, cutoff, sensitivity, specificity and accuracy at the
# first operating point whose specificity drops below `Specificity`;
# prints nothing when no such point exists.
Sn4Sp <- function(Metrics, Specificity) {
  below <- which(Metrics$SPEC < Specificity)
  if (length(below) > 0) {
    idx <- below[1]
    print(paste(idx, Metrics$Cutoff[idx], Metrics$SENS[idx],
                Metrics$SPEC[idx], Metrics$ACC[idx]))
  }
  invisible(NULL)
}
# Get Youden index (J-statistic).
# Prints the maximum of SENS + SPEC - 1 over all operating points, the
# accuracy at that point, and the point's index/cutoff/sensitivity/
# specificity. Ties resolve to the first (lowest-index) maximum, as in
# the original linear scan.
Jstat <- function(Metrics) {
  j <- Metrics$SENS + Metrics$SPEC - 1
  best <- which.max(j)
  print(paste0("J-stat: ", j[best]))
  print(paste0("ACC: ", Metrics$ACC[best]))
  print(paste(best, Metrics$Cutoff[best], Metrics$SENS[best], Metrics$SPEC[best]))
}
# Get the cutoff and stats at maximum accuracy.
# Prints the largest ACC value and the index/cutoff/sensitivity/
# specificity where it occurs; ties resolve to the first maximum, as in
# the original linear scan.
Maxcuracy <- function(Metrics) {
  best <- which.max(Metrics$ACC)
  print(paste0("Macc Accuracy: ", Metrics$ACC[best]))
  print(paste(best, Metrics$Cutoff[best], Metrics$SENS[best], Metrics$SPEC[best]))
}
# Get stats at a given cutoff.
# Walks the Cutoff vector and, at the first entry below `Thresh`, prints a
# header plus the rows immediately above and at that position, so the two
# operating points bracketing the threshold are visible. Prints nothing
# when no cutoff falls below `Thresh`.
#
# Fix: the original unconditionally printed row i-1; when the very first
# cutoff was already below the threshold this indexed the metric vectors
# with 0 and printed a garbled row. The previous row is now printed only
# when it exists.
CutoffStats <- function(Metrics, Thresh) {
  for (i in seq_along(Metrics$Cutoff)) {
    if (Metrics$Cutoff[i] < Thresh) {
      print(paste("Index", "Cutoff", "Sens", "Spec", "Acc"))
      if (i > 1) {
        print(paste(i - 1, Metrics$Cutoff[i - 1], Metrics$SENS[i - 1],
                    Metrics$SPEC[i - 1], Metrics$ACC[i - 1]))
      }
      print(paste(i, Metrics$Cutoff[i], Metrics$SENS[i],
                  Metrics$SPEC[i], Metrics$ACC[i]))
      break
    }
  }
}
##################################
# import testing data
# N.B. Default deeplexicon output should be parsed to split predictions to one per line
# This needs some R love, which I cannot provide as a master of BASH-FU.
# I run this in bash a priori on DeePlexiCon output:
#
# for DMUX in *csv ; do
#   awk 'BEGIN{FS=","OFS="\t"}
#   { if (NR>1)for (i=2;i<=NF;i++) {print $1"_BC"(i-1),$i}}' $DMUX > ${DMUX%*.csv}_split.tsv
# done
#
###################################
# NOTE(review): `t` shadows base::t() for the rest of the session --
# harmless here, but consider renaming.
t <- read.delim("test_predictions_split.tsv",header=F)
ta <- read.delim("test_actuals_split.tsv",header=F)
#calculate accuracy metrics
# measureit() (ROCit) computes accuracy/sensitivity/specificity/F-score
# across all cutoffs of the prediction score against the true class
tm <- measureit(score = as.numeric(t$V2), class = as.numeric(ta$V2) , measure = c("ACC", "SENS", "SPEC", "FSCR"))
# Sensitivity at several target specificities; the `# [1] ...` comments
# below are recorded console output from a previous run
for ( s in c(0.9999,0.999,0.99,0.90)) { Sn4Sp( tm, s) }
# print(paste( i, Metrics$Cutoff[i], Metrics$SENS[i], Metrics$SPEC[i], Metrics$ACC[i]))
# [1] "10104 1 0.411184880452935 0.999891382564186 0.852714757036373"
# [1] "22428 0.9969343 0.910472078530406 0.998995288718722 0.976864486171643"
# [1] "25122 0.013922857 0.993197833082155 0.989993618725646 0.990794672314773"
# [1] "31829 0.00040843227 0.996374893079712 0.899990495974366 0.924086595250703"
Jstat( tm )
# [1] "J-stat: 0.985974773600532"
# [1] "ACC: 0.993493136735775"
# print(paste(index, Metrics$Cutoff[index], Metrics$SENS[index], Metrics$SPEC[index]))
# [1] "24797 0.08085117 0.991975886929249 0.993998886671283"
#PLOT ROC
t_rocit_emp <- rocit( score = as.numeric(t$V2), class = as.numeric(ta$V2) , method = "emp")
pdf( "T_ROC.pdf", width=6, height=6)
plot( t_rocit_emp , legend=F, YIndex=T, col=c(2,"grey90"))
dev.off()
pdf( "T_RECOV.pdf", width=6, height=6 )
#Ploc accuracy and recovery = f(cutoff)
# Dual-axis plot: accuracy (left axis, black) and sensitivity/"% recovery"
# (right axis, blue) as functions of the cutoff
par(mar = c(5, 4, 4, 4) + 0.3)
plot( tm$Cutoff, tm$ACC, type="l" , ylim=c(0,1), xlab="Cutoff", ylab="Accuracy", axes=FALSE)
axis(2, las=1)
box()
par(new=TRUE)
lines( tm$Cutoff, tm$SENS, type="l", col=4)
mtext("% Recovery", side=4, col=4, line=3)
axis(4, col=4, col.axis=4,las=1)
axis(1)
grid()
dev.off()
#Precision Recall
pdf("T_PREC.pdf", width=6, height=6 )
plot( tm$TP/(tm$TP+tm$FP), tm$TP/(tm$TP+tm$FN), type="l", ylim=c(0,1), xlim=c(0,1), col=3, ylab="Recall", xlab="Precision")
grid()
dev.off()
##################################
#import validation data
# Same analysis as the testing block above, applied to the validation
# split; the `# [1] ...` comments are recorded console output.
v <- read.delim("val_predictions_split.tsv",header=F)
va <- read.delim("val_actuals_split.tsv",header=F)
#calculate accuracy metrics
vm <- measureit(score = as.numeric(v$V2), class = as.numeric(va$V2) , measure = c("ACC", "SENS", "SPEC", "FSCR"))
for ( s in c(0.9999,0.999,0.99,0.90)) { Sn4Sp( vm, s ) }
# print(paste( i, Metrics$Cutoff[i], Metrics$SENS[i], Metrics$SPEC[i], Metrics$ACC[i]))
# [1] "10235 1 0.416520711987292 0.999891382564186 0.854048714919962"
# [1] "20394 0.9991339 0.827624129363366 0.998995288718722 0.956152498879883"
# [1] "25105 0.015241252 0.992505396928842 0.989993618725646 0.990621563276445"
# [1] "31807 0.00037272074 0.995478799234247 0.899990495974366 0.923862571789337"
Jstat(vm)
# [1] "J-stat: 0.984522015396522"
# [1] "ACC: 0.994114292696835"
# print(paste(index, Metrics$Cutoff[index], Metrics$SENS[index], Metrics$SPEC[index]))
# [1] "24568 0.43959102 0.988554437701112 0.99596757769541"
Maxcuracy(vm)
# [1] "Macc Accuracy: 0.994368864812024"
# [1] "24423 0.8163857 0.9861105453953 0.997121637950932"
#PLOT ROC
v_rocit_emp <- rocit( score = as.numeric(v$V2), class = as.numeric(va$V2) , method = "emp")
pdf( "V_ROC.pdf", width=6, height=6)
plot( v_rocit_emp , legend=F, YIndex=T, col=c(2,"grey90"))
dev.off()
#Ploc accuracy and recovery = f(cutoff)
# Dual-axis plot: accuracy (left) and sensitivity/"% recovery" (right, blue)
pdf( "V_RECOV.pdf", width=6, height=6)
par(mar = c(5, 4, 4, 4) + 0.3)
plot( vm$Cutoff, vm$ACC, type="l" , ylim=c(0,1), xlab="Cutoff", ylab="Accuracy", axes=FALSE)
axis(2, las=1)
box()
par(new=TRUE)
lines( vm$Cutoff, vm$SENS, type="l", col=4)
mtext("% Recovery", side=4, col=4, line=3)
axis(4, col=4, col.axis=4,las=1)
axis(1)
grid()
dev.off()
#Precision Recall
pdf("V_PREC.pdf", width=6, height=6)
plot( vm$TP/(vm$TP+vm$FP), vm$TP/(vm$TP+vm$FN), type="l", ylim=c(0,1), xlim=c(0,1), col=3, ylab="Recall", xlab="Precision")
grid()
dev.off()
##################################
#import rep5 data
# Same analysis for replicate 5 (full set `r` and subset `r2`).
# NOTE(review): `rm` shadows base::rm() for the rest of the session.
r <- read.delim("rep5-test_predictions-on-rep2-4_split.tsv",header=F)
ra <- read.delim("rep5-test_actuals_split.tsv",header=F)
r2 <- read.delim("rep5-test_predictions-on-rep2-4_split_sub.tsv", header=F)
ra2 <- read.delim("rep5-test_actuals_split_sub.tsv",header=F)
#calculate accuracy metrics
rm <- measureit(score = as.numeric(r$V2), class = as.numeric(ra$V2) , measure = c("ACC", "SENS", "SPEC", "FSCR"))
rm2 <- measureit(score = as.numeric(r2$V2), class = as.numeric(ra2$V2) , measure = c("ACC", "SENS", "SPEC", "FSCR"))
for ( s in c(0.9999,0.999,0.99,0.90)) { Sn4Sp( rm, s ) }
# ( i, Metrics$Cutoff[i], Metrics$SENS[i], Metrics$SPEC[i], Metrics$ACC[i]))
Jstat(rm)
# print(paste(index, Metrics$Cutoff[index], Metrics$SENS[index], Metrics$SPEC[index]))
for ( s in c(0.9999,0.999,0.99,0.90)) { Sn4Sp( rm2, s ) }
# Fix: the console output below had been pasted back into the script
# without comment markers (bare `[1] "..."` lines and a dangling
# `(index, ...)` fragment), which made the whole file fail to parse.
# The lines are kept as a record but are now commented out.
# ( i, Metrics$Cutoff[i], Metrics$SENS[i], Metrics$SPEC[i], Metrics$ACC[i]))
# [1] "1988 0.9999919 0.1983 0.999866666666667 0.799475"
# [1] "5719 0.99891186 0.5687 0.998966666666667 0.8914"
# [1] "8574 0.8951255 0.8272 0.989966666666667 0.949275"
# [1] "12592 0.042037927 0.959 0.899966666666667 0.914725"
Jstat(rm2)
# [1] "J-stat: 0.880333333333333"
# [1] "ACC: 0.9458"
# (index, Metrics$Cutoff[index], Metrics$SENS[index], Metrics$SPEC[index]))
# [1] "10747 0.22783595 0.9289 0.951433333333333"
for ( s in c(0.9999,0.999,0.99,0.90)) { Sn4Sp( rm, s ) }
# [1] "24250 0.99999523 0.17379348965438 0.999899487385667 0.793372987952845"
# [1] "84575 0.99834526 0.604195683701162 0.998999660171637 0.900298666054018"
# [1] "120947 0.880043 0.838325459845211 0.989998994873857 0.952080611116695"
# [1] "176031 0.04012807 0.96380109989518 0.899999521368503 0.915949916000172"
Jstat(rm)
# [1] "J-stat: 0.882390668600338"
# [1] "ACC: 0.945877546917853"
# [1] "150451 0.21432284 0.931830909064802 0.950559759535536"
Maxcuracy(rm)
# [1] "Macc Accuracy: 0.955781629165889"
# [1] "131883 0.64243907 0.884984851313126 0.97938055511681"
#PLOT ROC
r_rocit_emp <- rocit( score = as.numeric(r$V2), class = as.numeric(ra$V2) , method = "emp")
pdf( "fullROC.pdf", width=6, height=6)
plot( r_rocit_emp , legend=F, YIndex=T, col=c(2,"grey90"))
dev.off()
pdf( "fullRECOV.pdf", width=6, height=6)
par(mar = c(5, 4, 4, 4) + 0.3)
plot( rm$Cutoff, rm$ACC, type="l" , ylim=c(0,1), xlab="Cutoff", ylab="Accuracy", axes=FALSE)
axis(2, las=1)
box()
par(new=TRUE)
lines( rm$Cutoff, rm$SENS, type="l", col=4)
mtext("% Recovery", side=4, col=4, line=3)
axis(4, col=4, col.axis=4,las=1)
axis(1)
grid()
dev.off()
#Precision Recall
pdf("fullPREC.pdf", width=6, height=6)
plot( rm$TP/(rm$TP+rm$FN), rm$TP/(rm$TP+rm$FP), type="l", ylim=c(0,1), xlim=c(0,1), col=3, xlab="Recall", ylab="Precision")
grid()
dev.off()
r2_rocit_emp <- rocit( score = as.numeric(r2$V2), class = as.numeric(ra2$V2) , method = "emp")
pdf( "ROC.pdf", width=6, height=6)
plot( r2_rocit_emp , legend=F, YIndex=T, col=c(2,"grey90"))
dev.off()
pdf( "RECOV.pdf", width=6, height=6)
par(mar = c(5, 4, 4, 4) + 0.3)
plot( rm2$Cutoff, rm2$ACC, type="l" , ylim=c(0,1), xlab="Cutoff", ylab="Accuracy", axes=FALSE)
axis(2, las=1)
box()
par(new=TRUE)
lines( rm2$Cutoff, rm2$SENS, type="l", col=4)
mtext("% Recovery", side=4, col=4, line=3)
axis(4, col=4, col.axis=4,las=1)
axis(1)
grid()
dev.off()
#Precision Recall
# NOTE(review): this plot() call sits outside any pdf() device and looks
# like an interactive-session leftover -- confirm before removing.
plot( rm$TP/(rm$TP+rm$FN), rm$TP/(rm$TP+rm$FP), type="l", ylim=c(0,1), xlim=c(0,1), col=3, xlab="Recall", ylab="Precision")
pdf("PREC.pdf", width=6, height=6)
plot( rm2$TP/(rm2$TP+rm2$FN), rm2$TP/(rm2$TP+rm2$FP), type="l", ylim=c(0,1), xlim=c(0,1), col=3, xlab="Recall", ylab="Precision")
grid()
dev.off()
##################################
#import REP1 data
# Same analysis for replicate 1.
r1 <- read.delim("rep1-test_predictions_split.tsv",header=F)
r1a <- read.delim("rep1-test_actuals_split.tsv",header=F)
#calculate accuracy metrics
r1m <- measureit(score = as.numeric(r1$V2), class = as.numeric(r1a$V2) , measure = c("ACC", "SENS", "SPEC", "FSCR"))
for ( s in c(0.9999,0.999,0.99,0.90)) { Sn4Sp( r1m, s ) }
# Fix: the console output below had been pasted back into the script
# without comment markers, which made the file fail to parse. The lines
# are kept as a record but are now commented out.
# ( i, Metrics$Cutoff[i], Metrics$SENS[i], Metrics$SPEC[i], Metrics$ACC[i]))
# [1] "831 1 0.0248839255909932 0.999898845830931 0.756145115770946"
# [1] "4692 0.9999933 0.139349983309562 0.998998573726216 0.784086426122053"
# [1] "20914 0.98338217 0.604618699359694 0.989995852679068 0.893651564349225"
# [1] "38807 0.16259591 0.877613570843322 0.899998988458309 0.894402634054563"
Jstat(r1m)
# [1] "J-stat: 0.778705024327578"
# [1] "ACC: 0.898211088520013"
# [1] "37911 0.19139561 0.87163535945134 0.907069664876238"
Maxcuracy(r1m)
# [1] "Macc Accuracy: 0.917056110217583"
# [1] "28381 0.75454766 0.764725518162231 0.967832974236033"
r1_rocit_emp <- rocit( score = as.numeric(r1$V2), class = as.numeric(r1a$V2) , method = "emp")
pdf( "ROC.pdf", width=6, height=6)
plot( r1_rocit_emp , legend=F, YIndex=T, col=c(2,"grey90"))
dev.off()
pdf( "RECOV.pdf", width=6, height=6)
par(mar = c(5, 4, 4, 4) + 0.3)
plot( r1m$Cutoff, r1m$ACC, type="l" , ylim=c(0,1), xlab="Cutoff", ylab="Accuracy", axes=FALSE)
axis(2, las=1)
box()
par(new=TRUE)
lines( r1m$Cutoff, r1m$SENS, type="l", col=4)
mtext("% Recovery", side=4, col=4, line=3)
axis(4, col=4, col.axis=4,las=1)
axis(1)
grid()
dev.off()
#Precision Recall
pdf("PREC.pdf", width=6, height=6)
plot( r1m$TP/(r1m$TP+r1m$FN), r1m$TP/(r1m$TP+r1m$FP), type="l", ylim=c(0,1), xlim=c(0,1), col=3, xlab="Recall", ylab="Precision")
grid()
dev.off()
|
5f15b0246620c80089dd07be52eba474e01c6da2
|
bf302f647f6ae91b9a3cc6055d936b91541d3876
|
/Handling multiple CSV files/test_hw2.R
|
5c1d57e314dbf5d3a7310f536c8142c151912b9f
|
[] |
no_license
|
Abhishek19895/R_prog_2015
|
60043eed994556cbaab762c12aea0e4a59ce3a20
|
2baa9dcf72689653ed7e423864a6ae4a6c414007
|
refs/heads/master
| 2020-12-23T11:47:29.070205
| 2016-08-11T18:25:41
| 2016-08-11T18:25:41
| 41,228,307
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 816
|
r
|
test_hw2.R
|
library("testthat")
# NOTE(review): test_dir(".") runs before the helper scripts below are
# sourced and before the test_that() blocks below are defined -- and it
# may re-discover this very file. Confirm the intended ordering.
test_dir(".")
cat("\nRunning unit tests\n")
# Load the functions under test
source("mean_auc.R")
source("num_trees_auc.R")
source("min_num_trees.R")
# mean_auc("exp-auc"): expect a 47-row table with columns campaign, mean_auc
test_that("mean_auc", {
cat("\nTesting mean_auc.R \n")
a <- mean_auc("exp-auc")
expect_that(length(a), equals(2) )
expect_that(names(a), equals(c("campaign","mean_auc")))
expect_that(nrow(a), equals(47))
})
# num_trees_auc with threshold 0.7: expect 8 rows, median num_trees of 25
test_that("num_trees_auc", {
cat("\nTesting num_trees_auc.R \n")
a <- num_trees_auc("exp-auc", threshold=0.7)
expect_that(nrow(a), equals(8))
expect_that(names(a), equals(c("num_trees","mean_auc")))
expect_that(median(a$num_trees), equals(25))
})
# min_num_trees: expect a 47-row table with columns campaign, num_trees
test_that("min_num_trees", {
cat("\nTesting min_num_trees.R \n")
a <- min_num_trees("exp-auc")
expect_that(names(a), equals(c("campaign","num_trees")))
expect_that(nrow(a), equals(47))
})
|
c1deb647febd7039947f8f0719a26cb085f0ec0e
|
e1e81c4482a8646fb923bd8f51c3194a9d94b778
|
/man/fars_read_years.Rd
|
2df344114df5034448003666d95b7b3a262972e2
|
[] |
no_license
|
pchhina/fars
|
2702a20e7ed5cb769aa9e0a2256d9d96cd96e3d0
|
693639cf4f0ff173518bca5e87ae996697de1e87
|
refs/heads/master
| 2020-03-27T20:41:24.785717
| 2018-09-02T15:55:06
| 2018-09-02T15:55:06
| 147,086,691
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 712
|
rd
|
fars_read_years.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_read_years}
\alias{fars_read_years}
\title{Reads data from a file for each year, subsets month and year and saves it into a list}
\usage{
fars_read_years(years)
}
\arguments{
\item{years}{a single year or a vector containing multiple years}
}
\value{
This function returns a list containing month and year from each years data
}
\description{
This function reads in a vector of years. For each year, it makes a filename, reads the data from that file, adds a year column to the data, selects the month and year variables, and finally stores the result in an element of a list.
}
\examples{
fars_read_years(2012)
}
|
5ac408a9a800583f85e1cd2522817389dba43005
|
55cad56f6731914f4af697a6893e6be27b43ca78
|
/cachematrix.R
|
93756cf73dec0a68c42ccd2119f61adcc93000d4
|
[] |
no_license
|
bsylvemdek12/ProgrammingAssignment2
|
754184c0b3b88dd4eacbe8496b69ca7c9eb52626
|
43d5d101ad624615e9ce97fef7f464442fcf5a06
|
refs/heads/master
| 2021-01-11T03:55:32.450161
| 2016-10-18T17:59:41
| 2016-10-18T17:59:41
| 71,272,623
| 0
| 0
| null | 2016-10-18T17:17:23
| 2016-10-18T17:17:22
| null |
UTF-8
|
R
| false
| false
| 1,107
|
r
|
cachematrix.R
|
## Constructor for a matrix whose inverse can be cached.
##
## Returns a list of accessor closures sharing one enclosing environment:
##   set(y)      -- replace the stored matrix and drop any cached inverse
##   get()       -- return the stored matrix
##   setMtrx(v)  -- cache a value (intended: the matrix inverse)
##   getMtrx()   -- return the cached value, or NULL if none has been set
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  set <- function(y) {
    # Storing a new matrix invalidates whatever inverse was cached
    x <<- y
    cached <<- NULL
  }
  get <- function() {
    x
  }
  setMtrx <- function(solve) {
    cached <<- solve
  }
  getMtrx <- function() {
    cached
  }
  list(set = set, get = get, setMtrx = setMtrx, getMtrx = getMtrx)
}
## Return the inverse of the matrix held in `x` (a makeCacheMatrix object),
## computing it at most once. A cached inverse is returned immediately
## (after a "getting cached data" message); otherwise the inverse is
## computed with solve(), stored back via x$setMtrx(), and returned.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getMtrx()
  if (is.null(cached)) {
    # Cache miss: compute, store, return
    inverse <- solve(x$get(), ...)
    x$setMtrx(inverse)
    inverse
  } else {
    message("getting cached data")
    cached
  }
}
|
e183c7726976b276a4f2a9bf78b328cdd3a4cc5b
|
4527a4115dec631c1b1ca75caf6ffdc9c2533328
|
/man/nameHash.Rd
|
38755430cf9b2a7d3ba11e2a56ed7fc4f88fe04b
|
[] |
no_license
|
seaYali/dagLogo
|
ce0aded0290815657a1d4dde294b75baa22812b8
|
ab914c7d57e570fedfc2c441e7e12845fb403feb
|
refs/heads/master
| 2021-04-26T16:45:13.975463
| 2020-06-26T15:03:32
| 2020-06-26T15:03:32
| 123,968,139
| 0
| 0
| null | 2018-03-05T19:28:40
| 2018-03-05T19:28:40
| null |
UTF-8
|
R
| false
| true
| 510
|
rd
|
nameHash.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dagLogo.R
\name{nameHash}
\alias{nameHash}
\title{convert group name to a single character}
\usage{
nameHash(nameScheme = c("classic", "charge", "chemistry", "hydrophobicity"))
}
\arguments{
\item{nameScheme}{could be "classic", "charge", "chemistry", "hydrophobicity"}
}
\value{
A character vector of name scheme
}
\description{
Convert a group name to a single character to be shown in a logo
}
\author{
Jianhong Ou
}
\keyword{figure}
|
2c5b8d8be541dfb411368e6b50bd183c4a96565c
|
3b63c83f878c3c4df3e46c15cf3193ec123a62f0
|
/Step_8th_Corr_EvoMyelinCBF/Step_1st_PrepareData_ForSpinTest.R
|
d8f855de1935a1b1414725a7ccfc07af7ecbde8c
|
[] |
no_license
|
ZaixuCui/pncSingleFuncParcel_Old
|
4526111154ffdc32eb766ab1a367b6bee8d85c8a
|
d9f640215b0a4f3897a1956f60470d8b154fa0ce
|
refs/heads/master
| 2020-06-14T20:09:39.297967
| 2019-07-04T15:30:32
| 2019-07-04T15:30:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,374
|
r
|
Step_1st_PrepareData_ForSpinTest.R
|
library(R.matlab)
## Read MATLAB outputs of the single-subject parcellation analysis and
## write the median variability loadings (averaged within the 17 systems,
## one file per hemisphere) to headerless CSVs for the spin-test
## permutation step.
ReplicationFolder = '/data/jux/BBL/projects/pncSingleFuncParcel/Replication';
ResultsFolder = paste0(ReplicationFolder, '/results');
# NOTE(review): VariabilityLabel_Mat is read but not used in this script --
# confirm whether it is needed downstream or can be dropped.
VariabilityLabel_Mat = readMat(paste0(ResultsFolder, '/SingleParcellation/SingleAtlas_Analysis/Variability_Visualize/VariabilityLabel.mat'));
VariabilityLoading_17SystemMean_Mat = readMat(paste0(ResultsFolder, '/SingleParcellation/SingleAtlas_Analysis/Variability_Visualize/VariabilityLoading_Median_17SystemMean.mat'));
# Output folder for the permutation (spin test) inputs
WorkingFolder = paste0(ResultsFolder, '/Corr_EvoGradientMyelinScalingCBF');
SpinTest_Folder = paste0(WorkingFolder, '/PermuteData_SpinTest');
dir.create(SpinTest_Folder, recursive = TRUE);
# Left hemisphere: t() turns the loading vector into one value per row
# before writing a headerless, unquoted-name-free CSV
VariabilityLoading_17SystemMean_lh_CSV = data.frame(VariabilityLoading_lh =
t(VariabilityLoading_17SystemMean_Mat$VariabilityLoading.Median.17SystemMean.lh));
write.table(VariabilityLoading_17SystemMean_lh_CSV, paste0(SpinTest_Folder, '/VariabilityLoading_17SystemMean_lh.csv'), row.names = FALSE, col.names = FALSE);
# Right hemisphere: same treatment
VariabilityLoading_17SystemMean_rh_CSV = data.frame(VariabilityLoading_rh =
t(VariabilityLoading_17SystemMean_Mat$VariabilityLoading.Median.17SystemMean.rh));
write.table(VariabilityLoading_17SystemMean_rh_CSV, paste0(SpinTest_Folder, '/VariabilityLoading_17SystemMean_rh.csv'), row.names = FALSE, col.names = FALSE);
|
573246fa880e4e5aeaa3cdf290d0334be1abd8e7
|
ebc76e245b1c6ad32195a9a0c9fd26826b5a8173
|
/man/src_snowflakedb.Rd
|
f6f3a00ff608fe8f8b884184aee3326824d8bae2
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
eddychansnowflake/snowflakedb-test
|
3672cfd5135b67a7c75da95dc87027ae83b6d1ea
|
a8916963828016280b1562a1b8fe8a9262910809
|
refs/heads/master
| 2021-01-11T02:25:07.566211
| 2016-10-14T19:21:15
| 2016-10-14T19:21:15
| 70,940,230
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 6,310
|
rd
|
src_snowflakedb.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/src-snowflakedb.R
\name{src_snowflakedb}
\alias{src_snowflakedb}
\alias{tbl.src_snowflakedb}
\title{dplyr backend support for SnowflakeDB (https://snowflake.net)}
\usage{
src_snowflakedb(user = NULL, password = NULL, account = NULL,
port = 443, host = NULL, opts = list(), ...)
tbl.src_snowflakedb(src, from, ...)
}
\arguments{
\item{user}{Username}
\item{password}{Password}
\item{account}{Account Name (e.g. <account>.snowflakecomputing.com)}
\item{port}{Port (Defaults to 443, the default for public endpoints)}
\item{host}{Hostname (Not required for public endpoints, defaults to
<account>.snowflakecomputing.com)}
\item{opts}{List of other parameters to pass (warehouse, db, schema, tracing)}
\item{...}{for the src, other arguments passed on to the underlying
database connector, \code{dbConnect}. For the tbl, included for
compatibility with the generic, but otherwise ignored.}
\item{src}{a snowflakedb src created with \code{src_snowflakedb}.}
\item{from}{Either a string giving the name of table in database, or
\code{\link{sql}} described a derived table or compound join.}
}
\description{
Use \code{src_snowflakedb} to connect to an existing Snowflake database,
and \code{tbl} to connect to tables within that database.
}
\section{Debugging}{
To see exactly what SQL is being sent to the database, you see
\code{\link{show_query}} and \code{\link{explain}}.
}
\section{Grouping}{
Typically you will create a grouped data table is to call the \code{group_by}
method on a mysql tbl: this will take care of capturing
the unevalated expressions for you.
}
\section{Output}{
All data manipulation on SQL tbls are lazy: they will not actually
run the query or retrieve the data unless you ask for it: they all return
a new \code{\link{tbl_sql}} object. Use \code{\link{compute}} to run the
query and save the results in a temporary in the database, or use
\code{\link{collect}} to retrieve the results to R.
Note that \code{do} is not lazy since it must pull the data into R.
It returns a \code{\link{tbl_df}} or \code{\link{grouped_df}}, with one
column for each grouping variable, and one list column that contains the
results of the operation. \code{do} never simplifies its output.
}
\section{Query principles}{
This section attempts to lay out the principles governing the generation
of SQL queries from the manipulation verbs. The basic principle is that
a sequence of operations should return the same value (modulo class)
regardless of where the data is stored.
\itemize{
\item \code{arrange(arrange(df, x), y)} should be equivalent to
\code{arrange(df, y, x)}
\item \code{select(select(df, a:x), n:o)} should be equivalent to
\code{select(df, n:o)}
\item \code{mutate(mutate(df, x2 = x * 2), y2 = y * 2)} should be
equivalent to \code{mutate(df, x2 = x * 2, y2 = y * 2)}
\item \code{filter(filter(df, x == 1), y == 2)} should be
equivalent to \code{filter(df, x == 1, y == 2)}
\item \code{summarise} should return the summarised output with
one level of grouping peeled off.
}
}
\examples{
\dontrun{
# Connection basics ---------------------------------------------------------
# To connect to a database first create a src:
my_db <- src_snowflakedb(user = "snowman",
password = "letitsnow",
account = "acme",
opts = list(warehouse = "mywh",
db = "mydb",
schema = "public")
# Then reference a tbl within that src
my_tbl <- tbl(my_db, "my_table")
}
\donttest{
# Here we'll use the Lahman database: to create your own in-database copy,
# create a database called "lahman", or tell lahman_snowflakedb() how to
# connect to a database that you can write to
#if (has_lahman("snowflakedb", account = "acme",
# user = "snowman", password = "letitsnow",
# opts=list(warehouse="wh", db="lahman", schema="public"))) {
lahman_p <- lahman_snowflakedb()
# Methods -------------------------------------------------------------------
batting <- tbl(lahman_p, "Batting")
dim(batting)
colnames(batting)
head(batting)
# Data manipulation verbs ---------------------------------------------------
filter(batting, yearID > 2005, G > 130)
select(batting, playerID:lgID)
arrange(batting, playerID, desc(yearID))
summarise(batting, G = mean(G), n = n())
mutate(batting, rbi2 = if(is.null(AB)) 1.0 * R / AB else 0)
# note that all operations are lazy: they don't do anything until you
# request the data, either by `print()`ing it (which shows the first ten
# rows), by looking at the `head()`, or `collect()` the results locally.
system.time(recent <- filter(batting, yearID > 2010))
system.time(collect(recent))
# Group by operations -------------------------------------------------------
# To perform operations by group, create a grouped object with group_by
players <- group_by(batting, playerID)
group_size(players)
summarise(players, mean_g = mean(G), best_ab = max(AB))
best_year <- filter(players, AB == max(AB) | G == max(G))
progress <- mutate(players,
cyear = yearID - min(yearID) + 1,
ab_rank = rank(desc(AB)),
cumulative_ab = order_by(yearID, cumsum(AB)))
# When you group by multiple level, each summarise peels off one level
per_year <- group_by(batting, playerID, yearID)
stints <- summarise(per_year, stints = max(stint))
filter(stints, stints > 3)
summarise(stints, max(stints))
# mutate(stints, order_by(yearID, cumsum(stints)))
# Joins ---------------------------------------------------------------------
player_info <- select(tbl(lahman_p, "Master"), playerID, birthYear)
hof <- select(filter(tbl(lahman_p, "HallOfFame"), inducted == "Y"),
playerID, votedBy, category)
# Match players and their hall of fame data
inner_join(player_info, hof)
# Keep all players, match hof data where available
left_join(player_info, hof)
# Find only players in hof
semi_join(player_info, hof)
# Find players not in hof
anti_join(player_info, hof)
# Arbitrary SQL -------------------------------------------------------------
# You can also provide sql as is, using the sql function:
batting2008 <- tbl(lahman_p,
sql('SELECT * FROM "Batting" WHERE "yearID" = 2008'))
batting2008
#}
}
}
|
ee4d66969782c32751201301217ba60e203e488c
|
98eaa4e4992701d44c3637f292ded9b26017f9f1
|
/R/classStack.R
|
42dd04736a2444448f2686f72d68f669412ee895
|
[] |
no_license
|
nplatonov/ursa
|
4e946f4bddea1b947200953f0570c01a0b8734c1
|
ecb0b4693b470a9bc4df34188b4925589b8d3988
|
refs/heads/master
| 2023-06-26T12:12:58.134465
| 2023-06-13T10:01:34
| 2023-06-13T10:01:34
| 46,657,847
| 7
| 2
| null | 2023-09-10T18:03:27
| 2015-11-22T11:34:23
|
R
|
UTF-8
|
R
| false
| false
| 2,286
|
r
|
classStack.R
|
'as.list.ursaRaster' <- function(x,...) ursa_stack(x,...)
'unlist.ursaStack' <- function(x,recursive,use.names) ursa_brick(x)
# Apply FUN to every element of a stack-like object. When the results are
# again rasters (or NULLs), re-tag the result list as an 'ursaStack' so the
# stack class survives the transformation.
'ursa_apply' <- function(obj,FUN,...) {
   out <- lapply(obj,FUN,...)
   if (.is.ursa_stack(out))
      class(out) <- "ursaStack"
   out
}
# Build an 'ursaStack' (heterogeneous list of rasters).
# A single multiband ursaRaster argument is split band-by-band into a named
# list; any other combination of arguments is simply wrapped into a list and
# tagged with the stack class.
'ursa_stack' <- function(...) { ## 'ursa_hetero' (make syn?)
   args <- list(...)
   if ((length(args)==1)&&(is.ursa(args[[1]]))) {
      img <- args[[1]]
      res <- lapply(seq_len(nband(img)),function(i) img[i])
      names(res) <- names(img)
      class(res) <- "ursaStack"
      return(res)
   }
   class(args) <- "ursaStack"
   args
}
# Merge an 'ursaStack' (list of rasters) into one multiband raster ("brick").
# A raster passes through unchanged; a non-stack returns NULL.
# Band names are taken from the per-element bandname()s; single-band elements
# are renamed after the list element's name when the list is named.
# The common nodata value and colortable are propagated only when they are
# identical across all elements.
'ursa_brick' <- function(obj) { ## 'ursa_homo' (make syn?)
   if (is.ursa(obj))
      return(obj)
   isList <- .is.ursa_stack(obj)
   if (!isList)
      return(NULL)
   n <- sapply(obj,nband)           # bands contributed by each element
   nodata <- unique(sapply(obj,ignorevalue))
   rname <- unname(unlist(lapply(obj,bandname)))
   res <- ursa_new(nband=sum(n))#,bandname=rname)
   oname <- names(obj)
   k <- 0L                          # running band offset into `res`
   for (i in seq_along(obj)) {
      if (!n[i])                    # skip empty elements
         next
      img <- .extract(obj[[i]])
     ##~ if (.is.colortable(img)) {
     ##~    print(img)
     ##~    print(ursa_colortable(img))
     ##~    img <- reclass(img)
     ##~    print(img)
     ##~ }
      nl <- nband(img)
      k2 <- k+seq(nl)               # destination band indices
      res[k2] <- img
      # a named single-band element is renamed after its list name
      if ((!is.null(oname))&&(nl==1)) {
        # bandname(res)[k2] <- oname[i]
         rname[k2] <- oname[i]
      }
      k <- k+nl
   }
   # NOTE(review): the colortable-equality guard below is repeated before the
   # colortable block further down; confirm it is the intended condition for
   # propagating the nodata value as well.
   if (all(tail(duplicated(lapply(obj,ursa_colortable)),-1)))
   if (length(nodata)==1)
      ignorevalue(res) <- nodata
   bandname(res) <- rname
   # propagate a shared colortable only when all elements agree on it
   if (all(tail(duplicated(lapply(obj,ursa_colortable)),-1))) {
      ct <- ursa_colortable(obj[[1]])
      if (length(ct)) {
         ursa_colortable(res) <- ct
         class(ursa_value(res)) <- "ursaCategory"
      }
   }
  # class(res) <- c(class(res),"ursaBrick") ## not necessary
   res
}
# TRUE when `obj` is a plain list (not itself a raster) whose elements are
# all rasters or NULLs — i.e. something that can be treated as a stack.
'.is.ursa_stack' <- function(obj) {
   if ((is.ursa(obj))||(!is.list(obj)))
      return(FALSE)
   all(sapply(obj,function(x) is.ursa(x) | is.null(x)))
}
'.is.ursa_brick' <- function(obj) is.ursa(obj)
|
1f26bd1d62bd2a7d7abed1197e0ad61fe9eb78a1
|
ce78c0dc327e45fe97f5b0b0d84c09413c18b27c
|
/ch3.R
|
3ba6bca3aee1fb82b9d0c1c88737a1f3a8e5971f
|
[] |
no_license
|
dylansun/ISLR-Lab
|
ae3b6d178b31181614fe5ccab2a58740a50a5da5
|
d86e61a89cbc481ae89beb2b73aef5ed66067e0d
|
refs/heads/master
| 2020-04-23T12:11:42.362491
| 2015-03-16T13:12:43
| 2015-03-16T13:12:43
| 31,641,322
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,327
|
r
|
ch3.R
|
library(MASS)
library(ISLR)
### Simple linear regression ----
names(Boston)
?Boston
plot(medv ~ lstat, Boston)
fit1 <- lm(medv ~ lstat, data = Boston)
summary(fit1)
abline(fit1, col = "red")
names(fit1)
confint(fit1)
predict(fit1, data.frame(lstat = c(5, 10, 15)), interval = "confidence")
### Multiple linear regression ----
fit2 <- lm(medv ~ lstat + age, data = Boston)
summary(fit2)
fit3 <- lm(medv ~ ., data = Boston)
summary(fit3)
par(mfrow = c(2, 2))
plot(fit3)
fit4 <- update(fit3, ~ . - age - indus)
summary(fit4)
### Non-linear terms and interactions ----
fit5 <- lm(medv ~ lstat * age, Boston)
summary(fit5)
fit6 <- lm(medv ~ lstat + I(lstat^2), Boston)
summary(fit6)
attach(Boston)
?attach
par(mfrow = c(1, 1))
plot(medv ~ lstat)
points(lstat, fitted(fit6), col = "red", pch = 3)
# relies on the attach(Boston) above for medv/lstat
fit7 <- lm(medv ~ poly(lstat, 4))
points(lstat, fitted(fit7), col = "blue", pch = 2)
plot(1:20, 1:20, pch = 1:20)
### Qualitative predictors ----
fix(Carseats)
names(Carseats)
summary(Carseats)
fit1 <- lm(Sales ~ . + Income:Advertising + Age:Price, data = Carseats)
summary(fit1)
contrasts(Carseats$ShelveLoc)
### Writing R functions ----
# Scatter plot of y against x with a fitted least-squares line overlaid.
regplot <- function(x, y) {
  model <- lm(y ~ x)
  plot(x, y)
  abline(model, col = "red")
}
attach(Carseats)
regplot(Price, Sales)
# Same helper, now forwarding extra graphical parameters to plot() via `...`.
regplot <- function(x, y, ...) {
  model <- lm(y ~ x)
  plot(x, y, ...)
  abline(model, col = "red")
}
regplot(Price, Sales, xlab = "Price", ylab = "Sales", col = "blue", pch = 20)
|
a9992eb160de9fb109b6c960fbadbc7a519d0ffc
|
13480cc6f45489819cb71af78d0448976066f278
|
/export_metabolights.R
|
5c307dda0d773c8f05630f5331d16af8d40eef20
|
[] |
no_license
|
sandrejev/drugs_bioaccumulation
|
3d04f6607fd3089857ada80fab658a6f4193a6ea
|
d4796ffe73348bc7b5fdb59916a4db7bf65455d8
|
refs/heads/master
| 2021-07-31T22:47:51.434127
| 2021-07-22T00:11:14
| 2021-07-22T00:11:14
| 173,614,059
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,864
|
r
|
export_metabolights.R
|
# Ensure the output directory for figures/tables exists before any export.
dir.create("reports", showWarnings=F)
library(rCharts)
library(networkD3)
library(dplyr)
library(readr)
library(cowplot)
library(xlsx)
library(ggplot2)
library(grid)
# Project-local helper functions used by the export routines below.
source("functions.R")
# Export experiment 1 (UPLC depletion-mode assay) metadata in a
# Metabolights-friendly form: joins the cleaned assay annotations with the
# per-channel UPLC acquisition mapping and writes
# "data/exp1depletion/uplc_channels_annotation.tsv".
# The trailing if(F) section is a manual-only helper for collecting raw
# chromatogram/report files; it is never executed when sourcing the file.
exp1depletion.export_metabolights = function()
{
  #
  # Load Species and drug data
  #
  drug_map = readr::read_delim("data/drug_map.tsv", "\t")
  # NOTE(review): bug_map is loaded but not used in this function — confirm.
  bug_map = readr::read_delim("data/bug_map.tsv", "\t")
  # load() restores `data.clean` (cleaned UPLC peak data) into this scope
  load("data/exp1depletion/170511_processingUPLCpeaks_data.clean_aftergrowthmerge_andfixingget.datacleanfunction.RData")
  # plates_with_growth = unique(data.clean %>% dplyr::filter(Status=="GMM" & growth=="growth") %>% .$Plate)
  # data.degrad = data.clean %>% dplyr::filter(Status=="sample" & growth=="growth" & dummy==1 & Plate %in% plates_with_growth)
  uplc_interaction_map = readr::read_tsv("data/exp2metabolomics/raw_annotations/uplc_interactions_mapping.tsv")
  plate_mapping = readr::read_tsv("data/exp2metabolomics/raw_annotations/uplc_plata_mapping.tsv")
  #
  # Load mapping between raw files and final data
  #
  # NOTE(review): growth_file2plate/growth_curves_annotations are not used
  # further down in this function — presumably kept for interactive use.
  growth_file2plate = readr::read_csv("data/exp0growth/160106_FileToPlate.csv")
  growth_curves_annotations = readr::read_tsv("data/exp0growth/raw_annotations/curves_annotation_2016-11-28.tab") %>%
    dplyr::inner_join(growth_file2plate, by="File")
  #
  # Load one mapping between raw files and final data that was missing in the main mapping (Plate_Martina: Buniformis66A2_Buniformis65A2)
  #
  # Regex matching exactly the mapping columns we want to keep.
  uplc_interaction_map_cols = paste0("^(", paste(c("AssayName", "SampleName", "Sample.Set.Name", "Vial", "Plate_Martina", "Acq.Method.Set", "Channel.Description", "Channel.Id"), collapse="|"), ")$")
  uplc_interaction_map_Buniformis66A2_Buniformis65A2 = readr::read_tsv("data/exp2metabolomics/raw_annotations/report157342.txt") %>%
    dplyr::mutate(AssayName="Screen", Plate_Martina="Buniformis66A2_Buniformis65A2") %>%
    setNames(gsub(" ", ".", colnames(.)))   # spaces in headers -> dots
  # Replace the broken plate's rows with the freshly read report.
  uplc_interaction_map_extended = dplyr::bind_rows(
    uplc_interaction_map %>% dplyr::filter(Plate_Martina!="Buniformis66A2_Buniformis65A2"),
    uplc_interaction_map_Buniformis66A2_Buniformis65A2) %>%
    dplyr::select(dplyr::matches(uplc_interaction_map_cols))
  #
  # Prepare annotations
  #
  # One row per (plate, drug, status, species); replicates renumbered 1..n.
  annotations = data.clean %>%
    dplyr::distinct(GroupName, Plate, Batch, drug, Status, Well, Replicate, Species.x) %>%
    dplyr::group_by(GroupName, Plate, drug, Status, Species.x) %>%
    dplyr::mutate(Replicate=1:n()) %>%
    dplyr::ungroup() %>%
    dplyr::rename(species.long="Species.x")
  #
  # Prepare mappings
  #
  channel_mappings = uplc_interaction_map_extended %>%
    dplyr::filter(AssayName=="Screen") %>%
    dplyr::filter(!grepl("Spect", Channel.Description)) %>%   # drop spectral channels
    # dplyr::filter(!grepl("down|purging", Acq.Method.Set)) %>%
    dplyr::group_by(AssayName, SampleName, Sample.Set.Name, Vial) %>%
    dplyr::mutate(ChannelNo=1:n()) %>%          # enumerate channels per vial
    dplyr::ungroup() %>%
    dplyr::mutate(Plate_Martina_Number=as.numeric(gsub(":.*", "", Vial)), Well=gsub(".*:", "", Vial)) %>%
    dplyr::inner_join(plate_mapping, by=c("Plate_Martina", "Plate_Martina_Number")) %>%
    # Short drug code is encoded in the acquisition method name; fix a few
    # known misspellings before mapping to full drug names.
    dplyr::mutate(drug.something=gsub("^[^_]+_", "", Acq.Method.Set)) %>%
    dplyr::mutate(drug.something=dplyr::case_when(drug.something=="eyet"~"ezet", drug.something=="rosuva"~"rosu", drug.something=="donez"~"done", T~drug.something)) %>%
    dplyr::left_join(drug_map %>% dplyr::select(drug.short, drug.long1=drug.long), by=c("drug.something"="drug.short")) %>%
    dplyr::left_join(drug_map %>% dplyr::select(drug.short2, drug.long2=drug.long), by=c("drug.something"="drug.short2")) %>%
    dplyr::mutate(drug.long=dplyr::case_when(!is.na(drug.long2)~drug.long2, !is.na(drug.long1)~drug.long1, T~NA_character_)) %>%
    dplyr::select(-drug.long1, -drug.long2) %>%
    # Classify each run: control, medium blank (GMM), technical run or sample.
    dplyr::mutate(Status=dplyr::case_when(
      grepl("ctrl", SampleName)~"ctrl",
      grepl("GMM", SampleName)~"GMM",
      grepl("down|purging", Acq.Method.Set)~"technical",
      T~"sample")) %>%
    dplyr::mutate(GroupName=ifelse(Status=="technical", NA_character_, substr(Acq.Method.Set, 1, 4))) %>%
    dplyr::mutate(drug.long=ifelse(Status=="GMM", NA_character_, drug.long)) %>%
    dplyr::group_by(Plate, GroupName, Status) %>%
    dplyr::mutate(Replicate=match(paste(SampleName, Vial), unique(paste(SampleName, Vial)))) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(Extraction="Supernatant") %>%
    dplyr::select(AssayName, Sample.Set.Name, Extraction, Plate_Martina, SampleName, GroupName, Plate, Vial, Replicate, Status, drug.long, Channel.Description, Channel.Id, ChannelNo) # Replicate, mz, RetentionTime, species.short
  # Pyri is excluded from drugs
  output = annotations %>%
    dplyr::left_join(channel_mappings %>% dplyr::mutate(has_data=T), by=c("Plate", "GroupName", "Status", "Replicate")) %>%
    # dplyr::mutate(species.long=dplyr::case_when(Status=="ctrl"~NA_character_, species=="Standard"~NA_character_, T~species.long)) %>%
    dplyr::select(AssayName, Extraction, Sample.Set.Name, Plate, SampleName, GroupName, Status, Replicate, Vial, drug.long, species.long, Channel.Description, Channel.Id, has_data)
  # Sanity check: at least one annotation row matched a channel mapping.
  # NOTE(review): any() is a weak guard; all(!is.na(...)) would require every
  # row to match — confirm which was intended.
  stopifnot(any(!is.na(output$has_data)))
  readr::write_tsv(output %>% dplyr::select(-has_data), path="data/exp1depletion/uplc_channels_annotation.tsv")
  # Manual-only section: gather raw files/reports. Never run when sourcing.
  if(F) {
    # Earlier path candidates are immediately overwritten by the local ones.
    input_dir.report = "/g/patil/Share/EmpowerProjects/DepletionModeAssay_Rawraw/Raw_readable"
    input_dir.raw = "/g/patil/Share/EmpowerProjects/Martina"
    # input_dir.report = "/g/scb2/patil/andrejev/UPLC_depletion/reports"
    # input_dir.raw = "/g/scb2/patil/andrejev/UPLC_depletion/raw"
    output_dir = "/g/scb2/patil/andrejev/UPLC_depletion"
    input_dir.report = "data/exp1depletion/arw"
    input_dir.raw = "data/exp1depletion/raw"
    output_dir = "data/exp1depletion/raw_filtered"
    # Check missing report files
    input_dir.report_files = list.files(input_dir.report)
    # NOTE(review): `xy` is not defined anywhere in this function — this
    # manual block probably meant `output`; verify before running.
    input_dir.report_files.expected = sprintf("d%s.arw", xy$Channel.Id)
    writeLines(sprintf("Missing report files: %i", length(setdiff(input_dir.report_files.expected, input_dir.report_files))))
    input_dir.raw_files = list.files(input_dir.raw)
    input_dir.raw_files.expected = sprintf("d%s.dat", xy$Channel.Id)
    writeLines(sprintf("Missing RAW files: %i", length(setdiff(input_dir.raw_files.expected, input_dir.raw_files))))
    #
    # Collect all reports into single file
    #
    i.run = 0
    reports = data.frame()
    reports.100 = data.frame()
    for(i in output$Channel.Id) {
      if(i %in% unique(reports$Channel.Id)) next
      i.file = sprintf(sprintf("%s/d%i.arw", input_dir.report, i))
      if(file.exists(i.file)) {
        i.run = i.run+1
        print(sprintf("%i/%i (added: %i)", which(i==unique(output$Channel.Id)), length(unique(output$Channel.Id)), i.run))
        # First two lines are metadata; data starts at line 3.
        report.i = readr::read_tsv(i.file, skip=2, col_names=c("RetentionTime", "Intensity"), col_types=readr::cols())
        report.i_meta = readr::read_tsv(i.file, n_max=1, locale=readr::locale(asciify=T, encoding="Windows-1252"), col_types=readr::cols()) %>%
          data.frame() %>%
          dplyr::select(Channel.Description, Channel.Id, Sample.Set.Name, Sample.Set.Method, SampleName, Instrument.Method.Name, Vial)
        reports.100 = rbind(reports.100, cbind(report.i_meta, report.i))
        # Flush the 100-file buffer into the main frame to limit rbind cost.
        if(i.run %% 100==0) {
          reports = rbind(reports, reports.100)
          reports.100 = data.frame()
        }
      }
    }
    reports = rbind(reports, reports.100)
    reports_distinct = reports %>% dplyr::distinct(Channel.Id, RetentionTime, .keep_all=T) %>% dplyr::select(Channel.Id, ElutionTime=RetentionTime, Intensity)
    readr::write_tsv(reports_distinct, path="data/exp1depletion/uplc_raw.tsv", na="")
    #
    # Copy related files to a separate directory
    #
    cmd = list()
    if(!file.exists(output_dir)) cmd[["create_dir"]] = sprintf("mkdir %s", output_dir)
    if(!file.exists(sprintf("%s/raw", output_dir))) cmd[["create_dir/raw"]] = sprintf("mkdir %s/raw", output_dir)
    # if(!file.exists(sprintf("%s/reports", output_dir))) cmd[["create_dir/raw"]] = sprintf("mkdir %s/reports", output_dir)
    # cmd[["copy_reports"]] = sprintf("cp %s/report_raw%i.arw %s/reports", input_dir.report, xy$Channel.Id, output_dir)
    # cmd[["zip_reports"]] = sprintf("zip -r %s/reports.zip %s/reports", output_dir, output_dir)
    # cmd[["copy_eic"]] = sprintf("data/exp2metabolomics/data.eic.csv %s/raw.zip", output_dir)
    # NOTE(review): `xy` again — presumably `output`; verify before running.
    cmd[["copy_raw"]] = sprintf("cp %s/d%i.dat %s/raw/", input_dir.raw, xy$Channel.Id, output_dir)
    cmd[["zip_raw"]] = sprintf("zip -r %s/raw.zip %s/raw", output_dir, output_dir)
    writeLines(unlist(cmd), con=sprintf("%s/collect_uplc_depletion.sh", output_dir))
  }
}
# Export experiment 2 (UPLC metabolomics) metadata: joins the long-format
# depletion-mode assay table with the per-channel acquisition mapping and
# writes "data/exp2metabolomics/uplc_channels_annotation.tsv".
# The if(F) tail is a manual-only raw-file collection helper.
exp2metabolomics.export_metabolights = function()
{
  drug_map = readr::read_delim("data/drug_map.tsv", "\t")
  bug_map = readr::read_delim("data/bug_map.tsv", "\t")
  uplc_interaction = readr::read_delim("data/exp2metabolomics/data.depletionmodeassay_long.csv", ",") %>%
    # dplyr::filter(!is.na(Drugs)) %>%
    # Species code is the first 2 or 3 characters of the sample name.
    dplyr::mutate(species.code2=substr(SampleName, 1, 2), species.code3=substr(SampleName, 1, 3)) %>%
    dplyr::distinct(Extraction, Sample.Set.Name, SampleName, Channel.Description, Vial, mz=X., RetentionTime=RT, drug.short2=Drugs, Replicate, species.code2, species.code3, Ctrl) %>%
    dplyr::left_join(drug_map %>% dplyr::select(drug.short2, drug.long), by="drug.short2") %>%
    dplyr::left_join(bug_map %>% dplyr::select(species.code, species.short2=species.short), by=c("species.code2"="species.code")) %>%
    dplyr::left_join(bug_map %>% dplyr::select(species.code, species.short3=species.short), by=c("species.code3"="species.code")) %>%
    # Prefer the 2-character species match, fall back to the 3-character one.
    dplyr::mutate(species.short=dplyr::case_when(!is.na(species.short2)~species.short2, !is.na(species.short3)~species.short3, T~NA_character_)) %>%
    dplyr::mutate(Status=dplyr::case_when(Ctrl=="ctrl"~"ctrl", Ctrl=="smpl"~"sample", Ctrl=="zero"~"GMM")) %>%
    dplyr::distinct(Extraction, Sample.Set.Name, SampleName, Channel.Description, Status, Replicate, Vial, mz, RetentionTime, drug.long, species.short)
  uplc_interaction_map = readr::read_tsv("data/exp2metabolomics/raw_annotations/uplc_interactions_mapping.tsv")
  channel_mappings = uplc_interaction_map %>%
    # Strip the numeric prefix / "_martina" suffix from the method name.
    dplyr::mutate(GroupName=gsub("^[0-9]+_|_martina", "", Acq.Method.Set)) %>%
    group_by(AssayName, SampleName, Sample.Set.Name, Channel.Description, Vial) %>%
    dplyr::mutate(ChannelNo=1:n()) %>%   # enumerate channels per vial
    dplyr::ungroup() %>%
    dplyr::select(AssayName, SampleName, Sample.Set.Name, Plate=Plate_Martina, GroupName, Channel.Description, Vial, ChannelNo, Channel.Id)
  annotations = uplc_interaction %>%
    group_by(SampleName, Sample.Set.Name, Channel.Description, Vial, Replicate) %>%
    dplyr::mutate(ChannelNo=1:n()) %>%
    dplyr::ungroup()
  # Join on the synthetic ChannelNo so measurement rows line up with the
  # acquisition channel they came from.
  output = channel_mappings %>%
    dplyr::inner_join(annotations, by=c("SampleName", "Sample.Set.Name", "Channel.Description", "Vial", "ChannelNo")) %>%
    dplyr::select(-ChannelNo) %>%
    dplyr::select(AssayName, Extraction, Sample.Set.Name, Plate, SampleName, GroupName, Status, Replicate, Vial, drug.long, species.short, Channel.Description, Channel.Id)
  readr::write_tsv(output, path="data/exp2metabolomics/uplc_channels_annotation.tsv")
  #
  # Collect all the raw files in one folder
  #
  # Manual-only section; never run when sourcing.
  if(F) {
    # Earlier path candidates are immediately overwritten by the local ones.
    input_dir.report = "/g/patil/Share/EmpowerProjects/DepletionModeAssay_Rawraw/Raw_readable"
    input_dir.raw = "/g/patil/Share/EmpowerProjects/Martina"
    output_dir = "/g/scb2/patil/andrejev/UPLC"
    input_dir.report = "data/exp2metabolomics/raw_reports/"
    # input_dir.raw = "/g/patil/Share/EmpowerProjects/Martina"
    # output_dir = "/g/scb2/patil/andrejev/UPLC"
    #
    # Collect all reports into single file
    #
    i.run = 0
    reports = data.frame()
    reports.100 = data.frame()
    for(i in unique(output$Channel.Id)) {
      if(i %in% reports$Channel.Id) next
      i.file = sprintf(sprintf("%s/report_raw%i.arw", input_dir.report, i))
      if(file.exists(i.file)) {
        i.run = i.run+1
        report.i = readr::read_tsv(i.file, skip=2, col_names=c("RetentionTime", "Intensity"), col_types=readr::cols()) %>% dplyr::mutate(Channel.Id=i)
        reports.100 = rbind(reports.100, report.i)
        # Flush the 100-file buffer into the main frame to limit rbind cost.
        if(i.run %% 100==0) {
          print(sprintf("%i/%i (added: %i)", which(i==unique(output$Channel.Id)), length(unique(output$Channel.Id)), i.run))
          reports = rbind(reports, reports.100)
          reports.100 = data.frame()
        }
      } else {
        writeLines(sprintf("File doesn't exist: %s", i.file))
      }
    }
    reports = rbind(reports, reports.100)
    reports_distinct = reports %>% dplyr::distinct(Channel.Id, RetentionTime, .keep_all=T) %>% dplyr::select(Channel.Id, ElutionTime=RetentionTime, Intensity)
    # NOTE(review): reports.wide is built but never written out — confirm.
    reports.wide = reports_distinct %>%
      reshape2::dcast(Channel.Id ~ RetentionTime, value.var="Intensity")
    readr::write_tsv(reports_distinct, path="data/exp2metabolomics//uplc_raw.tsv", na="")
    cmd = list()
    if(!file.exists(output_dir)) cmd[["create_dir"]] = sprintf("mkdir %s", output_dir)
    if(!file.exists(sprintf("%s/raw", output_dir))) cmd[["create_dir/raw"]] = sprintf("mkdir %s/raw", output_dir)
    if(!file.exists(sprintf("%s/reports", output_dir))) cmd[["create_dir/raw"]] = sprintf("mkdir %s/reports", output_dir)
    # NOTE(review): `xy` is not defined in this function — probably `output`.
    cmd[["copy_reports"]] = sprintf("cp %s/report_raw%i.arw %s/reports", input_dir.report, xy$Channel.Id, output_dir)
    cmd[["copy_raw"]] = sprintf("cp %s/d%i.dat %s/raw/", input_dir.raw, xy$Channel.Id, output_dir)
    cmd[["zip_reports"]] = sprintf("zip -r %s/reports.zip %s/reports", output_dir, output_dir)
    cmd[["zip_raw"]] = sprintf("zip -r %s/raw.zip %s/raw", output_dir, output_dir)
    cmd[["copy_eic"]] = sprintf("data/exp2metabolomics/data.eic.csv %s/raw.zip", output_dir)
    writeLines(unlist(cmd), con="collect_uplc.sh")
  }
}
|
47f61374912f0145b84bebb2833f31491276a9d1
|
cd283a087326a184dc2b97e445a8a09f68cf8a27
|
/man/download_them_all.Rd
|
3f563bbc1eab808037340f2c15fd8daf4805e156
|
[] |
no_license
|
rdisalv2/dismisc
|
351749eb985b7f177ee29e32e20052dca5b05a8b
|
fb1f4fec35ac352b396987a6578f758131677d52
|
refs/heads/master
| 2021-11-23T23:37:18.121555
| 2021-10-29T19:19:21
| 2021-10-29T19:19:21
| 74,858,717
| 0
| 1
| null | 2021-10-29T19:19:21
| 2016-11-27T00:36:46
|
R
|
UTF-8
|
R
| false
| true
| 892
|
rd
|
download_them_all.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download-them-all.R
\name{download_them_all}
\alias{download_them_all}
\title{Download all files in a list of URLs into the working directory}
\usage{
download_them_all(url.vect, skip.already.downloaded = TRUE,
stub.vect = rep("", length(url.vect)))
}
\arguments{
\item{url.vect}{vector of URLs. Files downloaded will be named according to part of the URL after the last forward slash '/'.}
\item{skip.already.downloaded}{whether to skip files that already exist. If FALSE, they are overwritten.}
\item{stub.vect}{vector of strings to paste0 to the front of destfile names, component-by-component. Use if the part after the last forward slash in the URLs is not unique.}
}
\value{
list of successfully downloaded destfile names
}
\description{
Download all files in a list of URLs into the working directory
}
|
07288a401dc95fc95832a93e8fedb6f2a086ef4f
|
72401ef1e9f6cc71c8d270169e71d4f5f8c0c492
|
/ML-Practicas/P5 - Asociation Rules/2 - Eclat/Eclat.R
|
3ff907c1aeeaeb1c864c978b05c7e8a1407cc8e6
|
[
"MIT"
] |
permissive
|
AdyHelvete/machinelearning-az
|
62586f33124221c07e4d745bc6918f774711c04d
|
1f3f059ec48caae131964ac0c2b1c7ef003c730c
|
refs/heads/master
| 2020-05-30T01:19:20.060650
| 2019-10-08T23:44:24
| 2019-10-08T23:44:24
| 189,473,852
| 0
| 0
| null | 2019-05-30T19:54:52
| 2019-05-30T19:54:51
| null |
ISO-8859-3
|
R
| false
| false
| 424
|
r
|
Eclat.R
|
# Eclat frequent-itemset mining on the market-basket data.
# Dependency (run once): install.packages("arules")
library(arules)

# Import the dataset directly as a transactions object: one basket per CSV
# row, duplicated items within a basket removed. (A plain read.csv() of the
# same file is not needed and would only be discarded.)
dataset <- read.transactions("Market_Basket_Optimisation.csv",
                             sep = ",", rm.duplicates = TRUE)

# Train the Eclat algorithm: itemsets present in at least 0.3% of baskets,
# with a minimum of 2 items (singletons are not informative here).
rules <- eclat(dataset, parameter = list(support = 0.003, minlen = 2))

# Visualization: the 10 most frequent itemsets, ordered by support.
inspect(sort(rules, by = "support")[1:10])
|
4af1a1032675307b3fe478bd9b94b873d80e3a8f
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/11283_0/rinput.R
|
a5b6d1e109e2ff416578f962f82866ffc76a92ae
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
rinput.R
|
# Read the Newick tree, unroot it, and write the unrooted tree back out.
library(ape)
phylo_tree <- read.tree("11283_0.txt")
write.tree(unroot(phylo_tree), file = "11283_0_unrooted.txt")
|
60a83110a17d8763dd555cca11033c995851d130
|
d597b6e126f7bf5bc30b678d3f4ba32afb10187c
|
/R/old_scripts/Identify_Cell_Types_Manually.R
|
9b5c660442dd35436e15373729ad9064639f45b4
|
[] |
no_license
|
nyuhuyang/scRNAseq-MouseAgedEyes
|
866901330ec8e503ee09e4fa9c18871dee426d66
|
5977d6c7fd3d246dedacd720d8c3d3d0b158d4c1
|
refs/heads/master
| 2022-02-05T00:04:16.098589
| 2019-07-14T04:01:39
| 2019-07-14T04:01:39
| 123,640,888
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,969
|
r
|
Identify_Cell_Types_Manually.R
|
library(Seurat)
library(dplyr)
source("./R/Seurat_functions.R")
#====== 2.1 identify phenotype for each cluster ==========================================
# Restore the aligned Seurat object; load() returns the names of the objects
# it created (expected to include `mouse_eyes`).
lnames = load(file = "./data/mouse_eyes_alignment.Rda")
lnames
# Convenience wrapper: t-SNE feature plot of the given genes on `mouse_eyes`,
# colored from lightgrey (low) to blue (high expression).
Featureplot <- function(x){
    FeaturePlot(object = mouse_eyes,
                reduction.use = "tsne",
                features.plot = x, min.cutoff = NA,
                cols.use = c("lightgrey","blue"), pt.size = 0.5)
}
# Candidate marker-gene panels per cell type; MouseGenes() (from
# ./R/Seurat_functions.R) maps the symbols onto genes present in the object.
Adipocytes <- MouseGenes(mouse_eyes,c("SLC36A2","P2RX5","MYF5","UCP1","TRIP4","ASCC1"))
Endothelium <- MouseGenes(mouse_eyes,c("Cdh5","Pecam1","Flt1","Plvap","Kdr","ptprb",
                                       "Vwf","EMCN","Car4"))
Epithelium <- MouseGenes(mouse_eyes,c("KRT19","Epcam","KRT5",
                                      "MUC1","SCGB3A2","SCGB1A1","SCGB3A1","SFTPB","FOXJ1","Rpe65",
                                      "Rlbp1","Msln","Upk3b","Lrrn4"))
RPE <- MouseGenes(mouse_eyes,c("Rpe65","Rlbp1"))
Fibroblast <- MouseGenes(mouse_eyes,c("FGF1","FGF9","SFRP1"))
Hematopoietic <- MouseGenes(mouse_eyes,c("PTPRC","LAPTM5","SRGN"))
Myeloid <- MouseGenes(mouse_eyes,c("PPBP","GNG11","HBA2","HBB","Cma1","Mcpt4","Tpsb2",
                                   "Cpa3","LYZ","S100A9","CD14","CCL2","FCGR3A","MS4A7","VMO1"))
Lymphoid <- MouseGenes(mouse_eyes,c("CD3G","CD3D","CD2","Cd19","CD79A","MS4A1",
                                    "GNLY","Ncr1","CCL5","KLRD1","NKG7"))
Melanocytes <- MouseGenes(mouse_eyes,c("Pmel","Mlana"))
Mesenchymal <- MouseGenes(mouse_eyes,c("Pdgfrb","Has2","Dcn"))
Myelinating_Schwann_cells <- MouseGenes(mouse_eyes,c("MBP","MPZ"))
Pericytes <- MouseGenes(mouse_eyes,c("Pdgfrb","Cspg4","Anpep","Rgs5",
                                     "Myh11","Mylk","Des","Vtn","Ifitm1"))
Smooth_muscle_cells <- MouseGenes(mouse_eyes,c("Acta2","Myh11"))
Stem_cell <- MouseGenes(mouse_eyes,c("POU5F1","FUT4","CD34","PROM1","ABCG2","Runx1","ATXN1",
                                     "Nes","NCAM","NGFR"))
Stromal_fibroblasts <- MouseGenes(mouse_eyes,c("DCN","COL6A1","TIMP3","PDGFRA"))
Neurons <- MouseGenes(mouse_eyes,c("Ihh","Gli1", "Ptch1", "Hhip"))
# Featureplot
# Visual inspection of each marker panel on the t-SNE embedding.
Featureplot(Adipocytes) # Adipocytes
Featureplot(Endothelium) # Endothelial Cells
Featureplot(Epithelium) # Epithelium
Featureplot(c(RPE,Melanocytes,Myelinating_Schwann_cells)) # RPE, Melanocytes, Myelinating Schwann cells
Featureplot(Fibroblast) # Fibroblasts
Featureplot(c(Hematopoietic,Myeloid[7:9],Lymphoid[1:3])) # Hematopoietic cells
Featureplot(Myeloid) # Myeloid cells
Featureplot(Lymphoid) # Lymphoid cells
Featureplot(Mesenchymal) # Mesenchymal cells
Featureplot(Pericytes) # Pericytes
Featureplot(Smooth_muscle_cells)
Featureplot(Stem_cell)
Featureplot(Stromal_fibroblasts)
Featureplot(Neurons)
# Dot plot of a combined marker selection across all clusters.
markers.to.plot <- c(Melanocytes,Myelinating_Schwann_cells,Endothelium[c(1:3,5,7)],
                     Hematopoietic[1:2],Pericytes[c(4,6:7)],Mesenchymal[c(1,4)],RPE,
                     Smooth_muscle_cells)
markers.to.plot <- MouseGenes(mouse_eyes,markers.to.plot,unique=T)
DotPlot(mouse_eyes, genes.plot = rev(markers.to.plot),
        cols.use = c("blue","red"), x.lab.rot = T, plot.legend = F,
        dot.scale = 8, do.return = T)
# Split dot plot comparing expression between the two conditions.
markers.to.plot <- c("Pmel","Dcn", "Laptm5","Mbp", "Sfrp1","Cd14",
                     "Flt1", "Kdr", "Vwf","Rgs5","Rpe65")
sdp <- SplitDotPlotGG(mouse_eyes, genes.plot = rev(markers.to.plot),
                      cols.use = c("grey","blue"), x.lab.rot = T, plot.legend = T,
                      dot.scale = 8, do.return = T, grouping.var = "conditions")
# Rename ident
# Map the numeric cluster identities to manually assigned cell-type labels
# (cluster number kept in the label).
table(mouse_eyes@ident)
idents <- as.data.frame(table(mouse_eyes@ident))
old.ident.ids <- idents$Var1
new.cluster.ids <- c("Pericytes 0",
                     "RPE 1",
                     "Endothelial cells 2",
                     "Pericytes 3",
                     "Endothelial cells 4",
                     "RPE 5",
                     "Smooth muscle cells 6",
                     "Pericytes 7",
                     "Monocytes 8",
                     "schwann cells 9",
                     "Endothelial cells 10",
                     "T cells 11",
                     "Monocytes 12")
mouse_eyes@ident <- plyr::mapvalues(x = mouse_eyes@ident,
                                    from = old.ident.ids,
                                    to = new.cluster.ids)
markers.to.plot <- c(Hematopoietic[1:2], Lymphoid[1:2],Myelinating_Schwann_cells,
                     Myeloid[c(7,9)],Pericytes[c(4,6:7)],Mesenchymal[c(1,3)],
                     Smooth_muscle_cells, Endothelium[c(1:3,5,7)],RPE,Melanocytes)
#markers.to.plot <- unique(markers.to.plot)
DotPlot(mouse_eyes, genes.plot = rev(markers.to.plot),
        cols.use = c("blue","red"), x.lab.rot = T, plot.legend = F,
        dot.scale = 8, do.return = T)
# mouse_eyes <- RenameIdentBack(mouse_eyes)
#====== 2.2 dot Plots ==========================================
# Reload the aligned object so identities start from the raw clusters again.
lnames = load(file = "./data/mouse_eyes_alignment.Rda")
lnames
table(mouse_eyes@ident)
idents <- as.data.frame(table(mouse_eyes@ident))
old.ident.ids <- idents$Var1
# Same manual assignment as in 2.1 but without cluster numbers, so clusters
# of the same type are merged under one label.
new.cluster.ids <- c("Pericytes",
                     "Retinal pigmented epithelium",
                     "Endothelial cells",
                     "Pericytes",
                     "Endothelial cells",
                     "Retinal pigmented epithelium",
                     "Smooth muscle cells",
                     "Pericytes",
                     "Monocytes",
                     "Myelinating schwann cells",
                     "Endothelial cells",
                     "T cells",
                     "Monocytes")
mouse_eyes@ident <- plyr::mapvalues(x = mouse_eyes@ident,
                                    from = old.ident.ids,
                                    to = new.cluster.ids)
markers.to.plot <- c(Melanocytes,Myelinating_Schwann_cells,Hematopoietic[1:2],
                     RPE,Smooth_muscle_cells,Endothelium[c(1:3,5,7)],
                     Pericytes[c(4,6:7)],Mesenchymal[c(1,3)])
markers.to.plot <- unique(markers.to.plot)
DotPlot(mouse_eyes, genes.plot = rev(markers.to.plot),
        cols.use = c("blue","red"), x.lab.rot = T, plot.legend = F,
        dot.scale = 8, do.return = T)
# Per-condition cell-type composition (column proportions).
freq_table <- prop.table(x = table(mouse_eyes@ident, mouse_eyes@meta.data[, "conditions"]),
                         margin = 2)
barplot(height = freq_table)
freq_table
table(mouse_eyes@meta.data[, "conditions"])
#=====2.3 tsne plot=============================
# Reload again, relabel with "<cluster>) <type>" labels for the t-SNE figure.
lnames = load(file = "./data/mouse_eyes_alignment.Rda")
lnames
table(mouse_eyes@ident)
idents <- as.data.frame(table(mouse_eyes@ident))
old.ident.ids <- idents$Var1
new.cluster.ids <- c("0) Pericytes",
                     "1) Retinal pigmented epithelium",
                     "2) Endothelial cells",
                     "3) Pericytes",
                     "4) Endothelial cells",
                     "5) Retinal pigmented epithelium",
                     "6) Smooth muscle cells",
                     "7) Pericytes",
                     "8) Monocytes",
                     "9) Myelinating schwann cells",
                     "10) Endothelial cells",
                     "11) T cells",
                     "12) Monocytes")
mouse_eyes@ident <- plyr::mapvalues(x = mouse_eyes@ident,
                                    from = old.ident.ids,
                                    to = new.cluster.ids)
TSNEPlot(object = mouse_eyes, no.legend = TRUE, do.label = TRUE,
         do.return = TRUE, label.size = 5)+
  ggtitle("TSNE plot of major cell types")+
  theme(text = element_text(size=20), #larger text including legend title
        plot.title = element_text(hjust = 0.5)) #title in middle
#====== 2.4 Compare cell type changes across conditions ==========================================
# the two patients profiled have very different composition
# Compare clusters for each dataset
SplitTSNEPlot(mouse_eyes, "conditions",do.label = F,
              do.return = TRUE, no.legend = T )
|
23a6eb0a88e42c186a17b0578561d925e4e6fda6
|
e1a29e5d370645f136f1bd005f51f6246358d778
|
/Analyse de données et Données temporelles/TP2/scriptTP2.R
|
34661e78f84637a74814093da82082eee544e6a7
|
[] |
no_license
|
wangyuteng1997/BigData_EMSE
|
50ce9d4962703823ada9c6223410ba4f02be25d4
|
960ec034e78a7906b3d33fa10f43267f41c3210e
|
refs/heads/master
| 2023-07-30T12:43:34.853146
| 2021-09-30T09:56:35
| 2021-09-30T09:56:35
| 412,010,460
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,424
|
r
|
scriptTP2.R
|
# Script for Lab 2, Time Series, Data Science track 2020-21
##############################################################################
# PART 1: Study of an AR(2)
# Model: X(t) = mu + phi1*(X(t-1)-mu) + phi2*(X(t-2)-mu) + Z(t)
# Z[t] Gaussian white noise N(0, varZ)
# Simulation of an AR(2) with an initial stationarisation (burn-in) phase
##############################################################################
# Inverses of the roots of the polynomial P(z) = 1 - phi1*z - phi2*z^2.
# With theta = 60 degrees the inverse roots are the complex conjugates
# r1*exp(+/- i*theta), which gives phi1 = 2*r1*cos(theta), phi2 = -r1^2.
r1 <- 0.9
theta <- 60
phi1 <- 2*r1*cos(2*pi*theta/360)  # BUG FIX: original used `r`, an undefined variable
phi2 <- -r1*r1                    # BUG FIX: original `+r1^2` gives phi1+phi2 > 1 (explosive)
mu <- 0    # mean of the process X[t]
sigZ <- 1  # standard deviation of the noise Z[t]
# simulation with a transient (burn-in) phase of size ninit = 50
ninit <- 50
n <- 200
ntot <- ninit + n
xtot <- rep(0,ntot)
xtot[1] <- 0
xtot[2] <- 0
for (t in 3:ntot) xtot[t] <- phi1*xtot[t-1] + phi2*xtot[t-2] + sigZ*rnorm(1)
xtot <- mu + xtot              # de-centering
xinit <- xtot[1:ninit]         # transient (initial) phase
xstat <- xtot[(ninit+1):ntot]  # stationary regime --> AR(2) of size n
# visualisation of the transient regime
# BUG FIX: the original title string was missing its closing quote,
# which swallowed the `col=` argument and broke parsing.
plot(xtot, type='o', xlab="Temps t", main = "AR(2) simule avec regime transitoire", col="grey")
lines((ninit+1):ntot, xstat, type='o')
abline(mu, 0, col="red", lwd=2)
# graphical analysis - time plot of the series xstat
plot(xstat,type='o',xlab='Temps t',main = "Simulation d'un AR(2)")
abline(mu,0,col='red')
# acf and pacf of the simulated series
op <- par(mfrow = c(1,2))
ro <- acf(xstat, lag=15, ylim = c(-1,1), main = "ACF empirique")
alpha <- pacf(xstat, lag=15, ylim = c(-1,1), main = "et PACF", xlim=c(0,15))
par(op)
###############################################################################
# PART 2: model identification
###############################################################################
# We start with the first data series, file "serie1.Rdata"
# NOTE(review): rm(list=ls()) wipes the whole workspace, including Part 1's
# results — confirm this hard reset is intended in a shared session.
rm(list=ls()) # clear all
load("serie1.Rdata")
ls.str()
# time plot of the series
plot(serie, type='o', xlab="Temps t", ylab="", main = "data")
abline(h=0, col="red", lwd=2)
# acf and pacf of the series
op <- par(mfrow = c(1,2))
ro <- acf(serie, lag=15, ylim = c(-1,1), main = "ACF empirique")
alpha <- pacf(serie, lag=15, ylim = c(-1,1), main = "et PACF empirique", xlim=c(0,15))
par(op)
# second series: file "serie2.Rdata"
...
|
db614398f6900d7fff4e1fbae3919134196c655d
|
dac72810232b9e93e57b05ffa9030d3bc69e93d0
|
/ui_visualization_pca.R
|
dc8a147501cfe59a999710255a6ef9343d4bcf53
|
[] |
no_license
|
zhezhangsh/GeEx
|
cc7c0bea9d94a60fba4d8998e67e22baf98561e4
|
9689595d41ed2134538577c6427670f0626e2317
|
refs/heads/master
| 2020-06-14T10:39:03.278202
| 2017-03-01T01:14:38
| 2017-03-01T01:14:38
| 75,196,993
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,422
|
r
|
ui_visualization_pca.R
|
# Shiny UI tab for PCA-based sample clustering.
# Helper functions (GetColorTypes, geex.html.msg, geex.clear.button,
# geex.geneset.ui) and msg.nocollection are defined elsewhere in the app.
tabPanel(
"Principal components analysis",
h2("Principal Components Analysis"),
h5(HTML(paste(
'Unsupervised clustering of samples in the same data set by',
'<a href="https://en.wikipedia.org/wiki/Principal_component_analysis" target="_blank">PCA</a>'))),
# Shown only while no data collection has been selected.
conditionalPanel(
condition = 'input[["meta.options"]] == "" ',
hr(),
list(h3(msg.nocollection))
),
# Main layout: controls on the left (5/12), plot on the right (7/12).
conditionalPanel(
condition = 'input["meta.options"] != ""',
fluidRow(
column(
5,
wellPanel(
div(style="display: inline-block; width: 125px; vertical-align: top", h5(HTML("<b>Select data set:</b>"))),
div(style="display: inline-block; width: 60%;", selectizeInput("pca.dataset", NULL, NULL)), br(),
div(style="display: inline-block; width: 125px; vertical-align: top", h5(HTML("<b>Select groups:</b>"))),
div(style="display: inline-block; width: 60%;", selectizeInput("pca.group", NULL, NULL, multiple=TRUE)), br(),
div(style="display: inline-block; width: 125px", h5(HTML("<b>Select PCs:</b>"))),
div(style="display: inline-block;", selectizeInput("pca.x", NULL, NULL, width='90px')),
div(style="display: inline-block;", h5(HTML("&nbsp;&nbsp;&nbsp;&nbsp;"))),
div(style="display: inline-block;", selectizeInput("pca.y", NULL, NULL, width='90px')), br(),
div(style="display: inline-block; width: 125px", h5(HTML("<b>Select colors:</b>"))),
div(style="display: inline-block;", selectizeInput("pca.color", NULL, GetColorTypes(), width='150px')), br(),
div(style="display: inline-block;", checkboxInput('pca.sample', HTML(geex.html.msg('Highlight sample(s)')))),
# Sample-highlight table, revealed by the checkbox above.
conditionalPanel(
condition = 'input[["pca.sample"]] == true',
DT::dataTableOutput('pca.table'),
geex.clear.button('pca.clear')
), br(),
# Optional gene-set filter: re-run PCA restricted to known gene sets.
div(
style="display: inline-block; padding: 15px",
geex.geneset.ui("pca.geneset.filter", 'Re-run PCA using known gene set(s)', 'pca.geneset.source',
'pca.geneset.coll', 'pca.geneset.species', 'pca.geneset.table', 'pca.geneset.clear')
)
)
),
column(
7,
actionButton('pca.plot.button', 'Create plot', icon=icon('area-chart'), class='dB'),
plotlyOutput('pca.plot', height="600px")
)
)
)
)
|
118c04fd2d7ba91ccf2d80a30515dbdd03ba23ea
|
841acbc6ac8197981e81c9c837def244392c5b72
|
/R/Clam_pop_equations.R
|
27c4811a124498fa90f56e2dd4812c53d40695d7
|
[] |
no_license
|
cran/RAC
|
ac1e03ef30dce32436a5370e0d7df7223a49a7a9
|
901ce405e395e1d4c0d0536aff346c90ce8da0b6
|
refs/heads/master
| 2023-05-12T13:56:30.793720
| 2023-05-02T12:00:02
| 2023-05-02T12:00:02
| 70,978,339
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,739
|
r
|
Clam_pop_equations.R
|
#' Clam bioenergetic population model differential equations
#'
#' Computes the daily individual weight increment of a clam from an
#' anabolism/catabolism energy balance, together with the temperature
#' limitation functions and the metabolic rates at time t.
#'
#' @param Param a vector containing the 22 model parameters (unpacked below)
#' @param Tint the interpolated water temperature at time t
#' @param Phy the interpolated phytoplankton at time t
#' @param DT the interpolated detritus at time t
#' @param POCint the interpolated POC at time t (kept for interface
#'   compatibility; not used by the equations below)
#' @param POMint the interpolated POM at time t
#' @param TSSint the interpolated TSS at time t
#' @param Wd the weight of the clam at time t
#'
#' @return a list containing the clam weight increment (dWd), the temperature
#'   limitation functions cbind(fa, fc) and the metabolic rates cbind(A, C)
#'   at time t
#'
#' @import matrixStats plotrix rstudioapi
#'
#' @import stats utils
#'
Clam_pop_equations <- function(Param, Tint, Phy, DT, POCint, POMint, TSSint, Wd){
  # Parameters definition (documents the layout of Param; some entries are
  # unpacked only for reference and not used in the equations below)
  epsB=Param[1]     # [J/g] Tissue energy content
  epsDT=Param[2]    # [J/mgC] Detritus energy content
  epsPhy=Param[3]   # [J/mgC] Phytoplankton energy content
  epsO2=Param[4]    # [mlO2/h] Energy consumed by the respiration of 1g of oxygen
  alpha=Param[5]    # [-] Feeding catabolism
  CRmax=Param[6]    # [l/d gDM] Maximum filtration rate
  AEmax=Param[7]    # [-] Maximum adsorption efficiency (unused here)
  Rmax=Param[8]     # [mgO2/d gDM] maximum respiration rate
  Amax=Param[9]     # [J/d g] Maximum ingestion rate for 1g of mussel
  q=Param[10]       # [-] Weight exponent for filtration
  Ks=Param[11]      # [-] Half-saturation constant for AE
  betaa=Param[12]   # [1/Celsius degree] Temperature exponent for anabolism
  betac=Param[13]   # [1/Celsius degree] Temperature exponent for catabolism
  Tma=Param[14]     # [Celsius degree] Maximum temperature for the anabolic process
  Toa=Param[15]     # [Celsius degree] Optimum temperature for the anabolic process
  Tmc=Param[16]     # [Celsius degree] Maximum temperature for the catabolic process
  Toc=Param[17]     # [Celsius degree] Optimum temperature for the catabolic process
  aF=Param[18]      # [-] Dry weight - wet weight conversion coefficient (unused here)
  bF=Param[19]      # [-] Dry weight - wet weight exponent (unused here)
  a=Param[20]       # [-] Dry weight - length conversion coefficient (unused here)
  b=Param[21]       # [-] Dry weight - length exponent (unused here)
  lambda=Param[22]  # [g/mg] Chlorophyll a - Phytoplankton conversion factor (unused here)
  # CATABOLISM
  # Optimum temperature dependence for catabolism [dimensionless];
  # zero at or above the lethal maximum Tmc
  if (Tint >= Tmc) {
    fc=0.0
  } else {
    fc=((Tmc-Tint)/(Tmc-Toc))^(betac*(Tmc-Toc))*exp(betac*(Tint-Toc))
  }
  C=Rmax*epsO2*fc*Wd  # Daily catabolism [J/day]
  # ANABOLISM
  # Optimum temperature dependence for anabolism [dimensionless]
  if (Tint >= Tma){
    fa=0.0
  } else {
    fa=((Tma-Tint)/(Tma-Toa))^(betaa*(Tma-Toa))*exp(betaa*(Tint-Toa))
  }
  I=(CRmax*fa*Wd^q)*(DT*epsDT+Phy*epsPhy)  # Daily ingestion [J/day]
  Q=POMint/TSSint  # POM/TSS ratio [-], capped at 1
  if (Q>=1){
    Q=1
  }
  AE=(Q/(Q+Ks))       # Limitation on ingested energy [-]
  E=I*AE              # Total ingested energy [J/d]
  Aing=(1-alpha)*E    # Daily anabolism before capping [J/d]
  # Daily anabolism limitation: cap at the maximum anabolic rate.
  # REFACTOR: the original if/else duplicated identical assignments of the
  # (unused) locals filt and A1 in both branches; min() expresses the same
  # capping with no duplication, and the dead locals are removed.
  A=min(Aing, Amax*Wd^q*fa)  # Anabolic rate [J/d]
  # Mass balance
  dWd=((A)-C)/epsB  # Weight increment [g/d]
  tfun=cbind(fa, fc)
  metab=cbind(A, C)
  # Function outputs
  output=list(dWd,tfun,metab)
  return(output)
}
|
9a92dde1090de17b1fbc8e1e4bf4d5bd329b0e50
|
f3a536ed29038ba6e4b2c24db93a2b8f3ba52378
|
/data/example-lab-creation.R
|
a6c40d02d65016f4ae8d6fd2ae1ce3b3f77eeebc
|
[] |
no_license
|
jhcreed/tidyverse-instructor-exam
|
58c1881730f169f6b8e17614bd11069f53b93b9e
|
e865b40efe4be98f7691a91dcf8e5bd834f1cf9c
|
refs/heads/main
| 2023-02-01T06:29:13.342174
| 2020-12-16T13:51:48
| 2020-12-16T13:51:48
| 321,205,499
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 870
|
r
|
example-lab-creation.R
|
library(tidyverse)
# Make the simulated lab data reproducible.
# BUG FIX: the original read `set,seed(8675309)` — a parse error that
# prevented the whole script from being sourced.
set.seed(8675309)
# Simulated vitals for 100 patients; NA is allowed in height and weight.
patient_id <- sample(10000:99999, size = 100)
height <- sample(c(60:84, NA), size = 100, replace = TRUE)
weight <- sample(c(NA, 110:270), size = 100, replace = TRUE)
systolic <- sample(100:180, size = 100, replace = TRUE)
diastolic <- sample(60:130, size = 100, replace = TRUE)
gender <- sample(c("Male", "Female"), size = 100, replace = TRUE)
example_data <- data.frame(patient_id,
gender,
height,
weight,
blood = paste0(systolic, "/", diastolic)
)
# Reshape to one row per (patient, test) pair to mimic a messy lab export;
# height/weight become character so they can share a column with "blood".
messy_data <- example_data %>%
mutate_at(c("height", "weight"), as.character) %>%
pivot_longer(!c(patient_id, gender), names_to = "test", values_to = "values")
example_lab <- messy_data
save(example_lab, file = "data/example_lab.RData")
|
6f422bfef4482a22f44e5d6e95ea001eeb97093e
|
f8a18363479a1aa79a95068c8ee336116b83cf55
|
/tests/testthat/test.run-ed.R
|
0ac20f7b25bb85812094bf63b03fc5d59bddc985
|
[
"MIT"
] |
permissive
|
FoRTExperiment/ed4forte
|
938f9eafeee89b6802546f2b160f91eb4ab5c334
|
44ba5f92d5cff41a0017af194eebea150f26feef
|
refs/heads/master
| 2020-12-08T18:28:36.033182
| 2020-03-19T16:34:50
| 2020-03-19T16:34:50
| 233,060,861
| 3
| 0
|
NOASSERTION
| 2020-02-05T19:17:44
| 2020-01-10T14:11:52
|
R
|
UTF-8
|
R
| false
| false
| 2,193
|
r
|
test.run-ed.R
|
# Integration tests for run_ed2().
# NOTE(review): start_date, end_date, wd, test_met, nmonths, basedir and
# narr_met are not defined in this file — presumably supplied by a testthat
# setup/helper script; confirm they are in scope when this file runs.
outdir <- tempfile()
teardown(unlink(outdir, recursive = TRUE))
test_that("Running ED2 from bare ground works", {
p <- run_ed2(
outdir, start_date, end_date,
configxml = data.frame(num = 9, SLA = 35),
wd = wd,
ED_MET_DRIVER_DB = test_met
)
p$wait()
# Make sure the run completed successfully
plog <- readLines(p$get_output_file())
expect_match(tail(plog, 1), "ED-2.2 execution ends", fixed = TRUE,
info = tail(plog, 50))
# ...generated the right input files
expect_true(file.exists(file.path(outdir, "ED2IN")))
expect_true(file.exists(file.path(outdir, "config.xml")))
expect_true(file.exists(file.path(outdir, "history.xml")))
cfg <- read_configxml(file.path(outdir, "config.xml"))
expect_equal(cfg$SLA, 35)
expect_equal(cfg$num, 9)
# The SLA override for PFT 9 must round-trip into history.xml.
hist <- xml2::read_xml(file.path(outdir, "history.xml"))
sla9 <- as.numeric(xml2::xml_text(xml2::xml_find_all(
hist,
"/config/pft[num=9]/SLA[text()]"
), trim = TRUE))
expect_equal(sla9, 35)
# ...and produced output
outfile <- file.path(outdir, "analysis-E-2004-07-00-000000-g01.h5")
expect_true(file.exists(outfile))
})
# Depends on the output directory produced by the previous test.
test_that("Reading ED2 monthly output works", {
suppressWarnings(results <- read_monthly_dir(outdir))
expect_is(results, "data.frame")
expect_equal(nrow(results[["df_scalar"]][[1]]), nmonths)
expect_equal(nrow(results[["df_pft"]][[1]]), nmonths * 17)
expect_equal(
results[["df_scalar"]][[1]][["datetime"]],
as.POSIXct("2004-07-01", tz = "UTC")
)
expect_true(file.exists(file.path(outdir, "monthly-output.rds")))
})
# Test the overwrite argument.
# NOTE(review): these four calls run at file top level, outside test_that();
# only the overwrite = NA case is wrapped in an expectation.
ptrue <- run_ed2(
file.path(basedir, "default"),
"1980-01-01", "1990-01-01",
ED_MET_DRIVER_DB = narr_met,
overwrite = TRUE
)
pNull <- run_ed2(
file.path(basedir, "default"),
"1980-01-01", "1990-01-01",
ED_MET_DRIVER_DB = narr_met,
overwrite = NULL
)
pFalse <- run_ed2(
file.path(basedir, "default"),
"1980-01-01", "1990-01-01",
ED_MET_DRIVER_DB = narr_met,
overwrite = FALSE
)
# overwrite = NA must abort with a "run canceled" error.
testthat::expect_error({pError <- run_ed2(
file.path(basedir, "default"),
"1980-01-01", "1990-01-01",
ED_MET_DRIVER_DB = narr_met,
overwrite = NA
)}, 'run canceled')
|
66904819c80b906b2b08ed2f8b07cbfb35afa133
|
753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed
|
/service/paws.cognitoidentityprovider/man/set_user_pool_mfa_config.Rd
|
88e63f98556b19e2b9d82f269471aec2609a315b
|
[
"Apache-2.0"
] |
permissive
|
CR-Mercado/paws
|
9b3902370f752fe84d818c1cda9f4344d9e06a48
|
cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983
|
refs/heads/master
| 2020-04-24T06:52:44.839393
| 2019-02-17T18:18:20
| 2019-02-17T18:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,065
|
rd
|
set_user_pool_mfa_config.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.cognitoidentityprovider_operations.R
\name{set_user_pool_mfa_config}
\alias{set_user_pool_mfa_config}
\title{Set the user pool MFA configuration}
\usage{
set_user_pool_mfa_config(UserPoolId, SmsMfaConfiguration = NULL,
SoftwareTokenMfaConfiguration = NULL, MfaConfiguration = NULL)
}
\arguments{
\item{UserPoolId}{[required] The user pool ID.}
\item{SmsMfaConfiguration}{The SMS text message MFA configuration.}
\item{SoftwareTokenMfaConfiguration}{The software token MFA configuration.}
\item{MfaConfiguration}{The MFA configuration.}
}
\description{
Set the user pool MFA configuration.
}
\section{Accepted Parameters}{
\preformatted{set_user_pool_mfa_config(
UserPoolId = "string",
SmsMfaConfiguration = list(
SmsAuthenticationMessage = "string",
SmsConfiguration = list(
SnsCallerArn = "string",
ExternalId = "string"
)
),
SoftwareTokenMfaConfiguration = list(
Enabled = TRUE|FALSE
),
MfaConfiguration = "OFF"|"ON"|"OPTIONAL"
)
}
}
|
a02647c14ccecbfe7c6e9d82a1b745d136694d2c
|
3e30053945a0b0012901e4b33fd95c39bd8f154b
|
/DAISIE/tests/testthat/test_DAISIE.R
|
ca77c6935f5440038759127b053260235ab5de4b
|
[] |
no_license
|
richelbilderbeek/DAISIE
|
e0442c1dcf278642ee1c4a57bb817f1e27ec7e5a
|
f476f5bcda00909e86beb73869ca23a6fdeaf52f
|
refs/heads/master
| 2020-03-19T11:10:12.881461
| 2018-12-12T17:40:36
| 2018-12-12T17:40:36
| 136,436,234
| 0
| 1
| null | 2018-09-19T09:02:56
| 2018-06-07T07:05:37
|
R
|
UTF-8
|
R
| false
| false
| 79
|
r
|
test_DAISIE.R
|
# Smoke test: DAISIE_test() runs the package's built-in self-check and is
# expected to error if anything is broken.
context("test_DAISIE")
test_that("DAISIE works", {
DAISIE_test()
})
|
6c05d88e3dcc10597636bf02a15171987beb0828
|
ddc2681b21caef8d7d490211dba79c40eceea301
|
/plot2.R
|
644bac8a91f48223ea1355072c31a38f26277372
|
[] |
no_license
|
nvobugari/ExData_Plotting1
|
57591f99dd65fd3d7c27fcc9899656a83996c77a
|
e19f968c1c6e8bd65432ae61e86e7b00fe77fad0
|
refs/heads/master
| 2021-01-12T21:10:28.322542
| 2014-05-11T20:13:41
| 2014-05-11T20:13:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,326
|
r
|
plot2.R
|
##plot2.R
## time-series line plot of Global Active Power usage on 2007-02-01 and 2007-02-02
##reading data from local file and loading it to a variable powerData
# reads the data from household_power_consumption.txt which is located in the directory specified
powerData <- read.table('Assignments/Exploratory Data Analysis/household_power_consumption.txt', sep=';',header=T,
colClasses = c('character', 'character', 'numeric',
'numeric', 'numeric', 'numeric',
'numeric', 'numeric', 'numeric'),
na.strings='?')
##pasting Date and Time variables and formatting them into the required format.
powerData$DateTime <- strptime(paste(powerData$Date, powerData$Time), "%d/%m/%Y %H:%M:%S")
##subsetting the data for the required Dates
powerData <- subset(powerData,
as.Date(DateTime) >= as.Date("2007-02-01") &
as.Date(DateTime) <= as.Date("2007-02-02"))
##creating the plot.png
png("plot2.png", height=480, width=480)
##creating the plot (pch=NA draws an empty frame; lines() adds the trace)
plot(powerData$DateTime,
powerData$Global_active_power,
pch=NA,
xlab="",
ylab="Global Active Power (kilowatts)")
lines(powerData$DateTime, powerData$Global_active_power)
##closing the PNG file
dev.off()
|
be5fe88f6acdff7c3ae9dd077cca193ac6ec2c70
|
d1122b263472a3edc42098d9a10897418b85b631
|
/kody/neural.R
|
83f3bfe96fd66790fc5844f7a4309973fabc71bc
|
[] |
no_license
|
kapik001/sd-analysis
|
1d16ca440e3956224840936b0d67c3e6e74f07df
|
fe7a876fe6fd19dde8532a2fcf3b3faee41a1656
|
refs/heads/master
| 2023-01-22T04:02:55.890649
| 2020-11-12T18:09:47
| 2020-11-12T18:09:47
| 312,352,935
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,434
|
r
|
neural.R
|
# Trading strategy driven by a small neural net on up/down price movements.
# NOTE(review): stockData, logger, buyer, data and iter are supplied by the
# hosting framework (each "#..." marker delimits a lifecycle hook) — confirm.
#run before
loadedData=stockData$load('AAPL', 365)
#on start
probData <- c()
# NOTE(review): this empty data.frame is never used afterwards; the training
# frame is rebuilt inline from the df.* vectors below. Dead code?
df <- data.frame(
Today=integer(),
Tomorrow=integer(),
X1=integer(),
X2=integer(),
X3=integer(),
X5=integer(),
X10=integer())
df.Today <- c()
df.Tomorrow <- c()
df.X1 <- c()
df.X2 <- c()
df.X3 <- c()
df.X5 <- c()
df.X10 <- c()
lengthOfData=length(loadedData)
#filling with 'isGrowing'
# NOTE(review): when i == 20 the target index i-20 is 0, and assigning to
# index 0 is a silent no-op in R — the first sample is dropped. Confirm
# whether the loop was meant to use i-19 (or start at 21).
for (i in 20 : (lengthOfData - 2)) {
df.Tomorrow[i-20] <- if (loadedData[[i + 1]]$close > loadedData[[i]]$close) 1 else 0
df.Today[i-20] <- if(loadedData[[i]]$close > loadedData[[i - 1]]$close) 1 else 0
df.X1[i-20] <-if (loadedData[[i - 1]]$close > loadedData[[i - 2]]$close) 1 else 0
df.X2[i-20] <-if (loadedData[[i - 2]]$close > loadedData[[i - 3]]$close) 1 else 0
df.X3[i-20] <-if (loadedData[[i - 3]]$close > loadedData[[i - 4]]$close) 1 else 0
df.X5[i-20] <-if (loadedData[[i - 5]]$close > loadedData[[i - 6]]$close) 1 else 0
df.X10[i-20] <-if (loadedData[[i - 10]]$close > loadedData[[i - 11]]$close) 1 else 0
}
# Train: predict tomorrow's direction from today's and lagged directions.
nn = neuralnet(Tomorrow ~ Today + X1 + X2 + X3 + X5 + X10, data = data.frame(Tomorrow = df.Tomorrow, Today = df.Today, X1 = df.X1, X2 = df.X2, X3 = df.X3, X5 = df.X5, X10 = df.X10))
#on next
logger$put('day:')
logger$put(iter)
logger$put('close:')
logger$put(data[[iter]]$close)
# Need at least 20 days of history before the features are defined.
if (iter >= 20) {
Today <- if(data[[iter]]$close > data[[iter - 1]]$close) 1 else 0
X1 <- if (data[[iter - 1]]$close > data[[iter - 2]]$close) 1 else 0
X2 <- if (data[[iter - 2]]$close > data[[iter - 3]]$close) 1 else 0
X3 <- if (data[[iter - 3]]$close > data[[iter - 4]]$close) 1 else 0
X5 <- if (data[[iter - 5]]$close > data[[iter - 6]]$close) 1 else 0
X10 <- if (data[[iter - 10]]$close > data[[iter - 11]]$close) 1 else 0
predict <- compute(nn, data.frame(Today = Today, X1 = X1, X2 =X2, X3 = X3, X5 = X5, X10 = X10))
probData = c(probData, predict$net.result)
# Buy when predicted probability of a rise exceeds 0.5, otherwise sell.
if(predict$net.result > 0.5) {
buyer$buy(data[[iter]]$close)
logger$put('buy at:')
logger$put(data[[iter]]$close)
} else {
buyer$sell(data[[iter]]$close)
logger$put('sell at:')
logger$put(data[[iter]]$close)
}
}
#on end
logger$put('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
logger$put('P datas: ')
for (m in probData) {
logger$put(toString(m))
}
logger$put('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
logger$put(buyer$result(data[[iter]]$close))
|
fd0070103dafc4d1fe1554333096871db2656194
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/weights/examples/wtd.cors.Rd.R
|
c667de3d24d9fb2b1916bd3d0bc6ed99f4c972bc
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 333
|
r
|
wtd.cors.Rd.R
|
library(weights)
### Name: wtd.cors
### Title: Produces weighted correlations quickly using C.
### Aliases: wtd.cors
### Keywords: ~correlation ~Pearson ~weights
### ** Examples
# Two perfectly anti-ordered vectors; weights shift mass toward later values.
test <- c(1,1,1,1,1,1,2,2,2,3,3,3,4,4)
t2 <- rev(test)
weight <- c(.5,.5,.5,.5,.5,1,1,1,1,2,2,2,2,2)
wtd.cors(test, t2)          # unweighted correlation
wtd.cors(test, t2, weight)  # weighted correlation
|
d78c097f9e432d1e0574dcee0e005ef883164de3
|
9e1fb0a1b66f7446ae501467b4eb2f77fda6215c
|
/functions/wgcna_heatmaps.R
|
81f0fec282ffeb3cbd5df3468adb9425f9025bb9
|
[] |
no_license
|
mayerlelab/metabolicSubtypes
|
df2ecc278ff16ab3b7d924d36549d3bd854a0cc6
|
bdb3e7d0a2909538e193db9a9c80ce87f76ac21d
|
refs/heads/master
| 2023-06-17T00:44:56.806875
| 2021-07-12T11:08:04
| 2021-07-12T11:08:04
| 385,206,511
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,652
|
r
|
wgcna_heatmaps.R
|
#' wgcna_heatmaps (from WGCNA functions).
#'
#' This function implements module-trait correlation heatmap plotting with wgcna.
#'
#' @param datExpr Transposed input matrix with gene names as column names and sample names as row names.
#'   NOTE(review): this argument is never used in the function body — confirm whether it can be dropped.
#' @param MEs Module eigengenes.
#' @param datTraits Phenotype data.
#' @param projectfolder File path where to save the output to. Defaults to working directory. Here, it saves the output to a subfolder called "WGCNA".
#' @return Writes Module_trait_correlations.txt and Module_CorPval.txt Plots module-trait correlation heatmap.
#' @export
wgcna_heatmaps <- function(MEs, datExpr, datTraits, projectfolder = getwd()){
if (!requireNamespace("WGCNA", quietly = TRUE)) {
stop("WGCNA needed for this function to work. Please install it.",
call. = FALSE)
}
if (!file.exists(file.path(projectfolder, "WGCNA"))) {dir.create(file.path(projectfolder, "WGCNA")) }
# names (colors) of the modules
modNames = substring(names(MEs), 3) # remove "ME" at the beginning of module eigengene names
### Correlation of Eigengenes with traits (separated by trait type)
# initialisation of data.frames, one column per trait
moduleTraitCor <- data.frame(row.names=colnames(MEs))
moduleTraitPvalue <- data.frame(row.names=colnames(MEs))
for(i in 1:length(colnames(datTraits))) {
# if phenotype is numeric variable, correlation with expression by pearson correlation
moduleTraitCor[i] <- as.data.frame(WGCNA::cor(MEs, datTraits[i], use="pairwise.complete.obs"))
names(moduleTraitCor)[i] <- paste("WGCNA::cor",names(datTraits)[i], sep=".")
# Calculation of correlation p-value independantly of type of correlation
moduleTraitPvalue[i] <- WGCNA::corPvalueStudent(as.matrix(moduleTraitCor[i]), nrow(datTraits)) # not used for Dendrogram
colnames(moduleTraitPvalue)[i] <- paste0("p.", colnames(moduleTraitCor)[i])
}
# Persist both matrices for downstream use.
utils::write.table(moduleTraitCor, file.path(projectfolder, "WGCNA", "Module_trait_correlations.txt"), row.names = T, col.names = T, sep="\t")
utils::write.table(moduleTraitPvalue, file.path(projectfolder, "WGCNA", "Module_CorPval.txt"), row.names = T, col.names = T, sep="\t")
cat("\n-------------------------\n", "Module trait correlations saved to", file.path(projectfolder, "WGCNA", "Module_trait_correlations.txt"), "and", file.path(projectfolder, "WGCNA", "Module_CorPval.txt"), "\n-------------------------\n")
#### HEATMAP Module-trait relationships
# Will display correlations and their p-values, we color code each association by the correlation value:
# Cell labels of the form "cor (p)".
textMatrix = paste(signif(as.matrix(moduleTraitCor), 2), " (", signif(as.matrix(moduleTraitPvalue), 1), ")", sep = "")
dim(textMatrix) = dim(moduleTraitCor)
colnames(textMatrix) <- colnames(moduleTraitCor)
rownames(textMatrix) <- rownames(moduleTraitCor)
# Display the correlation values within a heatmap plot
par(mar = c(6, 10, 4, 4));
# Display the correlation values within a heatmap plot
WGCNA::labeledHeatmap(Matrix = moduleTraitCor,
xLabels = colnames(moduleTraitCor),
yLabels = names(MEs),
ySymbols = names(MEs),
colorLabels = FALSE,
colors = colorRampPalette(c("green", "white", "red"))(n = 50),
textMatrix = textMatrix,
setStdMargins = FALSE,
cex.text = 1,
cex.lab = 0.8,
zlim = c(-1,1),
main = "Module-trait relationships - normalised beta values")
}
# devtools::document()
# devtools::document()
|
61633627e6884e8a18a4ad2cd79664ae97ad975c
|
837a3177789464eabb12b7abfb12e8621feb71fb
|
/(0)Functions_general/getTotalNumbers.R
|
35a39f11cf4c3cfb496ec98fdc67bd300b59dbc7
|
[] |
no_license
|
mcgregorv/AtlantisRscripts
|
21140225d43ba583a1bebc70557c8cb5f61b9b5c
|
1009f0d1961fc95bc4a98d25eea7dc1d7cccee77
|
refs/heads/master
| 2020-07-11T14:20:34.142538
| 2019-08-26T22:22:13
| 2019-08-26T22:22:13
| 204,566,512
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 365
|
r
|
getTotalNumbers.R
|
getTotalNumbers <- function(Name, Cohort, startBox, stopBox) {
  # Sums the "_Nums" values for one species cohort over a range of boxes.
  # Relies on `init` (the biol initial-conditions file lines) and
  # get_first_number() being available in the calling environment.
  pattern <- paste("^", Name, Cohort, "_Nums", sep = "")
  hit <- grep(pattern, init)
  boxLines <- init[(hit + startBox):(hit + stopBox)]
  boxNums <- as.double(get_first_number(boxLines, n = "all"))
  sum(boxNums, na.rm = TRUE)
}
|
8e4d5ebc767dadede13900418528fda27a852ad6
|
64d17a1fd8b94a76ce906ca702616d321f3aa93f
|
/src/R/cspa-wordcloud.R
|
79326c05f064ad9990057a5985ed9a8c66bf35ed
|
[
"MIT"
] |
permissive
|
FranckCo/Stamina
|
bd32101ec19a6b9167dca5b86c459efac5e50ad8
|
20843a1e56d4e9512cbe0089b07af6dda7bb6af1
|
refs/heads/master
| 2022-12-01T02:29:32.287326
| 2022-08-21T10:41:24
| 2022-08-21T10:41:24
| 40,617,438
| 2
| 1
|
MIT
| 2022-11-16T03:18:05
| 2015-08-12T18:30:14
|
Java
|
WINDOWS-1252
|
R
| false
| false
| 567
|
r
|
cspa-wordcloud.R
|
# Build a word cloud from the English text corpus in the working directory.
cspa <- VCorpus(DirSource(encoding = "UTF-8"), readerControl = list(language = "en"))
cspa <- tm_map(cspa, content_transformer(tolower))
cspa <- tm_map(cspa, removeWords, stopwords("english"))
# Strip typographic (curly) double quotes.
# BUG FIX: the original pattern "“”" only matched the two characters when
# they appeared as a consecutive pair; a character class removes each curly
# quote wherever it occurs.
removeDoubleQuotations <- function(x) gsub("[“”]", "", x)
cspa <- tm_map(cspa, removePunctuation)
cspa <- tm_map(cspa, content_transformer(removeDoubleQuotations))
cspa <- tm_map(cspa, stripWhitespace)
cspa <- tm_map(cspa, removeNumbers)
# At most 100 words, most frequent in the centre, 35% rotated.
wordcloud(cspa, scale=c(5,0.5), max.words=100, random.order=FALSE, rot.per=0.35, use.r.layout=FALSE, colors=brewer.pal(8, "Dark2"))
|
eb0c561c9b1b1e59a89a1a3a5716b1bb6b155b1a
|
b72c2a20cd58da648dfdc5148e938f269ff1d38c
|
/man/substanceStructures.Rd
|
ba9ba01e0e610ea36ebc9ee4716c2b7483a122e2
|
[
"MIT"
] |
permissive
|
enanomapper/renm
|
5d9e0e3798512f861b128b6749b7549b9b5cd381
|
7346ce8cbb0d84b29473a139af31416a2c28ee26
|
refs/heads/master
| 2021-01-10T12:38:11.895305
| 2020-02-12T11:14:35
| 2020-02-12T11:14:35
| 44,956,480
| 1
| 1
|
NOASSERTION
| 2020-02-12T11:18:40
| 2015-10-26T08:51:24
|
R
|
UTF-8
|
R
| false
| false
| 538
|
rd
|
substanceStructures.Rd
|
\name{substanceStructures}
\alias{substanceStructures}
\title{
Looks up the composition structures for a specific substance
}
\description{
Looks up the structures in the composition of a specific substance as identified with the given
URI.
}
\usage{
substanceStructures(uri)
}
\arguments{
\item{uri}{URI of the substance}
}
\value{
\item{data}{the structures of the substance}
}
\examples{
info <- substanceStructures(uri="http://data.enanomapper.net/substance/NWKI-71060af4-1613-35cf-95ee-2a039be0388a")
}
\author{
Egon Willighagen
}
|
3cb87a66f9f0162203912e4d9d30fd13e53293ba
|
6a48e67720f48380c37a7c8cae41a6e2bcc56636
|
/febonaci.R
|
1cf470b4df9d5abe4a6cc951df9cff2716933bd8
|
[] |
no_license
|
SHIVAMBASIA/R-Lab-Tasks
|
299778de7c90263cd8095bb8629d363ad87b7b29
|
3129da1d257b15c83b3e7634c24362b1a8cabdbf
|
refs/heads/master
| 2020-08-05T06:16:24.228635
| 2019-10-02T19:46:28
| 2019-10-02T19:46:28
| 212,426,957
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 158
|
r
|
febonaci.R
|
# Print the Fibonacci sequence: the seeds 0 and 1, followed by the number of
# additional terms entered at the console.
num_terms <- as.integer(readline("enter number"))
t1 <- 0  # F(0)
t2 <- 1  # F(1)
print(t1)
print(t2)
# BUG FIX: the original `for (i in 1:a)` iterates over c(1, 0) when a == 0
# and wrongly prints two extra terms; seq_len() handles zero correctly.
# (Also removed an unused `i <- 1` and a local `sum` shadowing base::sum.)
for (i in seq_len(num_terms)) {
  nxt <- t1 + t2
  print(nxt)
  t1 <- t2
  t2 <- nxt
}
|
8d4281ca1c97ab1c34008cc1552525ae67485115
|
66ecf5ba741382f1f2ff9420e89926e4a68d49f7
|
/data-raw/DATASET.R
|
cf8eff85c9914519fa3f4b29f86ced5835bc7cfb
|
[
"MIT"
] |
permissive
|
Drake-Kufwafwa/Spam
|
ba648d983894ee0dd24d63bffa12fb05e50b2b8f
|
00795275a48d3e9ac80c670fc37271f07eeab967
|
refs/heads/master
| 2023-01-07T09:42:50.212665
| 2020-11-11T13:27:29
| 2020-11-11T13:27:29
| 311,977,323
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 209
|
r
|
DATASET.R
|
# Build the packaged `Spam` dataset from the raw UCI "spambase" file.
# NOTE(review): the UCI spambase.data distribution ships without a header
# row — confirm header=TRUE is intended here, otherwise the first record is
# silently consumed as column names.
Spam = read.csv(file = "spambase.data", header=TRUE, stringsAsFactors=T)
# change label of last column for convenience
colnames(Spam)[58] = "spam"
Spam$spam <- as.factor(Spam$spam)
usethis::use_data(Spam)
|
fda7b92a2a04d41095b0242817a680aae11ec28f
|
4b871231c9007b3527198e9208243f2a78fd1cf1
|
/ThePlantGenome_Version/Phenotype_Data/Empirical_Validation_BLUP_Calculations.R
|
9718a698699db69f0eee02aba69e6f3ef9700678
|
[] |
no_license
|
mdzievit/Genomic_Prediction
|
33c9a7be55bc99b22159cb3f6d62a8b6511f9bad
|
60d3b6aa1dfec35bb4344b0ade70e12846a8f61d
|
refs/heads/master
| 2021-12-28T10:49:32.186901
| 2021-12-21T19:05:40
| 2021-12-21T19:05:40
| 141,373,493
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,775
|
r
|
Empirical_Validation_BLUP_Calculations.R
|
library(tidyverse)
library(broom)
library(lme4)
library(ggpubr)
library(patchwork)
## Load the raw phenotypic data for the empirical population
emp_raw <- read_tsv("Phenotype_Data/Empirical_Validation/Empirical_Traits_Raw.txt")
#### Correlation Plot ####
#### Function that builds each row of the raw data correlation plot seen in Sup Fig 4
# Build one row of the raw-data correlation figure for a single trait:
# three pairwise scatter plots (IA15/IA16, IA15/IA17, IA16/IA17) with
# regression line and Pearson r, combined with patchwork.
#   data       long data frame with Trait, Genotype, Pedigree, Env, Value
#   cur_trait  trait name to plot
#   title      add column titles (used for the top row only)
#   not_bottom currently has no effect (see NOTE below)
cor_plot <- function(data,
cur_trait,title = FALSE,not_bottom = TRUE) {
# Extra right margin when titles are shown, so the trait label fits.
if(title) {
x_add <- 0.125
} else {
x_add <- 0.1
}
# One row per genotype, one column per environment.
temp <- data %>%
filter(Trait == cur_trait) %>%
pivot_wider(id_cols = c(Trait,Genotype,Pedigree),
names_from = c(Env),
values_from = Value)
my_theme <- theme(plot.margin = unit(c(0,0.35,0,0), "lines"),
plot.title = element_text(hjust = 0.5,size = 10),
panel.grid = element_blank(),
strip.text = element_text(size = 8))
plot1 <- temp %>%
ggscatter(x = "IA15", y = "IA16", add = "reg.line",size = 1) +
stat_cor(aes(label = ..r.label..),
size = 2.5,color = 'red',fontface = 'bold',
cor.coef.name = 'r',method = 'pearson') +
theme_bw() +
my_theme
plot2 <- temp %>%
ggscatter(x = "IA15", y = "IA17", add = "reg.line", size = 1) +
stat_cor(aes(label = ..r.label..),
size = 2.5,color = 'red',fontface = 'bold',
cor.coef.name = 'r',method = 'pearson') +
theme_bw() +
my_theme
# Third panel also carries the trait name, rotated in the right margin.
plot3 <- temp %>%
ggscatter(x = "IA16", y = "IA17", add = "reg.line",size = 1) +
stat_cor(aes(label = ..r.label..),
size = 2.5,color = 'red',fontface = 'bold',
cor.coef.name = 'r',method = 'pearson') +
theme_bw() +
geom_text(x = max(temp$IA16,na.rm = TRUE) + max(temp$IA16,na.rm = TRUE) * x_add,
y = (min(temp$IA17,na.rm = TRUE) + max(temp$IA17, na.rm = TRUE))/2,
label = cur_trait,angle = 270,
size = 3.25) +
coord_cartesian(clip = "off",
xlim = c(min(temp$IA16,na.rm = TRUE),max(temp$IA16,na.rm = TRUE))) +
theme(plot.margin = unit(c(0,0.8,0,0), "lines"),
plot.title = element_text(hjust = 0.5,size = 10),
panel.grid = element_blank())
if(title) {
plot1 <- plot1 + ggtitle('IA15 vs IA16')
plot2 <- plot2 + ggtitle('IA15 vs IA17')
plot3 <- plot3 + ggtitle('IA16 vs IA17')
}
# NOTE(review): this branch is empty, so `not_bottom` has no effect —
# presumably a planned bottom-row axis tweak was never implemented.
if (not_bottom) {
}
out <- plot1 + plot2 + plot3
return(out)
}
#### builds the correlation row for each trait
# One row of pairwise environment scatterplots (IA15/IA16/IA17) per trait;
# the rows are stacked below with patchwork. Only the top row carries the
# column titles (title = TRUE).
ph <- cor_plot(data = emp_raw,
               cur_trait = 'PlantHeight')
eh <- cor_plot(data = emp_raw,
               cur_trait = 'EarHeight',
               not_bottom = FALSE)
mla <- cor_plot(data = emp_raw,
                cur_trait = 'MiddleLeafAngle')
ula <- cor_plot(data = emp_raw,
                cur_trait = 'UpperLeafAngle',
                title = TRUE)
#### Using patchwork to piece these all together
# Stack the per-trait rows top to bottom: upper leaf angle first,
# ear height last.
cor_out <- ula / mla / ph / eh
# ggsave(filename = 'Phenotype_Data/Empirical_Validation/Supplemental_Figure_S4.png',
#        plot = cor_out,
#        width = 7,
#        height = 8,
#        dpi = 1200)
# ggsave(filename = 'Final_Figures/Supplemental_Figure_S4.pdf',
#        plot = cor_out,
#        width = 180,
#        height = 203.2,
#        units = 'mm',
#        dpi = 1200)
#### Mixed model function for the BLUP analysis ####
# Fit the environment/genotype mixed model for one trait's data.
#
# data          : data frame with Value, Env and Genotype columns (one trait)
# return_type   : "effects" returns the fitted model ("fixed") or a
#                 Genotype/BLUP table ("random"); "var" prints and returns
#                 the variance components of the fitted model
# genotype_term : "fixed" models genotype as a fixed effect; "random" models
#                 it as a random effect and extracts per-genotype BLUPs
#
# Returns: see return_type above.
mixed_model <- function(data, return_type, genotype_term) {
  if (genotype_term == "fixed") {
    fit <- lmer(Value ~ (1 | Env) + Genotype,
                data = data %>%
                  mutate(Env = factor(Env)),
                REML = TRUE)
    out <- fit
  } else if (genotype_term == "random") {
    fit <- lmer(Value ~ 1 + (1 | Env) + (1 | Genotype),
                data = data %>%
                  mutate(Genotype = factor(Genotype),
                         Env = factor(Env)),
                REML = TRUE)
    # Genotype BLUPs are the per-genotype intercepts of the random-effects
    # fit (coef() combines fixed intercept + random deviations).
    out <- coef(fit)$Genotype %>%
      rownames_to_column(var = "Genotype") %>%
      select(Genotype, '(Intercept)') %>%
      rename(BLUP = '(Intercept)')
  } else {
    stop("genotype_term must be 'fixed' or 'random'", call. = FALSE)
  }
  if (return_type == "effects") {
    return(out)
  } else if (return_type == "var") {
    # Bug fix: the original referenced `orig_mod`, which was only assigned in
    # the "random" branch, so return_type = "var" with genotype_term = "fixed"
    # raised an "object not found" error. Use the model fit in either branch.
    return(print(VarCorr(fit), comp = "Variance"))
  }
  stop("return_type must be 'effects' or 'var'", call. = FALSE)
}
#### Running the mixed model and BLUPs ####
#### BLUP analysis
# One model per trait: genotype is random, so mixed_model() returns a
# Genotype/BLUP table per trait, which do() row-binds with the Trait key.
# NOTE(review): dplyr::do() is deprecated; group_modify() is the modern
# equivalent if this is ever revisited.
blups <- emp_raw %>%
  group_by(Trait) %>%
  do((mixed_model(data = .,
                  return_type = "effects",
                  genotype_term = "random"))) %>%
  ungroup()
#### Outputs the BLUPs for future use
# write_tsv("Phenotype_Data/Empirical_Validation/Empirical_Phenotypes_BLUPs.txt",
#           x = blups)
#### Plotting Sup Fig S3 ####
#### Plotting options
# Shared ggplot theme for the histogram grid (small bold fonts sized for a
# journal supplemental figure).
plot_theme <- theme(aspect.ratio = .75,
                    axis.title = element_text(size = 5,
                                              face = "bold"),
                    axis.text = element_text(size = 4,
                                             face = "bold",
                                             color = "black"),
                    plot.margin = unit(c(0,.1,0,.1),"cm"),
                    strip.background = element_blank(),
                    strip.text = element_text(size = 4,
                                              face = "bold"),
                    panel.grid = element_blank())
#### Combined histogram that includes the raw data across 3 env, and then the combined BLUPs
# The BLUPs are appended as a fourth pseudo-environment ("Combined\nBLUPs")
# so they share a facet row with the per-environment raw values; traits are
# relabelled with their measurement units for the facet strips, and factor
# levels fix the facet ordering.
(emp_hist <- emp_raw %>%
   select(-Pedigree) %>%
   bind_rows(blups %>%
               mutate(Env = "Combined\nBLUPs") %>%
               rename(Value = BLUP)) %>%
   mutate(Trait = case_when(
     Trait == 'UpperLeafAngle' ~ 'UpperLeafAngle(°)',
     Trait == 'MiddleLeafAngle' ~ 'MiddleLeafAngle(°)',
     Trait == 'PlantHeight' ~ 'PlantHeight(cm)',
     Trait == 'EarHeight' ~ 'EarHeight(cm)'),
     Trait = factor(Trait,
                    levels = c("UpperLeafAngle(°)","MiddleLeafAngle(°)",
                               "PlantHeight(cm)","EarHeight(cm)")),
     Env = factor(Env,
                  levels = c("IA15","IA16","IA17","Combined\nBLUPs"))) %>%
   ggplot(aes(Value)) +
   geom_histogram() +
   xlab("Phenotypic Value") +
   facet_grid(Env ~ Trait,
              scales = "free") +
   theme_bw() +
   plot_theme)
# ggsave(plot = emp_hist,
#        filename = "Phenotype_Data/Empirical_Validation/Supplemental_Figure_S3.png",
#        width = 3.5,
#        height = 2.65,
#        dpi = 1200)
# ggsave(plot = emp_hist,
#        filename = "Final_Figures/Supplemental_Figure_S3.pdf",
#        width = 80,
#        height = 65,
#        units = 'mm',
#        dpi = 1200)
#### Calculate harmonic means, variance components, and heritability ####
# Variance components (Genotype, Env, Residual) per trait from the
# random-genotype model; "var" prints VarCorr and its rows become the tibble.
var_comp <- emp_raw %>%
  group_by(Trait) %>%
  do(as_tibble(mixed_model(data = .,
                           return_type = "var",
                           genotype_term = "random"))) %>%
  ungroup()
#### Calculate harmonic means and output to the screen
# Harmonic mean (e) of the number of environments each genotype was observed
# in; used below as the divisor for residual variance in the heritability.
(harmonic_mean <- emp_raw %>%
   na.omit() %>%
   group_by(Trait,Genotype) %>%
   summarise(Num_Env = n()) %>%
   ungroup() %>%
   group_by(Trait,Num_Env) %>%
   summarise(n = n()) %>%
   mutate(add = (1/Num_Env)*n) %>%
   summarise(e = sum(n)/sum(add)))
#### Calculate heritability and output to the screen
# Line-mean heritability: H = Vg / (Vg + Ve / e), with e the harmonic mean
# number of environments per genotype.
(herit <- var_comp %>%
   select(Trait,grp,vcov) %>%
   spread(grp,vcov) %>%
   left_join(harmonic_mean) %>%
   mutate(H = (Genotype/(Genotype + (Residual/e)))))
#### Output the heritability
# write_tsv("Phenotype_Data/Empirical_Validation/Empirical_Heritabilities.txt",
#           x = herit)
|
2ada67da2341b2f919a5584ad2f34ef8cb809be5
|
5309bb4d2fbf775e66d404a1a319b10c6295cd7b
|
/SCRIPTS/reader.R
|
c7cab31b52dbc4064a67e2114671658835dca100
|
[] |
no_license
|
puttak/power-to-decarbonize
|
ea13bd68dc663399242f9fb2f6484724b5f3cde7
|
63999c6ea224cebd9ff6444d4a6df094962b27e9
|
refs/heads/master
| 2021-09-16T07:15:30.960669
| 2018-03-14T04:02:22
| 2018-03-14T04:02:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,022
|
r
|
reader.R
|
## Prelude
# Constants
bp_link <- "https://www.bp.com/content/dam/bp/en/corporate/excel/energy-economics/statistical-review-2017/bp-statistical-review-of-world-energy-2017-underpinning-data.xlsx"
# Packages Used
# Bug fix: readxl (read_excel) and purrr (reduce) are called later in this
# script but were never loaded. library() is used instead of require() so a
# missing package errors immediately rather than silently returning FALSE.
invisible(lapply(c("magrittr", "rvest", "dplyr", "tidyr", "readr",
                   "readxl", "purrr"),
                 library, character.only = TRUE))
# Custom-defined Functions
# Override `+` so "a" + "b" concatenates strings; numeric addition falls
# through to the primitive. NOTE(review): this affects every subsequent `+`
# in the session -- kept because the whole script uses it for path building.
`+` <- function(x, y) {
  if (is.character(x) || is.character(y)) {
    return(paste(x , y, sep = ""))
  } else {
    .Primitive("+")(x, y)
  }
}
# Append a data frame (wrapped as a one-element list) to an accumulator
# list; used to collect the individual BP sheets below.
put.into <- function(df, lst) {
  df %>% list() %>% append(x = lst)
}
# Set directory paths relative to this script's location.
# NOTE(review): sys.frame(1)$ofile only exists when the file is source()d.
script.path <- normalizePath(sys.frame(1)$ofile)
script.dir <- dirname(script.path)
data.dir <- dirname(script.dir) + "/DATA/"
worldbank.dir <- data.dir + "/worldbank/"
## WorldBank
# Country-name harmonisation shared by all three WorldBank national series
# (WorldBank labels -> BP labels). Extracted from the three identical
# recode() blocks the original repeated inline, so the mapping is defined
# exactly once.
recode_country <- function(country) {
  recode(country,
         "Korea, Rep." = "South Korea",
         "Iran, Islamic Rep." = "Iran",
         "Venezuela, RB" = "Venezuela",
         "Egypt, Arab Rep." = "Egypt",
         "Hong Kong SAR, China" = "China Hong Kong SAR",
         "Trinidad and Tobago" = "Trinidad & Tobago",
         "Slovak Republic" = "Slovakia")
}
# Read & clean WorldBank national data series
population <-
  read.csv(worldbank.dir + "population.csv", stringsAsFactors = FALSE) %>%
  select(Country = Country.Name, Year = Time, Population = Value) %>%
  mutate(`Population (Millions)` = Population / 1e6,
         Country = recode_country(Country)) %>%
  select(Country, Year, Population, `Population (Millions)`)
gdp <-
  read.csv(worldbank.dir + "gdp.csv", stringsAsFactors = FALSE) %>%
  select(Country = Country.Name, Year = Time, GDP = Value) %>%
  mutate(`GDP (Millions)` = GDP / 1e6,
         Country = recode_country(Country)) %>%
  select(Country, Year, GDP, `GDP (Millions)`)
gdp_per_capita <-
  read.csv(worldbank.dir + "gdpcapita.csv", stringsAsFactors = FALSE) %>%
  select(Country = Country.Name, Year = Time, `GDP per Capita` = Value) %>%
  mutate(Country = recode_country(Country)) %>%
  select(Country, Year, `GDP per Capita`)
# Read and clean WorldBank global data series
global_population <-
  read.csv(worldbank.dir + "global-population.csv", stringsAsFactors = FALSE) %>%
  select(Country = Country.Name, Date = Time, Population = Value) %>%
  mutate(Year = Date, `Population (Billions)` = Population / 1e9) %>%
  select(Year, Population, `Population (Billions)`)
global_gdp <-
  read.csv(worldbank.dir + "global-gdp.csv", stringsAsFactors = FALSE) %>%
  select(Country = Country.Name, Date = Time, GDP = Value) %>%
  mutate(Year = Date, `GDP (Trillions)` = GDP / 1e12) %>%
  select(Year, GDP, `GDP (Trillions)`)
# Read and clean USSR population data series separately (as USSR is not found
# in WorldBank data); scraped from a Wikimedia Commons table.
ussr_population <- read_html("https://commons.wikimedia.org/wiki/File:Population_of_former_USSR.PNG") %>%
  html_nodes(xpath = '//*[@id="mw-imagepage-content"]/div/table[1]') %>% html_table() %>% `[[`(1) %>%
  rename(Year = X1, `Population (Millions)` = X2) %>%
  transmute(Country = "USSR",
            Year,
            `Population (Millions)` = `Population (Millions)` %>% gsub(pattern = ",", replacement = "") %>% as.numeric,
            Population = `Population (Millions)` * 1e6) %>%
  select(Country, Year, Population, `Population (Millions)`)
# Read and clean Taiwan population data series separately (as Taiwan is not
# found in WorldBank data).
taiwan_population <- read_csv(data.dir + "taiwan-population.csv") %>%
  transmute(Country = "Taiwan",
            Year,
            Population = `Population (Millions)` * 1e6,
            `Population (Millions)`)
# Compile data into national and global datasets (joins are by Country/Year).
worldbank <- population %>%
  left_join(gdp) %>%
  left_join(gdp_per_capita) %>%
  bind_rows(ussr_population, taiwan_population)
global_worldbank <- global_population %>% left_join(global_gdp) %>%
  mutate(`GDP per Capita` = GDP / Population)
## BP
# Read nuclear electricity generation data for Ukraine, Russia, & Lithuania
# (as they are not included in BP's dataset prior to 1985)
# Source: Power Reactor Information System (PRIS) from IAEA
lru_nuclear <- read_csv(data.dir + "lru-nuclear.csv")
# Read and clean BP Statistical Review of World Energy 2017 underpinning data
download.file(bp_link, "temp.xlsx")
# Read one sheet of the BP workbook and reshape it to long
# (Country, Year, <label>) format.
#   file  : path to the downloaded workbook
#   sheet : sheet name inside the workbook
#   label : column name for the sheet's value in the long output
#   range : cell range to read (default covers the 1965-2016 sheets)
#   years : the year columns present in `range`
# Rows that are entirely NA and "Total"/"Other" aggregate rows are dropped;
# "US" is recoded to match the WorldBank country labels.
readBPSheet <- function(file = "temp.xlsx", sheet, label, range = "A3:BA87", years = 1965:2016) {
  bp_sheet <- read_excel(path = file, sheet = sheet, range = range, na = "n/a")
  bp_sheet %>% .[rowSums(is.na(.)) != ncol(.),] %>%
    select(Country = 1, everything()) %>%
    filter(! grepl(Country, pattern = "Total"), ! grepl(Country, pattern = "Other")) %>%
    gather(key = "Year", value = !!label, as.character(years)) %>%
    mutate(Year = as.numeric(Year),
           Country = recode(Country,
                            "US" = "United States")) %>%
    return()
}
# Read each sheet into a list of dataframes, then join them. Change units if necessary.
# (11.63 is the TWh-per-Mtoe conversion factor used throughout.)
bp_data_list = list()
readBPSheet(sheet = "Primary Energy Consumption", label = "Energy Consumption (Mtoe)") %>%
  mutate(`Energy Consumption (TWh)` = 11.63 * `Energy Consumption (Mtoe)`) %>%
  put.into(bp_data_list) -> bp_data_list
readBPSheet(sheet = "Electricity Generation ", label = "Electricity Generation (TWh)",
            range = "A3:AG86", years = 1985:2016) %>% # Note: BP only records total electricity generation after 1984
  put.into(bp_data_list) -> bp_data_list
readBPSheet(sheet = "Carbon Dioxide Emissions", label = "CO2 Emissions (Mt)") %>%
  mutate(`CO2 Emissions (kg)` = 1e9*`CO2 Emissions (Mt)`) %>%
  put.into(bp_data_list) -> bp_data_list
readBPSheet(sheet = "Oil Consumption - Tonnes", label = "Oil Energy Consumption (Mt)") %>%
  mutate(`Oil Energy Consumption (TWh)` = 11.63 * `Oil Energy Consumption (Mt)`) %>%
  put.into(bp_data_list) -> bp_data_list
readBPSheet(sheet = "Gas Consumption - Mtoe", label = "Gas Energy Consumption (Mtoe)") %>%
  mutate(`Gas Energy Consumption (TWh)` = 11.63 * `Gas Energy Consumption (Mtoe)`) %>%
  put.into(bp_data_list) -> bp_data_list
readBPSheet(sheet = "Coal Consumption - Mtoe", label = "Coal Energy Consumption (Mtoe)") %>%
  mutate(`Coal Energy Consumption (TWh)` = 11.63 * `Coal Energy Consumption (Mtoe)`) %>%
  put.into(bp_data_list) -> bp_data_list
readBPSheet(sheet = "Nuclear Consumption - Mtoe", label = "Nuclear Energy Consumption (Mtoe)") %>%
  mutate(`Nuclear Energy Consumption (TWh)` = 11.63 * `Nuclear Energy Consumption (Mtoe)`) %>%
  put.into(bp_data_list) -> bp_data_list
readBPSheet(sheet = "Nuclear Consumption - TWh", label = "Nuclear Electricity Generation (TWh)") %>%
  bind_rows(lru_nuclear) %>% # Note: BP only records nuclear electricity generation for Lithuania, Russia, and Ukraine after 1984
  put.into(bp_data_list) -> bp_data_list
readBPSheet(sheet = "Hydro Consumption - TWh", label = "Hydro Electricity Generation (TWh)") %>%
  put.into(bp_data_list) -> bp_data_list
readBPSheet(sheet = "Wind Consumption - TWh ", label = "Wind Electricity Generation (TWh)") %>%
  put.into(bp_data_list) -> bp_data_list
readBPSheet(sheet = "Solar Consumption - TWh", label = "Solar Electricity Generation (TWh)") %>%
  put.into(bp_data_list) -> bp_data_list
# Remove the temporary download.
unlink("temp.xlsx")
# Add WorldBank with Taiwan & USSR population data to list
worldbank %>% put.into(bp_data_list) -> bp_data_list
bp <- bp_data_list %>% reduce(left_join) # joins by Country, Year
# Derive per-capita and intensity measures.
# NOTE(review): mutate_at()/funs() are deprecated; across() is the modern
# replacement if this pipeline is ever revisited.
bp %<>%
  mutate(`CO2 Emissions per Capita (kg)` = `CO2 Emissions (kg)` / Population) %>%
  mutate_at(vars(ends_with("(TWh)")), funs(pcmwh = . * 1e6 / Population)) %>%
  rename_at(vars(ends_with("_pcmwh")), funs(sub(pattern = "_pcmwh", replacement = "", x = .) %>%
                                              sub(pattern = "\\(TWh\\)", replacement = "per Capita \\(MWh\\)", x = .))) %>%
  mutate(Electrification = `Electricity Generation (TWh)` / `Energy Consumption (Mtoe)`,
         `Carbon Intensity of Economy (g/$)` = `CO2 Emissions (kg)` * 1000 / GDP,
         `Energy Intensity of Economy (kWh/$)` = `Energy Consumption (TWh)` * 1e9 / GDP,
         `Carbon Intensity of Energy (g/kWh)` = (`CO2 Emissions (kg)` * 1000) / (`Energy Consumption (TWh)` * 1e9),
         `Carbon Intensity of Electricity (g/kWh)` = (`CO2 Emissions (kg)` * 1000) / (`Electricity Generation (TWh)` * 1e9)) %>%
  select(sort(names(.))) %>% select(Country, Year, everything()) %>% arrange(Country, Year)
# Save BP national data as both CSV and RDS
bp %>% saveRDS(data.dir + "BP")
bp %>% write_csv(data.dir + "bp.csv")
## Global BP dataset made by summing national BP data
bp_global <- bp %>% select(Year, `CO2 Emissions (Mt)`, `Energy Consumption (TWh)`, `Electricity Generation (TWh)`) %>%
  group_by(Year) %>% summarise(`CO2 Emissions (Gt)` = sum(`CO2 Emissions (Mt)`, na.rm = T) / 1e3,
                               `Energy Consumption (PWh)` = sum(`Energy Consumption (TWh)`, na.rm = T) / 1e3,
                               `Electricity Generation (PWh)` = sum(`Electricity Generation (TWh)`, na.rm = T) / 1e3) %>%
  left_join(global_worldbank) %>% select(sort(names(.))) %>% select(Year, everything()) %>% arrange(Year)
## Save BP global data as both CSV and RDS
bp_global %>% saveRDS(data.dir + "BP_GLOBAL")
bp_global %>% write_csv(data.dir + "bp_global.csv")
|
b6549a33569d42bc86f949e9af1cf6e040ebeebe
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/meteor/inst/testfiles/ET0_ThornthwaiteWilmott/libFuzzer_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1612736207-test.R
|
a3817d89db82f560d58b179d0a0a5b29b5663d40
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 846
|
r
|
1612736207-test.R
|
# Auto-generated libFuzzer/valgrind regression input: replays a recorded
# argument list against meteor:::ET0_ThornthwaiteWilmott and prints the
# structure of the result.
testlist <- list(doy = numeric(0), latitude = c(5.38995219951238e-312, 9.33119502699939e-300, 2.77478395271014e+180, 2.77448001456307e+180, -9.47149324620775e-150, 2.77448001607381e+180, 6.12454249645427e-302, -5.70159253396729e+303, 7.39655581425648e-304, -5.04615759304534e+304, 2.77448001764258e+180, NaN, 5.77591959922951e-275, -9.47149713990033e-150, 3.41367790154461e-312, NaN, NaN, NaN, 6.83542689333415e-304, NaN, 2.77448009512628e+180, NaN, 2.52467545024877e-321, NaN, NaN, NaN, NaN, 2.77478394498107e+180, -5.48608744496425e+303, 6.83631692287773e-304, 5.77591857965479e-275, -5.8293167202491e+303, NaN, NaN, NaN, NaN, -2.88021511976689e+304, 6.38062260106304e-304, 1.56617454489237e-305, 3.25938470805276e-310, 0, 0, 0, 0, 0, 0, 0, 0), temp = NA_real_)
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result)
|
8fd85f969a01061f4de3f992e7de471f2d48d01d
|
2b659440cb2f2082a85a51bcc1dbecc014a262d4
|
/man/paramDistBoxplot.Rd
|
6f4cabb5d3e479377d1909e04f4c30c68ce7b5b5
|
[] |
no_license
|
welch16/ChIPexoQual
|
f189dccfb013d8a5d772cf4fcccbdcaccf17a576
|
8ded1e9df7822ab4c8f6711cbf1f58ae03a08a46
|
refs/heads/master
| 2021-04-18T23:45:44.977354
| 2018-03-01T19:15:52
| 2018-03-01T19:15:52
| 27,845,697
| 1
| 3
| null | 2019-11-07T15:23:38
| 2014-12-10T23:58:14
|
R
|
UTF-8
|
R
| false
| true
| 1,292
|
rd
|
paramDistBoxplot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/base_summaryPlots.R
\name{paramDistBoxplot}
\alias{paramDistBoxplot}
\title{paramDistBoxplot}
\usage{
paramDistBoxplot(..., names.input = NULL, which.param = "beta1",
sort.as.numeric = FALSE)
}
\arguments{
\item{...}{a \code{list} of \code{ExoData} objects, or several
\code{ExoData} objects by themselves.}
\item{names.input}{a character vector with the names to use in the
plot. If it is empty \code{paramDistBoxplot} is going to create the
names as the names of the list when they are available or is going to
name them as Sample: 1 ,... , Sample: k.}
\item{which.param}{a character value with either \code{"beta1"} or
\code{"beta2"} that determines which parameters in the model
depth_i ~ uniquePos_i + width_i to plot. The default value is
\code{"beta1"}.}
\item{sort.as.numeric}{a logical value indicating if the values of
\code{names.input} are meant to be interpreted as numeric and sorted
accordingly.}
}
\value{
A \code{ggplot2} object with the boxplot of the chosen
parameter
}
\description{
\code{paramDistBoxplot} returns a \code{ggplot} object with a
boxplot comparing the \code{ntimes} estimations of the chosen
parameter.
}
\examples{
data(exoExample)
paramDistBoxplot(exoExample)
}
|
f219667935e10a0565334a662ae09593087eb4ad
|
ac7cadda91891f78d0897256a6ff43e02e66e955
|
/vignettes/plot_evoked_multilevel_model.R
|
a78ac4714e8104c3403f35eb5aabb12300e95954
|
[] |
no_license
|
mne-tools/mne-r
|
23a30ce76ad4e41b1f5fb85737fb9e683e6e9337
|
930a5a9a035234d2e64e436a5ed3f210bf5f97d2
|
refs/heads/master
| 2020-05-17T00:56:30.612648
| 2020-02-28T20:09:08
| 2020-02-28T20:09:08
| 183,412,516
| 31
| 8
| null | 2020-02-28T20:09:09
| 2019-04-25T10:31:57
|
R
|
UTF-8
|
R
| false
| false
| 4,415
|
r
|
plot_evoked_multilevel_model.R
|
## ----setup, include = T, echo = F----------------------------------------
# Purled from an R Markdown vignette: fits a multilevel (mixed-effects)
# model to single-channel MEG epochs and plots predictions with
# uncertainty bands.
library(tidyverse)
library(mne)
library(lme4)
library(merTools)
## ------------------------------------------------------------------------
# Load the MNE "sample" dataset (auditory/visual task) via the mne binding.
data_path <- mne$datasets$sample$data_path()
subject <- "sample"
raw_fname <- paste(data_path,
                   'MEG',
                   subject,
                   'sample_audvis_filt-0-40_raw.fif',
                   sep = '/')
raw <- mne$io$read_raw_fif(raw_fname, preload = T)
## ------------------------------------------------------------------------
# Epoch around left-auditory and left-visual events for a single gradiometer.
events <- mne$find_events(raw)
storage.mode(events) <- "integer" # R gets the events as floats.
tmin <- -0.2
tmax <- 0.5
baseline <- reticulate::tuple(NULL, 0)
event_id <- list("aud/l" = 1L, "vis/l" = 3L)
picks <- mne$pick_channels(raw$ch_names, list('MEG 1332'))
epochs <- mne$Epochs(raw = raw, events = events, event_id = event_id,
                     tmin = tmin, tmax = tmax,
                     picks = picks %>% as.integer(),
                     baseline = baseline, reject = NULL, preload = T)
## ------------------------------------------------------------------------
# use MNE-R function.
epochs_df <- mne::get_data_frame(epochs)
## ------------------------------------------------------------------------
# Multilevel model: condition as fixed effect, random intercept and
# condition slope per time point.
mod1 <- lmer(observation ~ 1 + condition + (1 + condition | time),
             data = epochs_df)
mod1 %>% summary() %>% print()
## ---- fig.width=8, fig.heigh=6-------------------------------------------
# Predict the fitted curves on a condition x time grid and overlay them on
# the single-epoch traces.
probe <- expand.grid(
  condition = c("aud/l", "vis/l") %>% as.factor(),
  time = epochs_df$time %>% unique()
)
pred_mod1 <- predict(mod1, probe)
probe$pred <- pred_mod1
ggplot(data = epochs_df,
       mapping = aes(x = time, y = observation,
                     group = interaction(condition, epoch),
                     color = condition)) +
  geom_line(size = 0.3, alpha = 0.4) +
  geom_line(
    size = 1.5, data = probe,
    mapping = aes(x = time, y = pred, group = condition,
                  color = condition)) +
  theme_minimal() +
  theme(text = element_text(size = 24, family = "Helvetica")) +
  labs(x = "times [ms]",
       y = "predicted GRAD signal [fT/cm]") +
  ylim(-300, 300)
## ------------------------------------------------------------------------
# Simulation-based 95% prediction intervals (merTools), keeping the raw
# simulations for the spaghetti plot below.
pred_interval_mod1 <- predictInterval(
  merMod = mod1, newdata = probe, which = "full", level = 0.95,
  n.sims = 1000, stat = "mean", type = "linear.prediction",
  returnSims = T, seed = 42
)
probe_int <- bind_cols(
  probe, pred_interval_mod1)
## ---- fig.width=8, fig.height=4------------------------------------------
# Prediction curves with ribbon = 95% interval, one facet per condition.
ggplot(
  data = probe_int,
  mapping = aes(x = time, y = pred, group = condition,
                color = condition, ymin = lwr, ymax = upr)) +
  geom_ribbon(mapping = aes(fill = condition), alpha = 0.1) +
  geom_line(size = 1) +
  theme_minimal() +
  theme(text = element_text(size = 20, family = "Helvetica")) +
  labs(x = "times [ms]",
       y = "predicted GRAD signal [fT/cm]") +
  facet_wrap(~condition) +
  guides(color = F, fill = F)
## ------------------------------------------------------------------------
# let's subsample a few bootstrap simulations.
idx <- sample(1:1000, size = 100)
pred_sims <- attr(pred_interval_mod1, "sim.results")[,idx] %>%
  as.data.frame() %>%
  gather(key = "sim", value = "pred_hat")
pred_sims$sim <- pred_sims$sim %>% as.factor()
# Recycle the probe grid's metadata onto the long simulation table.
pred_sims$time <- probe_int$time
pred_sims$condition <- probe_int$condition
pred_sims$pred <- probe_int$pred
## ---- fig.width=8, fig.height=4------------------------------------------
# Spaghetti plot of individual simulations plus an empirical 2.5%-97.5%
# quantile ribbon and the mean prediction on top.
# NOTE(review): fun.ymin/fun.ymax are deprecated in ggplot2 >= 3.3
# (fun.min/fun.max); kept as-is for compatibility with the original.
ggplot(
  data = pred_sims,
  mapping = aes(x = time, y = pred,
                group = condition,
                color = condition)) +
  geom_line(
    alpha = 0.05, mapping = aes(
      y = pred_hat,
      group = interaction(sim, condition))) +
  stat_summary(
    fun.ymin = function(x){quantile(x, 0.025)},
    fun.ymax = function(x){quantile(x, 0.975)},
    mapping = aes(x = time,
                  y = pred_hat,
                  fill = condition,
                  group = condition,
                  color = condition),
    geom = "ribbon", alpha = 0.1) +
  geom_line(size = 1.5) +
  theme_minimal() +
  theme(text = element_text(size = 20, family = "Helvetica")) +
  labs(x = "times [ms]",
       y = "predicted GRAD signal [fT/cm]") +
  facet_wrap(~condition) +
  guides(color = F, fill = F)
|
d4a1af4ff66e135e24520a0bbb94c21746d9efe7
|
33ccc0ffddad6e0b8c4562a8139a435331a80751
|
/03_data_managing/R/02_simulation_complete_project/graficacion.R
|
e08f5bfd3b9f4c53b390b8461e5b53626d4f938d
|
[] |
no_license
|
AntonioPL/masterDataScience
|
0a9930b890f75ca79420470c99827412ab92f082
|
d70d79557e2447f0edee46b38ec16daaebe969f0
|
refs/heads/master
| 2020-03-23T16:57:02.517114
| 2018-06-02T10:18:49
| 2018-06-02T10:18:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 550
|
r
|
graficacion.R
|
# Plotting function: visualises one board state of the simulation.
#   tablero     : matrix of cell values rendered via the three colours below
#                 (presumably empty / two agent types -- confirm upstream)
#   descontento : logical matrix flagging the "unhappy" cells
pintarTablero <- function(tablero, descontento) {
  # Remove the white outer margin
  par(mar = rep(0,4))
  # Draw the board as a colour image
  image(tablero, col = c("white","#2a8fa3", "#ff8d00"),
        axes = FALSE, xlab=NA, xaxs="i", yaxs="i")
  posicionesDescontento <- which(descontento, arr.ind = T)
  # Overlay an X (pch = 4) on each unhappy cell, rescaling matrix indices
  # to the [0, 1] coordinate system used by image().
  points((posicionesDescontento[,"row"]-1)/(nrow(descontento)-1),
         (posicionesDescontento[,"col"]-1)/(ncol(descontento)-1), pch=4)
}
|
71987640e85eccc81520a947b028945eaf766809
|
470c020009250d2ad9f21dbbe14921d52bb92fd1
|
/run_analysis.R
|
d9af57f1fa3f19f56916544743abfccae7e3987e
|
[] |
no_license
|
Jinishc/run_analysis
|
2735b833ee480af93bb425c0ccf14f3e7f6fc00b
|
51f6cae15563eeacefdd8690350cbec6b60eb28a
|
refs/heads/master
| 2021-01-22T02:58:33.497759
| 2014-09-21T19:32:33
| 2014-09-21T19:32:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,726
|
r
|
run_analysis.R
|
## run_analysis.R -- builds a tidy per-subject/per-activity summary of the
## UCI HAR (smartphone activity recognition) dataset.
# Bug fix: the original mixed two different base directories
# ("cloud_data/..." and "data_cloud/..."), so half the reads always failed;
# every file lives under one extracted "UCI HAR Dataset" folder.
data_dir <- "cloud_data/UCI HAR Dataset"
## Reading training set, test set, labels, subjects and metadata
train_x <- read.table(file.path(data_dir, "train", "X_train.txt"))
test_x  <- read.table(file.path(data_dir, "test", "X_test.txt"))
train_y <- read.table(file.path(data_dir, "train", "y_train.txt"))
test_y  <- read.table(file.path(data_dir, "test", "y_test.txt"))
subject_train <- read.table(file.path(data_dir, "train", "subject_train.txt"))
subject_test  <- read.table(file.path(data_dir, "test", "subject_test.txt"))
activity_lables <- read.table(file.path(data_dir, "activity_labels.txt"))
features <- read.table(file.path(data_dir, "features.txt"))
##============================================================================##
## 1. Merging the training and the test sets.
complete_x <- rbind(train_x, test_x)
##============================================================================##
## 2. Extracting only the mean and standard deviation measurements.
colnames(complete_x) <- as.character(features[, 2])
# Renamed from `mean`/`sd`, which shadowed the base functions of the same name.
mean_cols <- grep("mean()", colnames(complete_x), fixed = TRUE)
sd_cols   <- grep("std()",  colnames(complete_x), fixed = TRUE)
meansd <- complete_x[, c(mean_cols, sd_cols)]
##============================================================================##
## 3. Attaching the activity code to each observation.
complete_y <- rbind(train_y, test_y)
activity <- cbind(complete_y, meansd)
colnames(activity)[1] <- "Activity"
##============================================================================##
## 4. Using descriptive activity names: vectorized index lookup replaces the
##    original per-row for loop (activity codes index the label table rows).
activity_lables[, 2] <- as.character(activity_lables[, 2])
activity$Activity <- activity_lables[activity$Activity, 2]
##============================================================================##
## 5. Tidy data set: the average of each variable for each activity and each
##    subject, computed in a single aggregate() call instead of one call per
##    column.
subject_all <- rbind(subject_train, subject_test)
all <- cbind(subject_all, activity)
colnames(all)[1] <- "Subject"
Tidy_data <- aggregate(all[, 3:ncol(all)],
                       by = list(Subject = all$Subject,
                                 Activity = all$Activity),
                       FUN = mean)
colnames(Tidy_data)[3:ncol(Tidy_data)] <- colnames(meansd)
##============================================================================##
## Writing the data into a file.
write.table(Tidy_data, file = "tidy_data.txt", row.names = FALSE)
|
b189e437c7aa2bdd2e98d0ec9b4bc141207e0ce9
|
776f8f9d8b06aa953ddce8a6c4d49bc4b6a3b940
|
/plot1.R
|
d6d0ec3768e946248da9828981419783ce5dbc15
|
[] |
no_license
|
FatimaAlshaikh59/ExData_Plotting1
|
bae757fb4706b947de451c25c6e280a069669a60
|
aebbf7fb9291f6177d88db336b15968f3590b89c
|
refs/heads/master
| 2021-05-12T06:55:41.843900
| 2018-01-12T18:52:55
| 2018-01-12T18:52:55
| 117,229,738
| 0
| 0
| null | 2018-01-12T10:48:07
| 2018-01-12T10:48:06
| null |
UTF-8
|
R
| false
| false
| 1,295
|
r
|
plot1.R
|
## plot1.R -- histogram of Global Active Power for 2007-02-01/02
## (Exploratory Data Analysis course project, plot 1).
# Create the data directory if it does not exist.
if (!file.exists("./Project")) { dir.create("./Project") }
# Dataset zip URL, from where the data will be downloaded.
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
# Download and unzip only when the extracted file is not already present
# (avoids re-fetching the large archive on every run).
if (!file.exists("./Project/household_power_consumption.txt")) {
  download.file(fileUrl, destfile = "./Project/Dataset.zip")
  unzip(zipfile = "./Project/Dataset.zip", exdir = "./Project")
}
# Read the raw data; "?" marks missing values.
mydata <- read.table("./Project/household_power_consumption.txt",
                     sep = ";", header = TRUE, na.strings = "?")
# Keep only the two days of interest.
subset <- subset(mydata, mydata$Date == "1/2/2007" | mydata$Date == "2/2/2007")
library(dplyr)
library(lubridate)
# Convert the date and time columns.
subset$Date <- as.Date(subset$Date, format = "%d/%m/%Y")
subset$Time <- strptime(subset$Time, format = "%H:%M:%S")
# strptime() stamps today's date on each time; overwrite each day's block
# (1440 minutes per day) with its actual date.
subset[1:1440, "Time"] <- format(subset[1:1440, "Time"], "2007-02-01 %H:%M:%S")
subset[1441:2880, "Time"] <- format(subset[1441:2880, "Time"], "2007-02-02 %H:%M:%S")
# Render the histogram to a PNG device.
png(file = "plot1.png")
# Bug fix: label typos corrected ("Frequancy" -> "Frequency",
# "killowatts" -> "kilowatts").
hist(subset$Global_active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)", ylab = "Frequency")
# Close the device.
dev.off()
|
e06241cf0bab134d9b79934833b7a12b94d78c77
|
c71f1b20e935a67765f79197c797642ac54eada9
|
/tests/runit.c2fTest.R
|
d404ef65a783b4ecefed6a6d6e1852713072b675
|
[] |
no_license
|
matthewstifler/IntroToRPresentation
|
1278f7ac425e13cbedc8d294ed60926c3d4c6a20
|
b7f58ff8251236b34b1d7e21afa7988645a9214a
|
refs/heads/master
| 2020-02-26T17:35:55.526262
| 2015-08-07T00:55:32
| 2015-08-07T00:55:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 724
|
r
|
runit.c2fTest.R
|
# RUnit test suite for c2f() (Celsius-to-Fahrenheit conversion).
# default naming convention for test functions is test.NameOfFunctionToTest
test.c2f_ValueZero <- function() {
  #arrange
  #act
  result<-c2f(0)
  #assert
  # 0 degC == 32 degF
  checkEquals(result, 32)
}
test.c2f_Value50 <- function() {
  #arrange
  #act
  result<-c2f(10)
  #assert
  # 10 degC == 50 degF (numeric comparison with tolerance)
  checkEqualsNumeric(result, 50)
}
test.c2f_ValueStringParam <- function() {
  #arrange
  #act
  # A non-numeric argument is expected to raise an error.
  checkException(c2f("xx"))
  #assert
}
test.c2f_ValueStringEmpty <- function() {
  #arrange
  #act
  # An empty string is also expected to raise an error.
  checkException(c2f(""))
  #assert
}
# Deliberately wrong expectation (0 degC is 32 degF, not 75) kept to
# demonstrate how RUnit reports a failing test.
test.c2f_ValueZeroToShowFailure <- function() {
  #arrange
  #act
  result<-c2f(0)
  #assert
  checkEqualsNumeric(result, 75)
}
# Not discovered by RUnit: the name does not start with "test.".
ThisIsNotATest<-function(){
  checkException(c2f("xx"))
}
|
b8f75dcdf71ad136c66b8becc83c9bc52c958f68
|
1bc01adb5c92c98fbf3ac76623e93d951db59808
|
/work/u2/FluTrain.R
|
36c74bdc09ec69b7592d7a6509252082655d8ede
|
[] |
no_license
|
sergii-bond/mit15.071x
|
8d86c9c033d56f7cdef612ce5095a0fdcad0cb67
|
5309a71540402102fbd1de2fa2c9eaf776dbb043
|
refs/heads/master
| 2021-05-29T06:41:54.707270
| 2015-05-25T06:53:46
| 2015-05-25T06:53:46
| 31,793,701
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,311
|
r
|
FluTrain.R
|
# Forecasting influenza-like illness (ILI) from Google search query volumes
# (MITx 15.071x exercise, Google Flu Trends style).
FluTrain = read.csv("FluTrain.csv")
str(FluTrain)
#DateConvert = as.Date(strptime(FluTrain$Week, "%y-%m-%d"))
# Log-linear model: ILI is modelled on the log scale against query volume.
FluTrend1 = lm(log(ILI) ~ Queries, data = FluTrain)
summary(FluTrend1)
FluTest = read.csv("FluTest.csv")
# Back-transform predictions from the log scale with exp().
PredTest1 = exp(predict(FluTrend1, newdata=FluTest))
# Relative error for the week of 2012-03-11: (actual - predicted) / actual.
i = which(FluTest$Week == '2012-03-11 - 2012-03-17')
x = PredTest1[i]
1 - x/FluTest$ILI[i]
# Test-set RMSE of the simple model.
SSE = sum((PredTest1 - FluTest$ILI)^2)
sqrt(SSE/nrow(FluTest))
# NOTE(review): install.packages() inside a script re-installs on every run;
# better done once interactively.
install.packages("zoo")
library(zoo)
# Align each week with the ILI value from two weeks earlier (zoo lag with
# k = -2; the first two observations are padded with NA).
ILILag2 = lag(zoo(FluTrain$ILI), -2, na.pad=TRUE)
FluTrain$ILILag2 = coredata(ILILag2)
# Time-series model: add the log of the two-week-lagged ILI as a predictor.
FluTrend2 = lm(log(ILI) ~ Queries + log(ILILag2), data = FluTrain)
summary(FluTrend2)
ILILag2_test = lag(zoo(FluTest$ILI), -2, na.pad=TRUE)
FluTest$ILILag2 = coredata(ILILag2_test)
# Fill the first two test lags with the last two training observations
# (presumably the test period immediately follows training -- confirm).
FluTest$ILILag2[1] = FluTrain$ILI[nrow(FluTrain)-1]
FluTest$ILILag2[2] = FluTrain$ILI[nrow(FluTrain)]
PredTest2 = exp(predict(FluTrend2, FluTest))
# Test-set RMSE of the lagged model.
sqrt(mean((PredTest2 - FluTest$ILI)^2))
#In this problem, we used a simple time series model with a single lag term.
#ARIMA models are a more general form of the model we built, which can include
#multiple lag terms as well as more complicated combinations of previous values
#of the dependent variable. If you're interested in learning more, check out ?arima
#or the available online tutorials for these sorts of models.
|
8720fb32f691ba9ae512cff3e1ff396d43d8cf45
|
d481473c7bf59ef07fb2f0f7f6353e0beff5fa48
|
/man/convo_add.Rd
|
e927b9785324a67340caa6b1b39779ccf83bdbc7
|
[] |
no_license
|
crumplecup/muddier
|
92e1d4845db3d13e1297060d50d0244d5b00064f
|
c4d67a17377e45a35426cbb11ace342afaed6806
|
refs/heads/master
| 2021-11-28T03:41:33.262356
| 2021-08-13T03:11:39
| 2021-08-13T03:11:39
| 175,301,894
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 569
|
rd
|
convo_add.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{convo_add}
\alias{convo_add}
\title{derived pmf by addition}
\usage{
convo_add(x, y, index)
}
\arguments{
\item{x}{is a numeric vector (younger pmf)}
\item{y}{is a numeric vector (older pmf)}
\item{index}{is a numeric vector (years)}
}
\value{
a length(index) numeric vector of the convolved distribution of y+x
}
\description{
Given two pmfs and an age vector, returns the derived pmf of the convolution y+x.
Lengths of x,y and index must be equal.
}
\seealso{
convo_plus
}
|
0e954150ab81da4bf049b0839fca629f7cb1b1c0
|
23830ac5e6baecea22e9f7c2ccd34f7c2a8ac240
|
/man/graticule.Rd
|
5688556539dbbafca7f6043606984ceb040afbe8
|
[] |
no_license
|
msandifo/slab
|
4c1329b25d0af3f4b68ee31194891831ca945502
|
3d68597e59886033552bcad28856df91da374fac
|
refs/heads/master
| 2021-06-28T08:50:36.213397
| 2020-10-12T21:07:30
| 2020-10-12T21:07:30
| 170,061,444
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 230
|
rd
|
graticule.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/graticules.R
\name{graticule}
\alias{graticule}
\title{Title}
\usage{
graticule(slab, inc = c(5, 5))
}
\arguments{
\item{inc}{}
}
\description{
Title
}
|
a1fb138434baf6c11eae992982ecb1a599bed933
|
17f1b5b761a43ec178602a43f24ac72c2d5d01a9
|
/hmlasso/inst/testfiles/softThresholdC/libFuzzer_softThresholdC/softThresholdC_valgrind_files/1609897137-test.R
|
9c6a90f4cdb83a16def8967ee376d2ce5492521d
|
[] |
no_license
|
akhikolla/newtestfiles-2
|
3e1882e7eea3091f45003c3abb3e55bc9c2f8f56
|
e539420696b7fdc05ce9bad66b5c7564c5b4dab2
|
refs/heads/master
| 2023-03-30T14:44:30.614977
| 2021-04-11T23:21:23
| 2021-04-11T23:21:23
| 356,957,097
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
1609897137-test.R
|
# Auto-generated fuzzer reproduction case (libFuzzer/valgrind run): replays
# one concrete input against the internal hmlasso soft-thresholding routine.
# The denormal-magnitude values are the exact input the fuzzer recorded.
testlist <- list(g = 1.39069246441664e-308, z = 3.01741019876802e-257)
# do.call spreads the named list as arguments to hmlasso:::softThresholdC.
result <- do.call(hmlasso:::softThresholdC,testlist)
# Print the result structure so a crash or changed output is visible.
str(result)
|
625d97dfc6fa378d21f68ae7297fbd6fead8b5fd
|
3d7b5cce9500531959e520e06fe2eaee0bfe5c2c
|
/R/bigramfrequency.R
|
44263332b29c3997c0ac5f89f171b7c2d84e4aaa
|
[] |
no_license
|
edouardschuppert/edouaRd
|
50f4dc0123e4408fc71b8e1346f4d002940fc627
|
48c7b216057dfa904aec8c764807d689d48c3dd5
|
refs/heads/master
| 2021-06-12T07:41:33.528929
| 2021-06-07T07:51:37
| 2021-06-07T07:51:37
| 134,296,678
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,340
|
r
|
bigramfrequency.R
|
#' bigramfrequency
#'
#' Tokenise a text column into bigrams, drop any bigram in which either word
#' appears in the package word-frequency dictionary, and return bigram counts
#' sorted by decreasing frequency.
#'
#' @param df Your data frame
#' @param colonne Your column
#' @param slice Allows you to keep only a small number of observations, starting from the first. Default set to NA
#' @importFrom rlang enquo
#' @importFrom rlang .data
#' @importFrom dplyr select
#' @importFrom dplyr mutate
#' @importFrom dplyr anti_join
#' @importFrom dplyr count
#' @importFrom dplyr arrange
#' @importFrom dplyr desc
#' @importFrom dplyr slice
#' @importFrom magrittr %>%
#' @importFrom tidyr separate
#' @importFrom tidyr unite
#' @importFrom tidytext unnest_tokens
#' @return Return the bigram frequency of the column you chose
#' @export
bigramfrequency <- function(df, colonne, slice = NA) {
  colonne <- enquo(colonne)
  # Loading wordfrequency (stop-word) dictionary shipped with the package
  wf_dictionary <- edouaRd::wf_dictionary
  # Clean the text, split into bigrams, then filter each half of the bigram
  # against the dictionary before counting.
  df <- df %>%
    select(!!colonne) %>%
    mutate(colonne = povertext(!!colonne)) %>%
    unnest_tokens(.data$bigram, colonne, token = "ngrams", n = 2, to_lower = TRUE) %>%
    select(- !!colonne) %>%
    separate(.data$bigram, c("bigram1", "bigram2"), sep = " ") %>%
    anti_join(wf_dictionary, by = c("bigram1" = "words")) %>%
    anti_join(wf_dictionary, by = c("bigram2" = "words")) %>%
    unite(bigram, .data$bigram1, .data$bigram2, sep = " ") %>%
    count(.data$bigram) %>%
    arrange(desc(n))
  # Keep only the desired length. seq_len() (not 1:slice) stays correct for
  # slice = 0, and !is.na() replaces the "== FALSE" anti-idiom.
  if (!is.na(slice)) {
    df <- df %>%
      slice(seq_len(slice))
  }
  df
}
|
3c86428ccbb759311ec83d5b4e1f76e81f92227a
|
43123af59b384a0adb1d2f1440e1c33dce4d5449
|
/examples/jobs/transpose_df.R
|
33035cb9f3132df1c7c9291d3e997105fc1caba7
|
[] |
no_license
|
timemod/regts
|
8723e6b2f2e4f088564c648fbdecf1a74ade7203
|
754490eb024354d37774e929bf640f1af58ccb5a
|
refs/heads/master
| 2023-03-06T16:45:31.978567
| 2023-03-03T13:36:53
| 2023-03-03T13:36:53
| 107,998,574
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 387
|
r
|
transpose_df.R
|
# Demo: transpose a data frame with regts::transpose_df(), taking the new
# column names from column 1 and labels from column 2, then transpose back.
library(Hmisc)
library(regts)

vars <- c("a", "b", "c")
df <- data.frame(
  namen  = vars,
  labels = paste("Variabele", vars),
  x      = 1:3,
  y      = 10:12
)

df2 <- transpose_df(df, colname_column = 1, label_column = 2)
print(df2)

# Transposing a second time should recover the original layout.
df3 <- transpose_df(df2)
print(df3)
|
5a0f220c24e1c100ddac006cbe6d577f30f55e70
|
134fa4a7168493e30a25e14459fbc5ee39434888
|
/pds_analysis.R
|
1606b0e29742fbeb9087587aad9f263e2e16fc37
|
[] |
no_license
|
Dr-Nathan-Fox/pdf_analysis
|
d9ac4a31dfd49f414b025bf0dded7942af40e4d2
|
1c692c8c99b4415dcd4f747ef1cd72cdbb55a8ff
|
refs/heads/master
| 2023-05-23T03:53:57.984566
| 2020-07-07T10:22:18
| 2020-07-07T10:22:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,559
|
r
|
pds_analysis.R
|
#### library needed packages ####
# install.packages("pdftools")
library(pdftools)
# install.packages("tm")
library(tm)
# install.packages("ggplot2")
library(ggplot2)
# install.packages("dplyr")
library(dplyr)

#### read in pdf files and store as term document matrix ####
# create object with pdf names; file.path() builds portable paths
# (the original hard-coded Windows "\\" separators)
pdf_dir <- "papers"
files <- list.files(path = pdf_dir, pattern = "pdf$")
# create corpus (may take a while with large number of papers)
corp <- Corpus(URISource(file.path(pdf_dir, files)),
               readerControl = list(reader = readPDF))
# issues with removing punctuation from pdf encoded letters
corp <- tm_map(corp, removePunctuation, ucp = TRUE)
# create term document matrix
pdf_tdm <- TermDocumentMatrix(corp,
                              control =
                                list(stopwords = TRUE, # remove common words
                                     tolower = TRUE, # make all words lower case
                                     stemming = TRUE, # stem words to their root i.e. hiking and hiker both become hike
                                     removeNumbers = TRUE, # remove numbers
                                     bounds = list(global = c(3, Inf)))) # keep words that appear more than 3 times

#### create dataframe with frequency of key words per paper ####
# list of keywords to find (this search will be determined at a later date)
keywords <- c("ethic", "social")
# find the number of times each keyword appears in each pdf
keyword_per_paper <- as.matrix(pdf_tdm[keywords, ])

#### summarise which papers contain a given keyword ####
# find which papers contain a chosen keyword (e.g. here I choose ethic)
papers_containing <- t(keyword_per_paper) # reshape df
papers_containing <- subset(papers_containing, papers_containing[, "ethic"] > 0) # keep papers where the word appears 1 or more times
papers_containing <- data.frame(paper = row.names(papers_containing)) # store papers as a df col

#### calculate and plot the total number of papers that contain each key word ####
# calculate how many papers mention each keyword
keyword_usage <- data.frame(paper_count = rowSums(keyword_per_paper > 0))
# fix row names as col so they can be plotted
keyword_usage$keyword <- rownames(keyword_usage)
# plot the number of papers each keyword is found in
ggplot(data = keyword_usage, aes(x = keyword,
                                 y = paper_count,
                                 fill = keyword)) +
  geom_bar(stat = "identity") +
  theme_minimal() +
  theme(legend.position = "none")
|
4f42fb189e84909b676e4595051081e2862216c9
|
2c6cad6728b4ad2181981fb0ca5d7345d69b149b
|
/R/pretty_labels.R
|
49b58b57f2be636ba93d573ec2b250b79f207db2
|
[] |
no_license
|
matteodefelice/panas
|
a765b2cca4a51b41485dbe538d55a57ba5aff6f9
|
428a8b7cd2c1560f8ef9d54533e2c8e93e344c19
|
refs/heads/master
| 2022-04-05T12:25:29.429213
| 2020-02-27T20:46:54
| 2020-02-27T20:46:54
| 111,098,601
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 824
|
r
|
pretty_labels.R
|
#' Create pretty labels useful to visualise discrete intervals
#'
#' The function creates pretty labels for the breaks used in discrete scales in \code{ggplot2}
#' @author Matteo De Felice
#' @param breaks Vector of breaks' values (at least one value).
#' @param separator_character Character that is used to separate the values of the range shown in the legend. Default is ",".
#' @return A character vector with all the labels, of length
#'   \code{length(breaks) + 1}: the open interval below the first break,
#'   one label per pair of consecutive breaks, and the open interval above
#'   the last break.
pretty_labels <- function(breaks, separator_character = ',') {
  n <- length(breaks)
  stopifnot(n >= 1)
  # Inner "a,b" labels, vectorised over consecutive break pairs. The original
  # loop over 1:(n - 1) broke for n == 1 (1:0 counts backwards and produced
  # garbage labels); breaks[-n] / breaks[-1] are simply empty in that case.
  inner <- if (n > 1) {
    paste0(breaks[-n], separator_character, breaks[-1])
  } else {
    character(0)
  }
  c(paste0('<', breaks[1]), inner, paste0('>', breaks[n]))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.