blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
167395623dc0cc3f89882fd6bd2da67801c28e80
|
357c61695c0b2885916745226b5d0dc7408766c0
|
/BAMMtools/R/arc.R
|
f2afaddda3b13a4829f9072c74d56d3b2370b379
|
[] |
no_license
|
macroevolution/bammtools
|
62b4f9c6dd20ea37d1df6b7dd75d10967a8f3e75
|
07a17d8260a9e17419ca4bbc27687b4b6a7164be
|
refs/heads/master
| 2022-11-22T15:11:11.336582
| 2022-11-11T17:08:43
| 2022-11-11T17:08:43
| 17,520,404
| 7
| 7
| null | 2016-05-05T21:09:28
| 2014-03-07T16:42:06
|
R
|
UTF-8
|
R
| false
| false
| 1,700
|
r
|
arc.R
|
##################################
# Internal function called by plot.dtrates(...)
# Arguments:
# x,y = coordinates of center of curvature of arc, e.g. (0,0)
# theta1 = initial theta of arc (radians)
# theta2 = ending theta of arc (radians)
# rad = radius of arc
# Draw circular arc segments for a fan-style (polar) phylogeny plot.
# Vectorized over arcs: theta1, theta2, rad, border are parallel vectors
# (one entry per edge); (x, y) is the shared center of curvature.
# Extra arguments in `...` are forwarded to the graphics calls.
arc <- function(x,y,theta1,theta2,rad,border,...)
{
# keep only edges that subtend a non-zero angle (tips have theta1 == theta2)
noTips <- which((theta2 - theta1) != 0);
# (length(theta1)+1)/2 is presumably the tip count of the tree -- for large
# trees draw each arc with its own lines() call (low memory, slower)
if ((length(theta1)+1)/2 > 1000) {
# angular step so each arc is approximated by ~30 pieces; the step's sign
# matches sign(theta2 - theta1), so seq() runs in the right direction
steps <- (theta2-theta1)/30;
steps <- steps[noTips];
theta1 <- theta1[noTips];
theta2 <- theta2[noTips];
rad <- rad[noTips];
border <- border[noTips];
for (i in 1:length(steps))
{
# polyline points along arc i, offset to the center (x, y)
xv <- x+rad[i]*cos(seq(theta1[i],theta2[i],steps[i]));
yv <- y+rad[i]*sin(seq(theta1[i],theta2[i],steps[i]));
lines(xv,yv,lend=2,col=border[i],...);
}
}
else {
#storing all the coords up front for fast arc plotting, so can be memory intensive.
#tested on tree with 6670 tips with no problem, but for now only use
#for trees under 1000 tips
# pack per-arc parameters column-wise; when `border` is a colour string the
# whole matrix is coerced to character, hence the as.numeric() below
m <- matrix(NA, nrow=4, ncol=length(noTips));
m[1,] <- theta2[noTips];
m[2,] <- theta1[noTips];
m[3,] <- rad[noTips];
m[4,] <- border[noTips];
arcsegs <- apply(m, 2, function(z) {
# z = c(theta2, theta1, rad, border); restore the numeric part
zz <- as.numeric(z[1:3]);
# step from theta2 (zz[1]) toward theta1 (zz[2]) in 30 increments
inc <- (zz[2] - zz[1])/30
xv <- zz[3]*cos(seq(zz[1],zz[2],inc));
yv <- zz[3]*sin(seq(zz[1],zz[2],inc));
# turn the 31 polyline points into 30 (start, end) pairs: duplicate every
# point, drop the first and last, then fold into a 2-column matrix
xv <- rep(xv, each=2);
xv <- xv[-c(1,length(xv))];
xv <- matrix(xv, ncol=2, byrow=TRUE);
yv <- rep(yv, each=2);
yv <- yv[-c(1,length(yv))];
yv <- matrix(yv, ncol=2, byrow=TRUE);
# columns end up as: x-start, x-end, y-start, y-end, colour
data.frame(xv,yv,rep(z[4],nrow(xv)),stringsAsFactors=FALSE);
});
arcsegs <- do.call(rbind, arcsegs);
# single vectorized segments() call draws every piece of every arc at once
segments(x+arcsegs[,1], y+arcsegs[,3], x+arcsegs[,2], y+arcsegs[,4], col=arcsegs[,5], lend=2, ...);
}
}
|
e6dc6f0e90802635192c9923ea5bee585cc0c491
|
257a9d151f700a429b79a7d8a34fb1298b807cf2
|
/FinalAssignmentR.R
|
09f3eee3da807926b6fc9dc4d970a174bf4606a8
|
[] |
no_license
|
jmepstein/mammalian_body_size
|
bc322d4910a6f6cd7a02487d3f4df528a91ec6b3
|
c13f7ef26223e64ca1f252a1e9b5d9055ec34f59
|
refs/heads/master
| 2021-01-10T09:53:39.834737
| 2015-11-13T19:53:12
| 2015-11-13T19:53:12
| 46,136,109
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,342
|
r
|
FinalAssignmentR.R
|
# R Assignment 8 (Final Assignment)
# Load packages.  FIX: the original ran install.packages("dplyr")
# unconditionally, re-downloading the package on every execution; install
# only when dplyr is not already available.
if (!requireNamespace("dplyr", quietly = TRUE)) {
  install.packages("dplyr")
}
library(dplyr)

# Import mammal body size data; -999 encodes missing values in MOMv3.3.txt.
# (T/F shorthand replaced with TRUE/FALSE throughout -- T and F are
# reassignable variables, not reserved words.)
mammal_sizes <- read.csv("MOMv3.3.txt", header = FALSE, sep = "\t",
                         na.strings = "-999", stringsAsFactors = FALSE)
head(mammal_sizes)

# Add column names to mammal body size data:
colnames(mammal_sizes) <- c("continent", "status", "order",
                            "family", "genus", "species", "log_mass",
                            "combined_mass", "reference")
head(mammal_sizes)
mammal_sizes$continent

# Mean mass of the extinct and the extant species (missing masses removed):
mean_mass_extinct <- mean(mammal_sizes$combined_mass[mammal_sizes$status == "extinct"], na.rm = TRUE)
mean_mass_extant <- mean(mammal_sizes$combined_mass[mammal_sizes$status == "extant"], na.rm = TRUE)

# Mean mass of extinct species by continent.
# FIX: the original omitted na.rm here (but used it for the extant group),
# so any continent with a missing mass silently produced NA.
extinct_mass <- mammal_sizes %>%
  filter(status == "extinct") %>%
  group_by(continent) %>%
  summarize(mean(combined_mass, na.rm = TRUE))

# Mean mass of extant species by continent:
extant_mass <- mammal_sizes %>%
  filter(status == "extant") %>%
  group_by(continent) %>%
  summarize(mean(combined_mass, na.rm = TRUE))
|
5c0c8c1e3a9b5819d2f672689683794d95f61544
|
896e730c38dcc06bc34c57032309d6c3faaae656
|
/plot3.R
|
d8e480be31a75de66cfb472108acc41019fca797
|
[] |
no_license
|
elfatherbrown/ExData_Plotting1
|
0c6a2aa4520b8d530c770dfb6675d5f7a76c37b2
|
1064b0ddfcb4efc50a20d1259de6a2585c14cdcb
|
refs/heads/master
| 2020-06-03T04:22:18.557776
| 2019-06-11T23:43:14
| 2019-06-11T23:43:14
| 191,436,203
| 0
| 0
| null | 2019-06-11T19:23:20
| 2019-06-11T19:23:19
| null |
UTF-8
|
R
| false
| false
| 753
|
r
|
plot3.R
|
# plot3.R -- energy sub-metering plot (ExData plotting assignment, plot 3).
# Relies on downloadandpresent.R to define `d` (the household power data)
# and to attach the packages used below (dplyr pipes, lubridate::ymd_hms).
source("./downloadandpresent.R")

# Open the PNG device before drawing so everything renders into the file.
png(
  file = "plot3.png",
  bg = "white",
  width = 480,
  height = 480
)

rd <- d
# Combine the Date and Time columns into a single timestamp column.
rd$fulltime <- with(d, ymd_hms(paste(Date, Time)))
rd <- rd %>% select(Date, Time, fulltime, contains("meter"))

# Base plot draws Sub_metering_1 as a black line; the other sub-meters are
# overlaid with points(type = "l").
# FIX: the original immediately re-drew Sub_metering_1 with an identical
# points() call, overplotting the same black line twice; the redundant
# call is removed (rendered output is unchanged).
plot(
  rd$fulltime,
  rd$Sub_metering_1,
  col = "black",
  type = "l",
  xlab = "",
  ylab = "Energy sub metering"
)
points(rd$fulltime,
       rd$Sub_metering_2,
       col = "red",
       type = "l")
points(rd$fulltime,
       rd$Sub_metering_3,
       col = "blue",
       type = "l")
legend(
  "topright",
  c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
  lty = c(1, 1, 1),
  col = c("black", "red", "blue")
)
dev.off()
|
684e50ab8821ef3137d0c1580868964f17ab564b
|
12b690443ef108af4758e216952f0b3e154d77aa
|
/org/koyck/koyck.data.R
|
a0b9758b1a4efafe886f8548a3fbead251232232
|
[] |
no_license
|
FinancialEngineerLab/new_org
|
55ab854f6d28e4052ad46c3f6041070e13fefb03
|
fb1e42cc5e300ecb32c2fa30ddbc9fef37182c64
|
refs/heads/main
| 2023-02-25T03:16:00.969189
| 2021-02-01T06:57:50
| 2021-02-01T06:57:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,694
|
r
|
koyck.data.R
|
# Simulation parameters, presumably for a Koyck (geometric distributed-lag)
# model example -- TODO confirm against the script that generated x and y.
alpha <- -1.25  # intercept
beta <- 0.75    # slope / impact coefficient
lambda <- 0.6   # geometric decay (lag) parameter
sigma <- 0.5    # noise scale, presumably the error standard deviation
t <- 200        # series length
T <- 200        # NOTE(review): shadows base R's T (TRUE) shorthand and appears to duplicate t
x <-
c(0.0530897649405518, -0.427176340746955, 0.575506045064776,
-1.05503057032362, -0.00138425373659317, 0.362367184144129, -0.906400668762085,
1.39464604836768, -1.40047244298115, -0.872458836353285, -0.425555167755021,
-0.192907289991263, 0.611345320709905, 0.223493915844394, -0.335855643300744,
-0.786177511979181, 1.0466888412128, -1.35578280849525, -0.4802662607177,
-0.482825573577857, -0.44808934155094, 1.41677018342172, -1.12722529948411,
-1.52518755310289, -0.232336215009525, -0.452482201859223, -0.235580964484178,
-0.789514125170731, 1.61944620303219, 0.735756130006795, -1.0253656503392,
0.936124304383727, 1.10587169595422, 0.833182124741184, 0.113696178051401,
-1.15636049024915, 1.85507312756962, 1.30957252757083, 0.822235075588577,
-1.43800316565635, 2.45486908348549, -1.08724498795646, 0.776848971229546,
0.597832889170205, 2.03511422033798, 0.644031974815851, -0.34424546586535,
2.09576430506358, -1.31819393675716, -0.983349646765292, -0.253801323059,
0.0400124405198525, 1.32321740192456, 1.34863646143983, 1.05048409532454,
-0.509720742529067, 0.0583439628563658, 0.0887314886044004, -0.726046306949009,
-0.124104967380039, -0.245313277544579, 0.725831211329334, 0.607978496053649,
0.181657843933283, 0.73815311007313, -0.374541478274023, -0.0738836434686445,
-1.53490973390844, -0.0262768256018297, -0.708707193817893, -2.02297757434895,
-2.32349195838356, 0.312767235408867, 0.111621971594657, -0.0486279794591362,
-1.03172002124525, 1.30743755616116, -0.646820784669093, -1.29373998637876,
0.894575449798507, -1.76981947585609, -1.14236410068953, 0.403857478744062,
-2.12921656010045, -0.589017366268077, 0.071101314893689, -1.49141556432381,
1.43248057951142, 1.53005654182991, 0.684382343682809, -0.626254748313278,
-1.66998326386523, 0.307225175718816, -1.14775010223373, 0.633485431094213,
-0.207073393267944, 0.901346380417458, -0.308195298221923, -1.06791275423866,
0.86992149275717, -0.426881642150831, 0.115808663647752, -0.776494357107597,
0.247422445047089, 1.05522379309809, -1.31958782025799, -0.391354950582274,
0.426506175515581, 0.664791610558491, -0.44840806071706, 1.00515517216318,
-0.298830415764068, -1.45426953465826, 1.16084822745438, 0.186322749360665,
0.0681380332584535, 0.501679696665045, 0.521148639033558, 0.0771250975142938,
0.91567686328051, 0.930963990274647, -0.101379377989806, -0.659400893776547,
-2.93430663548289, 0.4305911589658, -0.602980752620163, -0.116261157768898,
-2.14436623609354, 0.178345268916593, -0.720462073504647, -0.294145674009659,
0.6811564636593, -1.34902276293778, -1.35212940883419, 0.687889813722245,
0.810848673752428, 0.0681166277415714, 2.15572182279945, -0.176516105418745,
0.910537709334256, 0.506788528869505, -0.610780399612099, 1.01306296436353,
0.677682956223761, 1.67062738783569, -1.28442232814821, -1.11758649596499,
0.41044634378664, -1.25837206120304, -1.76518575206184, 0.472415794758484,
-1.55651316459279, 0.811424059704869, -0.348076092742071, -1.93542889429846,
-1.27503332162282, 0.121510449519021, -0.530208668039229, 0.0905963424346772,
0.524541603442341, 0.8049641377789, 0.0372969351941385, 1.11477417429956,
0.620366674345649, 0.164001146980826, 0.231186126888576, -1.05636694564303,
0.140635919700479, -0.0966063682598803, -0.537220880727607, -1.30262040114514,
-1.70033888808568, -0.101986551512734, 0.995249976762152, -0.569683157513002,
-0.630851552811451, 1.67896100566997, 0.976815217463211, 0.5098001043689,
0.516241985579497, -1.12800423304803, -0.171609065759775, -0.780341145066126,
-1.83061961284957, 0.945415534507578, -0.798069024258001, -0.51127186733561,
-1.21659524640471, 0.655113290207323, 0.774610459212254, 1.64559840345851,
-0.324369739857081, -0.774697486261874, 0.802822137221436, -0.254920352794068,
0.188602777224388, -0.765038289826862, -0.194109566182363, -0.673999748759512,
-0.21824244519812)
y <-
c(-1.09070023335653, -2.49513228748069, -2.07918898737732,
-3.34621083464711, -3.21905761756761, -3.84835139936697, -4.14591579938845,
-2.17085702918779, -2.72986009710348, -2.35417403593087, -3.05798313015489,
-2.85360404455837, -2.9261492993768, -2.67713444957349, -2.97731974867721,
-4.7510500052692, -3.2939903406896, -4.11626048191435, -4.08086402479665,
-3.34743297842617, -3.59294970060833, -1.70489370379594, -3.522911840642,
-4.13944648650588, -4.04992280586435, -3.71707285571859, -2.6830205191276,
-3.21920265745786, -1.78221835462429, -1.71385611701893, -3.15284681754168,
-3.26094915407183, -2.92240028325557, -2.40186352631743, -3.1697331801915,
-4.00540748111558, -2.32625323220302, -1.74827635320394, -1.05716214917384,
-3.44536781268663, -1.60076383241359, -2.4995631336394, -2.39682556324585,
-2.41742180273721, -1.20018896465066, -0.660959903434905, -2.50872359419894,
-0.981167657551139, -3.52840797494622, -5.18250014689306, -3.57819505933194,
-3.04247090043578, -2.41838445579745, -1.81714448583416, -2.06230272157049,
-2.60231394274662, -2.8906784951591, -2.33779303024163, -3.37429413128399,
-3.64638930186805, -3.55555741328153, -3.20842234238503, -1.71576206423309,
-2.53417669028076, -2.39598643533502, -2.36790596717843, -3.55158489023092,
-4.89167815343294, -4.14892290360731, -4.06985327605721, -3.95378984988999,
-4.66400309186835, -2.73133546476362, -2.28638634477692, -2.73309251988071,
-3.81059523104289, -2.00885046638844, -2.98587983899235, -3.3145155954839,
-2.02989635165476, -3.86060804184998, -5.27081480150072, -3.69413594312219,
-5.28656838289875, -4.34676905337115, -2.7408849686209, -3.97007859057282,
-3.98985733436963, -2.76597103162415, -2.21736264629883, -4.22395699571434,
-5.60981442510417, -3.91020921696487, -3.94914415707429, -2.97976481610914,
-3.36147868510156, -2.76779160699093, -2.78864168926835, -4.4066528720631,
-2.92491387256984, -3.5017930300726, -3.35935973572259, -3.92035477233001,
-3.57879294835368, -2.01774887022139, -4.12554808360893, -3.44248547574571,
-3.0741570681026, -3.18058613466552, -2.80872759409651, -2.96351133882612,
-3.07710808939806, -3.39570508735376, -2.50262977320957, -2.41143884134249,
-2.64209636008832, -2.50290519681208, -2.02066269381151, -2.74036886013334,
-2.1932039080363, -2.56099855200113, -2.2738696303452, -3.36193455954126,
-4.33704521342994, -2.94125120765948, -4.17858985601643, -4.15974045243399,
-5.42132753177721, -3.95365908237044, -3.34784173147242, -3.59842776551002,
-3.10662378824361, -3.40194403582696, -3.67095738150516, -3.01611958691311,
-1.62535708866357, -3.18830023561942, -1.64447449561458, -1.78366632665983,
-1.70937282203804, -2.14119615622869, -3.24977170289747, -2.32580329104593,
-2.32675260653034, -1.1717486653723, -3.77503648771856, -4.19718412568603,
-3.89431771097904, -4.59642820923442, -4.95286040179107, -4.40998305197017,
-4.54881183677817, -3.85983643131139, -3.75430589802538, -5.46021456749464,
-5.2664662530989, -3.75814510108539, -3.294345905951, -2.62161453911254,
-2.15693708803611, -1.92432968592694, -2.56041616890343, -2.1030089955903,
-2.56973391915782, -2.21472061622097, -2.86470255577041, -2.80824250791949,
-2.53792988766351, -3.50684866452531, -3.81222723411719, -5.37627884550791,
-5.81812617544755, -5.37336427260018, -3.75954820839513, -3.90402097398199,
-4.20104963503409, -3.3857707171127, -2.31279090501864, -1.9083859872346,
-1.99317473036745, -3.97779059101464, -3.57871308105812, -4.78848598371836,
-5.45786618656228, -4.15448435116396, -4.07349996818775, -3.96953967403312,
-5.1236160728501, -4.33649465394156, -3.86090436219475, -3.13149047625303,
-3.64648534015647, -4.02219763172818, -3.9809452211182, -4.83489317158766,
-3.73747393179458, -3.61740567923615, -3.66673852051606, -3.56650401332061,
-3.6445122216318)
|
42ac1de820a9d62881ec15daec44cc17818c0876
|
4b79446af819574bee58d92a97638efd900fe6fd
|
/RandomForest_parit.R
|
3477d0773d95ce4b3c869d86ebc79c19ffeae723
|
[] |
no_license
|
shoili/soccerdatamining
|
ea54b4e55d8993b057a3b1eba63ced726d159b1f
|
d1bcdc6477613092d24ecf8eee29e72064cc72ce
|
refs/heads/master
| 2021-03-27T16:36:27.217080
| 2017-07-17T12:55:24
| 2017-07-17T12:55:24
| 84,890,208
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,430
|
r
|
RandomForest_parit.R
|
# RandomForest_parit.R
# Predict soccer match outcomes (home win = 1, away win = -1, draw = 0)
# from team/match features using random forests, a conditional inference
# forest, and information-gain feature selection.
# Expects soccer_4_14.csv in the project directory set below.
setwd("C:\\Users\\Parit\\Desktop\\Classwork\\Georgia Tech\\Spring 2017 work\\ISyE 7406 - Data Mining and Statistical Learning\\Project")
data <- read.csv('soccer_4_14.csv', header = TRUE)

# Response: sign of the home-minus-away goal difference
y <- data$home_team_goal - data$away_team_goal
y[y > 0] <- 1
y[y < 0] <- -1
data1 <- data

# Drop response-leaking columns and row identifiers
data[, c('home_team_goal','away_team_goal','Goal_Diff_away','Goal_Diff_Home','outcome',
         'id', 'country_id', 'match_api_id', 'home_team_api_id')] <- NULL
data[, c('home_team_goal','away_team_goal','Goal_Diff_away','Goal_Diff_Home','outcome',
         'id', 'country_id', 'match_api_id', 'home_team_api_id','away_team_api_id','id1_home_home',
         'team_fifa_api_id_home_home', 'id2_away_away', 'team_api_id1_away_away',
         'team_fifa_api_id1_away_away', 'id5','date','team_short_name_home_home','team_short_name1_away_away',
         'id6_away', 'team_api_id3_away', 'date2_away', 'date1_home', 'outcome',
         "team_long_name1_away_away", "team_long_name_home_home")] <- NULL

datanobet <- data
# FIX: the original `datanobet <- datanobet[""]` selected a non-existent
# column named "" and stopped the script with "undefined columns selected";
# datanobet is never used again, so the selection is removed.

# Keep only columns that contain no missing values
filternull <- data[, apply(data, 2, function(x) !any(is.na(x)))]
is.fact <- sapply(filternull, is.factor)
cat <- filternull[is.fact]  # NOTE: shadows base::cat for the rest of the script
# (exploratory coercions kept for reference)
# filternull$Goal_Diff_Home_10.Games <- as.numeric(filternull$Goal_Diff_Home_10.Games)
# filternull$shoton <- as.numeric(filternull$shoton)
# filternull$shotoff <- as.numeric(filternull$shotoff)
# filternull$foulcommit <- as.numeric(filternull$foulcommit)
# filternull$card <- as.numeric(filternull$card)
# filternull$possession <- as.numeric(filternull$possession)
# filternull$corner <- as.numeric(filternull$corner)
# filternull$cross <- as.numeric(filternull$cross)
filternull$stage <- as.factor(filternull$stage)
# FIX: country_id was deleted from `data` above, so the original
# `filternull$country_id <- as.factor(filternull$country_id)` assigned a
# zero-length factor and raised an error; the line is removed.
y <- as.factor(y)
filternull['y'] <- as.factor(y)
data['y'] <- as.factor(y)

# England Premier League subset
epl <- filternull[filternull$League_name == "England Premier League", ]
yepl <- y[filternull$League_name == "England Premier League"]

### Train/test partitioning: hold out a random 20% for testing
set.seed(7406)  # FIX: comments promised "set the random seed" but none was ever set
n = dim(data)[1];   ### total number of observations
n1 = round(n/5);    ### number of observations randomly selected for testing data
flag = sort(sample(1:n, n1));
train = data[-flag, ]; test = data[flag, ];
ytr = y[-flag]; yt = y[flag];
trainf = filternull[-flag, ]; testf = filternull[flag, ];
ne = dim(epl)[1];   ### total number of EPL observations
ne1 = round(ne/5);  ### EPL test-set size
flage = sort(sample(1:ne, ne1));
traine = epl[-flage, ]; teste = epl[flage, ];
ytre = yepl[-flage]; yte = yepl[flage];

# Random forest on the EPL subset using all complete features
library(randomForest)
fit <- randomForest( y~ .,
                     data=traine,
                     importance=TRUE,
                     ntree=100)
predtrain <- mean(predict(fit, traine) == ytre)  # training accuracy
predtest <- mean(predict(fit, teste) == yte)     # held-out accuracy

# Random forest on all leagues with a hand-picked feature subset
fit <- randomForest( y ~ Goal_Diff_away_10.Games + Goal_Diff_Home_10.Games + buildUpPlayPositioningClass_home +
                       buildUpPlayPositioningClass1_away + chanceCreationPositioningClass_home +
                       chanceCreationPositioningClass1_away + defenceAggression_home +
                       chanceCreationShooting_home + chanceCreationShootingClass_home +
                       chanceCreationShooting1_away + chanceCreationShootingClass1_away +
                       buildUpPlayPassing_home + chanceCreationPassing_home + chanceCreationPassing1_away +
                       stage + defenceDefenderLineClass_home + buildUpPlayPassingClass_home +
                       chanceCreationCrossing1_away + defenceAggression1_away +
                       Country_Name_home,
                     data=trainf,
                     importance=TRUE,
                     ntree=500)
predtrainf <- mean(predict(fit, trainf) == ytr)
predtestf <- mean(predict(fit, testf) == yt)

# Conditional inference forest on the full feature set
library(party)
fitp <- cforest(y~.,
                data = train,
                controls=cforest_unbiased(ntree=100, mtry=3))
# FIX: the original compared the test-set predictions against the
# full-length y (silent vector recycling gave a meaningless accuracy);
# compare against the held-out labels yt instead.
Prediction <- mean(predict(fitp, test, OOB=TRUE, type = "response") == yt)

# Entropy-based (information gain) feature ranking
library(FSelector)
weights <- information.gain(y~., data)
print(weights)
subset <- cutoff.k(weights, 20)  # top 20 features
f <- as.simple.formula(subset, "y")
print(f)
weights <- information.gain(y~., filternull)
print(weights)
subset <- cutoff.k(weights, 20)
f <- as.simple.formula(subset, "y")
print(f)
write.csv(weights, "dataingfogainnobets.csv")
|
c7b12be9bdfde3bde1498de37460b14e560129df
|
9eeabfd860248fd8a24044c80c44ccfca6280f29
|
/UI.R
|
e44921c535e599ed6f9cd7d2553023126226cd92
|
[] |
no_license
|
katywren/FAIR_Shiny
|
57d3a3a78920248b03038ae34d2d0187462dc31c
|
25e35fb0d4c82812c87e245cb5d84c21fb34782a
|
refs/heads/master
| 2020-08-17T03:49:37.340014
| 2019-10-16T19:57:48
| 2019-10-16T19:57:48
| 215,602,948
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,091
|
r
|
UI.R
|
# UI.R -- navbar layout for the FAIR EU Recommendations Shiny app.
# Three tabs: About (static description of the app), Stakeholders (lookup
# table of stakeholder groups), and App (two drop-downs driving a table of
# recommended actions).  The outputs referenced here ("stake_table",
# "select_var1", "select_var2", "table") are rendered in server.R.
# FIX: top-level assignment used `=`; idiomatic R assignment is `<-`.
ui <- navbarPage("FAIR EU Recommendations App",
  tabPanel("About",
    mainPanel(h2("Stakeholder App"),
      br(),
      p("In 2018, the EU produced the document ",
        a("Turning FAIR into Reality.",
          href = "https://ec.europa.eu/info/sites/info/files/turning_fair_into_reality_0.pdf"),
        "Part report, part action plan, this document included a list of stakeholder assigned actions and recommendations for interested parties who wish to make data FAIR. This app has been built to facilitate easy navigation of these recommendations." ),
      br(),
      p("To use, navigate to the ",
        strong("Stakeholders"),
        " tab to identify your community. Then, navigate to the ",
        strong("App"),
        " tab and select the relevant stakeholder group and the recommendation you wish to enact from the drop down lists. The app will then deliver the relevant actions your stakeholder group must enact to achieve the recommendations, according to the EU document"),
      img(src = "FAIR_Doc_Home_Page.png", align = "centre")
    )),
  tabPanel("Stakeholders",
    mainPanel(tableOutput("stake_table"))),
  tabPanel("App",
    pageWithSidebar(
      headerPanel(" "),
      sidebarPanel(
        uiOutput("select_var1"),
        uiOutput("select_var2")
      ),
      mainPanel(tableOutput("table")
      )
    )
  )
)
|
aa8f7d92d8b2c3ecebfa76f017216239c4af2b73
|
79c36961decb27e9fe6d3fe5ce6f185cce5c680d
|
/man/searchFrames.Rd
|
3ead052cb2189102dae3910e1cb9d60a7506acec
|
[] |
no_license
|
lkumle/PreProcessingDistractor
|
efe09db7bf23eddcd82978f4b85b408c7b391101
|
9034b176899092c89e1db69da7a698b20a71da41
|
refs/heads/main
| 2023-08-25T01:03:29.038851
| 2021-11-03T00:32:39
| 2021-11-03T00:32:39
| 424,042,533
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 341
|
rd
|
searchFrames.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/searchFrames.R
\name{searchFrames}
\alias{searchFrames}
\title{Read in data files for Object Sorting Task in VR.}
\usage{
searchFrames(df_main, cutoff = 10)
}
\description{
Does not explicitly return data; instead it assigns the result to a global variable from inside the function.
}
|
e5ce97a4e3d6c32b3928766be3de6b629631b8fb
|
c7e1c0014b9e279e9f38a763e4f2ad1ddd260194
|
/R/data_description.R
|
9332799a93a0d8f694cacdfcabdd6765c5fa33f2
|
[] |
no_license
|
ipstone/germVar2
|
476d5081183044520d8b658300bbc0fd9b8f0a0f
|
f7bc89f46d8b88e11a3972f47201c5ae42ece12e
|
refs/heads/master
| 2020-04-08T16:34:38.600777
| 2015-08-19T18:45:19
| 2015-08-19T18:45:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,483
|
r
|
data_description.R
|
# Roxygen documentation for the package's bundled datasets.  Each block
# documents the quoted object name that follows it.
#' Clinical info of 8852 TCGA patients
#'
#' A dataset containing the attributes of 8852 TCGA patients . The variables are as follows:
#'
#' @format A data frame with 8852 rows and 11 variables
#' \itemize{
#' \item Patient.
#' \item disease2.
#' \item age. Age at diagnosis.
#' \item agez. Standardized age.
#' \item race2. Race, some are predicted.
#' \item EA. Whether european ancestry.
#' \item gender.
#' \item study_size.
#' \item femme_ratio.
#' \item participant. TCGA patient uid
#' }
#'
"all_patients"
# NOTE(review): counts disagree below -- title says 1680 genes, @format says
# 1685 rows and 11 variables, yet only 2 items are listed; "disease2" looks
# copy-pasted from all_patients.  Verify against the actual list_goi object.
#' Candidate gene list
#'
#' A dataset containing the attributes of 1680 candidate genes. The variables are as follows:
#'
#' @format A data frame with 1685 rows and 11 variables
#' \itemize{
#' \item Gene.
#' \item disease2.
#' }
#'
"list_goi"
# NOTE(review): the description and variable list below are copy-pasted from
# all_patients (patient-level attributes), but the title says this documents
# variants.  Confirm against the actual nsSNP_vars object.
#' Info on all identified variants
#'
#' A dataset containing the attributes of 8852 TCGA patients . The variables are as follows:
#'
#' @format A data frame with 8852 rows and 11 variables
#' \itemize{
#' \item Patient.
#' \item disease2.
#' \item age. Age at diagnosis.
#' \item agez. Standardized age.
#' \item race2. Race, some are predicted.
#' \item EA. Whether european ancestry.
#' \item gender.
#' \item study_size.
#' \item femme_ratio.
#' \item participant. TCGA patient uid
#' }
#'
"nsSNP_vars"
# NOTE(review): same copy-paste issue as nsSNP_vars above.
#' Info on all identified variants
#'
#' A dataset containing the attributes of 8852 TCGA patients . The variables are as follows:
#'
#' @format A data frame with 8852 rows and 11 variables
#' \itemize{
#' \item Patient.
#' \item disease2.
#' \item age. Age at diagnosis.
#' \item agez. Standardized age.
#' \item race2. Race, some are predicted.
#' \item EA. Whether european ancestry.
#' \item gender.
#' \item study_size.
#' \item femme_ratio.
#' \item participant. TCGA patient uid
#' }
#'
"LoF_vars"
# NOTE(review): "agez" is described as "Age at diagnosis" here but as
# "Standardized age" in all_patients -- one of the two is wrong; confirm.
#' Info on all identified variants
#'
#' A dataset containing the attributes of 8852 TCGA patients . The variables are as follows:
#'
#' @format A data frame with 8852 rows and 11 variables
#' \itemize{
#' \item uid.
#' \item Patient.
#' \item agez. Age at diagnosis.
#' }
#'
"nsSNP_muts"
# NOTE(review): same agez discrepancy as nsSNP_muts above.
#' Info on all identified variants
#'
#' A dataset containing the attributes of 8852 TCGA patients . The variables are as follows:
#'
#' @format A data frame with 8852 rows and 11 variables
#' \itemize{
#' \item uid.
#' \item Patient.
#' \item agez. Age at diagnosis.
#' }
#'
"LoF_muts"
|
095e2fc8e3296a61b9c17949106fb3ccb13519f9
|
5501ebe1bb155819a4e4c103db6a2714aff9998f
|
/global.R
|
06d6be6c8a10827e5a417fbcb72e816d0651fb46
|
[] |
no_license
|
yannickkk/tirages_tiques
|
2eb4a03648282b5585c882141c5fdfcae092be9c
|
5c2d6cb2fc438a6aa05fb8566a5b74646864f007
|
refs/heads/master
| 2020-03-20T00:21:23.012912
| 2019-11-06T17:16:04
| 2019-11-06T17:16:04
| 137,040,708
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 163
|
r
|
global.R
|
# global.R -- sourced by Shiny before ui.R/server.R; attaches every package
# the app uses in both UI and server code.
library(shiny)
library(shinythemes)
library(DT)
library(dplyr)
library(shinyBS)
library(shinyTime)
library(RPostgreSQL)
library(shinyalert)
library(chron)
|
9a6f3fc7917fd6a88dbf889f55f6cfa7dee74b80
|
d1030a067b465a5604fc2dae84ca75820bfe0943
|
/Capitulo2/Ejemplos/Ejemplo1.R
|
a95b46a92be593eefe3866d7ab37a23c30c22d1f
|
[] |
no_license
|
DanielCarmine/TareaLibro
|
62431577e297fcbcd84417e092d764dc97e31dd2
|
d760c955c86424e6e8f0ed3e6a0c994b785e97c0
|
refs/heads/master
| 2020-12-20T02:57:06.530510
| 2020-02-03T02:12:27
| 2020-02-03T02:12:27
| 235,940,071
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 50
|
r
|
Ejemplo1.R
|
# Ejemplo1.R -- pairwise scatterplot matrix of the UScereal data set
# (shipped with MASS); las = 2 draws axis labels perpendicular to the axes.
# NOTE(review): `car` is attached but nothing from it is used in this file.
library(MASS)
library(car)
plot(UScereal, las = 2)
|
e1da2d2001e04ac020a38f6d51516e894a18dbef
|
c7e9a7fe3ee4239aad068c6c41149a4a09888275
|
/OLD_GALLERY_RSCRIPT/#126_add_a_segment_to_a_plotly_graph.R
|
3d00ef9723e0d7913835a3d39397dfbe7e2209b8
|
[
"MIT"
] |
permissive
|
holtzy/R-graph-gallery
|
b0dfee965ac398fe73b3841876c6b7f95b4cbae4
|
7d266ad78c8c2d7d39f2730f79230775930e4e0b
|
refs/heads/master
| 2023-08-04T15:10:45.396112
| 2023-07-21T08:37:32
| 2023-07-21T08:37:32
| 31,253,823
| 591
| 219
|
MIT
| 2023-08-30T10:20:37
| 2015-02-24T09:53:50
|
HTML
|
UTF-8
|
R
| false
| false
| 1,137
|
r
|
#126_add_a_segment_to_a_plotly_graph.R
|
# -----------------------------------------------------------------------------------------------------------------------------------------------------
#126 : Add a segment to a plotly graph.
# Load the plotly library; the example uses the built-in mtcars dataset.
library(plotly)
# SECURITY NOTE(review): plotly account credentials are hard-coded below and
# were published with this file.  The key should be revoked and supplied via
# an environment variable set outside the script, not committed to source.
Sys.setenv("plotly_username"="holtzy")
Sys.setenv("plotly_api_key"="w916mo7a8t")
#Let's use the mtcars dataset :
head(mtcars)
# Make the graph: mpg vs disp scatter, points coloured red when mpg > 20.
# (This uses plotly's old non-formula interface where bare column names are
# evaluated inside the data frame; current plotly versions expect ~mpg.)
my_graph=plot_ly(mtcars, x = mpg, y = disp, mode = "markers" , marker=list( color=ifelse(mpg>20,"red","blue") , opacity=0.5 , size=30) ) %>%
#Add the segment with add_trace: a vertical line at x = 20 spanning the
#full range of disp
add_trace(x = c(20, 20), y= c(min(disp), max(disp)), mode = "lines") %>%
#Layout
layout(showlegend=F)
my_graph
#Save: upload the figure to the plotly account, then export a 480x480 PNG
plotly_POST(my_graph, filename = "#126_add_a_segment_to_a_plotly_graph", world_readable=TRUE)
plotly_IMAGE(my_graph, width = 480, height = 480, format = "png", scale = 1, out_file = "#126_add_a_segment_to_a_plotly_graph.png")
# -----------------------------------------------------------------------------------------------------------------------------------------------------
|
a1d4d2415ce1a8c3d941e18f466329f0af8e61d6
|
65f970d1ea3f922b13ac155f84783e8a2173a3cc
|
/man/shinycopy.Rd
|
6aa0992341d53803459e222da17659620d4a7e39
|
[
"MIT"
] |
permissive
|
kasperwelbers/shinyBZpers
|
b7cc2e076e749a3e8f68063f371b1190cf7f38b3
|
17e7d1970c72790de017e2c9718a9c88f6e6274f
|
refs/heads/master
| 2021-07-08T07:01:57.728858
| 2021-06-29T06:31:33
| 2021-06-29T06:31:33
| 237,210,903
| 0
| 1
|
NOASSERTION
| 2021-04-12T16:58:26
| 2020-01-30T12:42:55
|
R
|
UTF-8
|
R
| false
| true
| 221
|
rd
|
shinycopy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_app.R
\name{shinycopy}
\alias{shinycopy}
\title{Run the Shiny Application}
\usage{
shinycopy()
}
\description{
Run the Shiny Application
}
|
e2cbc9d4ad1850ad24220ddc6788f6030dfa9b30
|
2f090b2feaff63f96269a09197797dd692df77e3
|
/man/blockDiag.Rd
|
ac7f1b0414d8c89f8d2d832d9ffaab0fffe00f5c
|
[] |
no_license
|
rjbgoudie/utils.rjbg
|
6d17f8189ec5934c76d82c4c717e622b4c8f22cb
|
8691850461ce9c9066d6e6e3bcea11e76e626521
|
refs/heads/master
| 2021-01-17T17:15:23.075727
| 2014-09-08T10:48:49
| 2014-09-08T10:48:49
| 63,424,890
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 195
|
rd
|
blockDiag.Rd
|
\name{blockDiag}
\alias{blockDiag}
\title{blockDiag}
\usage{
blockDiag(nr, nc, sample.size)
}
\arguments{
\item{nr}{...}
\item{nc}{...}
\item{sample.size}{...}
}
\description{
...
}
|
fd6ab7fc4a066c5469fd480cc72bf0e92c4fd17b
|
1451c80d0462f459c9ebb34d9c7cc325fb6a8875
|
/src/plot_kha.R
|
f8ac5e7af01755c2dbe3af6efa6a245dabfd7511
|
[] |
no_license
|
TomHarrop/asw-nopcr
|
b1ddc2111bc373bdf64837c389cb5344106192d6
|
8c7e3ba6a6790654ff2bdf22bfb2f5f6759e699f
|
refs/heads/master
| 2021-01-19T20:54:28.797777
| 2020-08-04T02:07:22
| 2020-08-04T02:07:22
| 88,576,094
| 0
| 0
| null | 2020-06-19T02:25:08
| 2017-04-18T03:13:50
|
Shell
|
UTF-8
|
R
| false
| false
| 3,126
|
r
|
plot_kha.R
|
#!/usr/bin/env Rscript
# plot_kha.R -- snakemake rule script: plot the cumulative k-mer histogram
# (raw vs. normalised reads) and annotate estimated repeat content at the
# detected elbows of each curve.
# set log: redirect both messages and output to the snakemake log file
log <- file(snakemake@log[[1]], open = "wt")
sink(log, type = "message")
sink(log, append = TRUE, type = "output")
library(data.table)
library(bit64)
library(ggplot2)
library(scales)
###########
# GLOBALS #
###########
# k-mer depth histograms produced before and after read normalisation
hist_before_file <- snakemake@input[["hist"]]
hist_after_file <- snakemake@input[["hist_out"]]
plot_file <- snakemake@output[["plot"]]
# find files (dev)
# hist_files <- list.files("output/020_norm",
# pattern = "Male_Bee_1_hist.*txt",
# full.names = TRUE)
#
# names(hist_files) <- ifelse(grepl("-out", hist_files), "Normalised", "Raw")
# dev
# hist_before_file <- "output/020_norm/asw_hist.txt"
# hist_after_file <- "output/020_norm/asw_hist-out.txt"
# plot_file <- "test/asw_kha.pdf"
########
# MAIN #
########
hist_files <- c(Raw = hist_before_file, Normalised = hist_after_file)
hist_data_list <- lapply(hist_files, fread)
# Add cumulative-percentage columns to one depth histogram (modified in
# place via data.table `:=`).  The last row is dropped first -- presumably
# the histogram's catch-all overflow bin; TODO confirm against the tool
# that wrote the histogram.
CalculateKhaStats <- function(x) {
setkey(x, `#Depth`)
x <- x[!last(x)]
x[, cum_sum := cumsum(as.numeric(Raw_Count))]
x[, percent_kmers := 100 * cum_sum / sum(Raw_Count)]
return(x)
}
kha_data_list <- lapply(hist_data_list, CalculateKhaStats)
# stack Raw + Normalised into one table, keyed by the "type" column
kha <- rbindlist(kha_data_list, idcol = "type")
# cumulative % of 31-mers vs. depth, log4 x-axis, one curve per type
gp <- ggplot(kha, aes(x = `#Depth`, y = percent_kmers, colour = type)) +
theme_minimal(base_size = 8) +
theme(legend.justification = c("right", "bottom"),
legend.position = c(0.9, 0.1),
legend.key.size = unit(0.5, "lines"),
axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) +
scale_color_viridis_d(guide = guide_legend(title = NULL)) +
xlab("Depth") + ylab("Cumulative percentage of read 31-mers") +
ylim(c(0, 100)) +
scale_x_continuous(trans = log_trans(base = 4),
breaks = trans_breaks(function(x) log(x, 4),
function(x) 4^x)) +
geom_path(alpha = 0.75)
# find the elbows: positions (depth <= 256) where the histogram's
# first difference changes sign, i.e. local extrema of Raw_Count
kha_diff <- kha[`#Depth` <= 256,
.(diff_pct = diff(diff(Raw_Count) > 0) != 0,
`#Depth` = `#Depth`[c(-1, -2)],
percent_kmers = percent_kmers[c(-1, -2)]),
by = type]
kha_diff[, percent_repeat :=
paste0("~", round(100 - percent_kmers, 0), "% repeats")]
# NOTE(review): the [c(5, 21)] picks the 5th and 21st sign changes -- these
# indices look hand-tuned for one specific dataset; confirm before reuse.
gp_with_elbows <- gp +
geom_hline(data = kha_diff[diff_pct == TRUE][c(5, 21)],
mapping = aes(yintercept = percent_kmers,
colour = type),
linetype = 2,
show.legend = FALSE) +
geom_text(data = kha_diff[diff_pct == TRUE][c(5, 21)],
mapping = aes(label = percent_repeat,
x = 0),
hjust = "left",
vjust = -0.1,
size = 2,
show.legend = FALSE)
# convert the target size from printer points to mm for ggsave
wo <- grid::convertUnit(grid::unit(483/3, "pt"), "mm", valueOnly = TRUE)
ho <- grid::convertUnit(grid::unit(664/3, "pt"), "mm", valueOnly = TRUE)
ggsave(filename = plot_file,
plot = gp_with_elbows,
device = cairo_pdf,
width = wo,
height = ho,
units = "mm")
# record package versions in the log for reproducibility
sessionInfo()
|
641e0f84ee7c26efd6f717165120b402b8beb9a8
|
657f8f17907fef8a6ebf11355ce37e6ed85a9890
|
/man/rowr.Rd
|
a04e64ae73d749ea6066782f8925ccc2e4327bc7
|
[
"MIT"
] |
permissive
|
jimjunker1/junkR
|
d34c935d266b845cb4fe01fc23243510ed46ea07
|
136f1d7234828a1a35e15942e10fb892645afe3a
|
refs/heads/master
| 2022-05-08T16:25:48.013811
| 2022-04-01T16:15:22
| 2022-04-01T16:15:22
| 165,153,893
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,089
|
rd
|
rowr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rowr.R
\name{rowr}
\alias{rowr}
\title{Row-Based Functions for R Objects}
\description{
Provides utilities which interact with all R objects as
if they were arranged in rows. It allows more consistent and predictable
output to common functions, and generalizes a number of utility functions to
to be failsafe with any number and type of input objects.
}
\details{
This version was pulled from deprecated package on 2021-01-04 to account for
recent archival and code break following update to R 4.0.3
Version: 1.1.3
Date: 2016-12-19
Author: Craig Varrichio <canthony427@gmail.com>
Maintainer: Craig Varrichio <canthony427@gmail.com>
Description: Provides utilities which interact with all R objects as
if they were arranged in rows. It allows more consistent and predictable
output to common functions, and generalizes a number of utility functions to
to be failsafe with any number and type of input objects.
Depends: R (>= 3.0.1)
Imports: methods
License: GPL-3
URL: https://github.com/cvarrichio/rowr
}
|
c98337637bbd270814b39aa7b4a9edd8693d42a1
|
e983d6616614cb0b9ea5e2de6399480ebff96052
|
/man/hbrplot.Rd
|
d2a4ae1fcc4808e37c58db2f6315f5c962230ea5
|
[] |
no_license
|
CerebralMastication/hbrplot
|
79972f11fb301364d558eb9252338b3a0191d99e
|
877c245939ce6877fef5772c79ea6f5d359c59ff
|
refs/heads/master
| 2021-12-29T18:58:24.538537
| 2021-12-22T13:53:23
| 2021-12-22T13:53:23
| 155,118,978
| 8
| 4
| null | 2018-10-31T20:33:52
| 2018-10-28T21:48:19
|
R
|
UTF-8
|
R
| false
| true
| 1,021
|
rd
|
hbrplot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hbrplot.R
\name{hbrplot}
\alias{hbrplot}
\title{Produce a glorious HBR style 2x2 plot}
\usage{
hbrplot(items = LETTERS[1:15], quadrants = c("one", "two", "three",
"four"), x_labels = c(""), y_labels = c(""))
}
\arguments{
\item{items}{A vector of items you want plotted. Typically a string vector.}
\item{quadrants}{A vector of four quandrant names. You can give more than four, but anything after the fourth one is just ignored. Because, seriously.}
\item{x_labels}{A vector of length two which will be coerced into the x axis label.}
\item{y_labels}{A vector of length two which will be coerced into the y axis label.}
}
\value{
A ggplot2 figure that is in the style of a 2x2 plot.
}
\description{
Produce a glorious HBR style 2x2 plot
}
\examples{
hbrplot(items=c('me','you','Dog Named Boo','truck driver'),
quadrants=c('love','pet','feed','neuter'),
x_labels=c('fast','slow'),
y_labels=c('sadly','happily'))
}
|
d411c8c11af7f92046b2ceb39876d1da3e1d17e7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/FSA/examples/chapmanRobson.Rd.R
|
b3a726ba76a7a5bc8191baa31f8278579accef06
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 979
|
r
|
chapmanRobson.Rd.R
|
library(FSA)
### Name: chapmanRobson
### Title: Computes Chapman-Robson estimates of S and Z.
### Aliases: chapmanRobson chapmanRobson.default chapmanRobson.formula
### plot.chapmanRobson summary.chapmanRobson confint.chapmanRobson
### coef.chapmanRobson
### Keywords: htest manip
### ** Examples
plot(catch~age,data=BrookTroutTH,pch=19)
## demonstration of formula notation
cr1 <- chapmanRobson(catch~age,data=BrookTroutTH,ages2use=2:6)
summary(cr1)
summary(cr1,verbose=TRUE)
cbind(Est=coef(cr1),confint(cr1))
plot(cr1)
plot(cr1,axis.age="age")
plot(cr1,axis.age="recoded age")
summary(cr1,parm="Z")
cbind(Est=coef(cr1,parm="Z"),confint(cr1,parm="Z"))
## demonstration of excluding ages2use
cr2 <- chapmanRobson(catch~age,data=BrookTroutTH,ages2use=-c(0,1))
summary(cr2)
plot(cr2)
## demonstration of ability to work with missing age classes
age <- c( 2, 3, 4, 5, 7, 9,12)
ct <- c(100,92,83,71,56,35, 1)
cr3 <- chapmanRobson(age,ct,4:12)
summary(cr3)
plot(cr3)
|
99781dbd05970a2f23752d1fdd6d7cbd0f673d71
|
a2fc6071ce8176f39db921d79cd9dcb27bacb6da
|
/R/textInputRow.R
|
b0a0f1e1a99b9942effbdacd103a39082e19ce18
|
[] |
no_license
|
senickel/surveycleanup
|
bdf0298cea14493506045444589b0c392e1fbac4
|
435a02c8ed42afc467d24820930facbeae660747
|
refs/heads/master
| 2020-04-07T16:21:28.226489
| 2018-03-07T10:54:28
| 2018-03-07T10:54:28
| 124,221,758
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 440
|
r
|
textInputRow.R
|
#' textInputRow
#'
#' check nominal if they are nominal
#' @param Click here and there
#' @keywords nominal
#' @keywords
#' @export
#' @examples
#' checking.nominal()
#' @importFrom magrittr %>%
#'
## fields side by side
textInputRow<-function (inputId, label, value = "") {
div(style="display:inline-block",
tags$label(label, `for` = inputId),
tags$input(id = inputId, type = "text", value = value,class="input-small"))
}
|
f475415f1d46a0b1543dd721c067262e3e6c3319
|
16f50a812eca90748e87bfe471e0c05f178337fd
|
/2do_Semestre/Biologia_computacional/introduccion.r
|
84492ab5886ccc643fc382d7fe03bca243d89967
|
[] |
no_license
|
SeaWar741/ITC
|
65f73365762366f56cfbd6d0bc788cd384672d12
|
5f75716be58ca6e00bcd8dae7546fd19fe37657f
|
refs/heads/master
| 2023-02-05T23:25:13.972031
| 2022-09-29T10:38:32
| 2022-09-29T10:38:32
| 205,020,772
| 4
| 2
| null | 2023-01-19T15:28:45
| 2019-08-28T20:48:35
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 807
|
r
|
introduccion.r
|
a <- 10
b <- 20
a
b
#operaciones basicas
a==b
a<b
a>b
a+b
a+5
#numerica
x <-2
x
x <- 4+3*5^2
x
#caracter
x <- "TP53"
x
x<-letters[26] #en R inician desde 1 los arrays
x
#boolean
x <- 3 ==3
x
x<-4==5
x
x<-4==3+1
x
#funciones
x<- exp #funcion exponencial
x(log(5))
x<-max
x(c(1:4))
trunc(5/2)
a = TRUE
b = FALSE
a-b
b-a
a==b
a!=b
2+3*5
(2+3)*5
#R es un lenguaje interpretado
#vectores secuenciales
seq(from=1, to=10, by=1)
seq(from=1, to=10, length.out=10)
seq(17)
rep(1,times=5)
#indexar vectores
x <- rep(1:5,times = 5)
x
x[1] #1
x[1:6]#1 2 3 4 5 6
x[c(1,6,11,16,21)]#1 1 1 1 1
x[-1]#a x quitar primer elemento del 2 al 5
x[0] #integer 0, no existe por que inician en 1
x[x>3 & x <5] #todos los elementos mayores a 3 y menores a 5
x[x %in% c("a","and","the")]#elementos en el set dado
|
06ee0a27a4f706b7fefe41e51afde68fb2db0e27
|
cc303f4de5eeb3bc4530a52dd963190675a8f9f4
|
/scripts/eems_grid_dist.R
|
d0593a35ae407faedb3ffcccdac69965f418b75b
|
[] |
no_license
|
NovembreLab/eems-around-the-world
|
6bfc5b30d6ce4bc2277b16dc52bbb6e63eaf8809
|
c1443ef034f447cc41e1ff90c3b067e04d6f8fa0
|
refs/heads/master
| 2021-03-27T12:37:14.705011
| 2019-10-23T23:47:45
| 2019-10-23T23:47:45
| 55,014,895
| 14
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 502
|
r
|
eems_grid_dist.R
|
library(dplyr)
library(reshape2)
require(fields)
require(readr)
require(abind)
D <- snakemake@input$mat
outname <- snakemake@output[[1]]
statname=snakemake@params$statname
#load mats
l <- lapply(D, read.table)
l[['along']] = 3
a <- do.call(abind, l)
dmat <- apply(a, 1:2, mean)
rownames(dmat) <- 1:nrow(dmat)
colnames(dmat) <- 1:nrow(dmat)
v <- melt(dmat)
names(v) <- c('grid.x', 'grid.y', statname)
v <- v %>% filter(grid.x < grid.y)
write.csv(v, outname, row.names=F)
|
83bee79d90646968b6dbdfc78d185f8dcf0590fa
|
80e67f800de8107de6aa06b0d0e3613cc0fd8366
|
/Residual Analysis.R
|
d82d9cf6228e6a3375c470086b9d213d9c818c70
|
[] |
no_license
|
khurram6968/Residual-Analysis
|
201f82ace61c75f62f2cbd6c29702be43626d879
|
fb7a56f3abd5505aad1434a3ff2582080a301187
|
refs/heads/master
| 2020-03-30T08:10:11.294280
| 2018-10-01T18:14:02
| 2018-10-01T18:14:02
| 150,994,712
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,601
|
r
|
Residual Analysis.R
|
#Residula Analysis
#Here i use cars data in R
lm_model=lm(dist~speed,data=cars)
lm_model
#We can check heterosedasticity in the models there are two ways:
#1-Graphically
#2-Through Statistical Test
###1
par(mfrow=c(2,2))#to set graph in one panel 2row and 2col.
plot(lm_model)
#If there is no hetroscedasticity in data
#the red line is flat and
#there points is completely random(no pattern) and
#equal distribution throughout range of X-axis.
####But in our case the red line is slightly curved and
#the residuals increase as the fitted Y values.
#This means Heteroscedasticity in our data.
###2
#We use Breusch-Pagan Test and NCV-Test to check heteroscedasticity in our data.
library(lmtest)# for bptest() function
bptest=bptest(lm_model)
bptest
library(car)# for ncvTest() function
ncv_test=ncvTest(lm_model)
ncv_test
#Null Hypothesis H0=Variance of Residuals is constant(no Heteroscedasticity)
###Rectify
#Now we use Box_Cox_Transformation is a mathmetical method a continous variable to transform
#into normal distribution.
library(caret)# for BoxCoxTrans() function
dist_Boxcox_trans=BoxCoxTrans(cars$dist)
dist_Boxcox_trans
dist_new=predict(dist_Boxcox_trans,cars$dist)
cars=cbind(cars,dist_new)
#make new model and check graphically and through statistical test
new_lm.model=lm(dist_new~speed,data = cars)
plot(new_lm.model)
new_bptest=bptest(new_lm.model)
new_bptest
new_ncvtest=ncvTest(new_lm.model)
new_ncvtest
#So Null Hypothesis accepted.there is no heteroscedasicty.
#Why remove heteroscedasticity in model because for better prediction.
|
daf239a0f8add01d1879c2ad4a6637d6f9b531d2
|
afae537bcbbbfea2b260f431a0a3ed50f447a515
|
/results/.ipynb_checkpoints/utils-checkpoint.R
|
db46f801630228d6a7aeb06e40ec145b2b258d0d
|
[
"MIT"
] |
permissive
|
lgarayva/ex-modulo-3-comp-matricial-svd-czammar
|
2adab9c33eeb76173a8dbea9609febb0c4f2d83c
|
c070ad3a2c57c0c02059c460dd6b73d803eebe5d
|
refs/heads/master
| 2022-04-17T22:17:14.176329
| 2020-04-19T18:09:56
| 2020-04-19T18:09:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,629
|
r
|
utils-checkpoint.R
|
### 1.1 Generación de índices
indices <- function(n) {
# Crea una lista de tamaño (n-1)n/2 con pares de índices de la siguiente
# manera: (1,2),..,(1,n),(2,3),..,(2,n),...,(n-1,n)
# Args:
# n (int): número entero postivo
# se refiere al número de columnas de una matriz
#Returns:
# lista con pares de índices
a <- NULL
b <- NULL
indices <- NULL
for (i in 1:(n-1)){
a <- append(a,rep(i,n-i))
b <- append(b,seq(i+1,n))
}
for(i in 1:round(n*(n-1)/2))
indices[[i]] <- list(c(a[i], b[i]))
indices
}
### 1.2 Verificación de ortogonalidad entre vectores
ortogonal <- function(u,v,TOL=10^-8){
# Verifica si dos vectores son ortogonales, de acuerdo a cierto nivel de tolerancia,
# arrojando un 1 si lo es, y un 0 si no lo es.
# Args:
# u (vector): vector de dimension n,
# v (vector): vector de dimension n,
# TOL (numeric): real positivo, que sirve como parametro de tolerancia para evaluar ortogonalidad de u y v.
# Notas:
# 1) Valor por default TOL es 10^-8
# 2) Se sugiere una TOL mayor a 10^-32.
# Returns:
# Valor booleano 0 (no son ortongoales), 1 (son ortogonales)
if ( norm(u,type ="2") < TOL | norm(v,type ="2") < TOL){ret<-0}
else{
if( (u%*%v)/(norm(u,type ="2")*norm(v,type ="2")) < TOL){ret<-1}
else{ret<-0}
}
ret
}
### 1.3 Función signo
signo<-function(x) {
# Indica el signo de un número x
# Args:
# x (numeric): número a revisar
# Returns:
# 1 si el número es positivo o cero
# -1 si el número es negativo
ifelse(x<0,-1,1)
}
### 1.4 Solver dada descomposición SVD
solver <- function(U,S,V,b){
# Construye la solución de un sistema de ecuaciones a partir de matrices
# U, S, V, y vector b. Se asume que S es diagonal.
# Para ello resuelve S d = U^Tb, y construye x=Vd.
# Notas:
# 1) Se utilizó la función backsolve para resolver el sistema triangular.
# 2) Al ser S diagonal, es indistinto si usar un solver para matrices traingulares inferiores o superiores.
# Args:
# U (matriz): matriz para lado derecho de sistema S d = U^Tb, con entrada reales y dimension m x n,
# S (matriz): matriz diagonal, que define sistema sistema S d = U^Tb, con entrada reales y dimension n x n,
# V (matriz): para construir x, con entrada reales y dimension n x n,
# b (vector): vector con el que se forma lado derecho de primer sistema, de dimension m.
# Returns:
# x (vector): vector formado por la solucion de S d = U^Tb, multiplicado por V, con dimension n
d = backsolve(S, t(U)%*%b)
x = V%*%d
return(x)
}
|
3a0f17abd75d4300c17abd08a4760a0f8a86c017
|
360df3c6d013b7a9423b65d1fac0172bbbcf73ca
|
/FDA_Pesticide_Glossary/flumetsulam.R
|
f724e55b9bb98bbc71741d7ba96fab1c24611994
|
[
"MIT"
] |
permissive
|
andrewdefries/andrewdefries.github.io
|
026aad7bd35d29d60d9746039dd7a516ad6c215f
|
d84f2c21f06c40b7ec49512a4fb13b4246f92209
|
refs/heads/master
| 2016-09-06T01:44:48.290950
| 2015-05-01T17:19:42
| 2015-05-01T17:19:42
| 17,783,203
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 222
|
r
|
flumetsulam.R
|
library("knitr")
library("rgl")
#knit("flumetsulam.Rmd")
#markdownToHTML('flumetsulam.md', 'flumetsulam.html', options=c("use_xhml"))
#system("pandoc -s flumetsulam.html -o flumetsulam.pdf")
knit2html('flumetsulam.Rmd')
|
fcfdd228499aeced5c7a566de55912b28573664b
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/B_analysts_sources_github/jennybc/r-graph-catalog/fig05-08_monterey-bay-aquarium-data-mosaic-plot-preliminaries.R
|
e3399571a02f2a7f4fd63fa4b625f7a0ad155fc1
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,614
|
r
|
fig05-08_monterey-bay-aquarium-data-mosaic-plot-preliminaries.R
|
## NOTE: THE DATA USED WAS ESTIMATED, WE DID NOT HAVE ACCESS TO THE
## RAW/ORIGINAL DATA.
library(ggplot2)
library(plyr)
library(reshape2)
library(gridExtra)
this_base <- "fig05-08_monterey-bay-aquarium-data-mosaic-plot-preliminaries"
my_data <- data.frame(
time = c("S", "M", "L"),
dont_like = c(20, 7, 1),
neutral = c(35, 41, 4),
like = c(60, 114, 29))
# helper variables
my_data$total <- with(my_data, like + neutral + dont_like)
my_data$xmax <- with(my_data, cumsum(total))
my_data$xmin <- with(my_data, xmax - total)
my_data_long <- melt(my_data, c("time", "xmin", "xmax", "total"))
my_data_long$time <- factor(my_data_long$time, c("S", "M", "L"))
my_data_long1 <- ddply(my_data_long, .(time), transform, ymax = cumsum(value))
my_data_long1 <- ddply(my_data_long1, .(time), transform, ymin = ymax - value)
my_data_long1$ymin_std <- with(my_data_long1, (ymin / total) * 100)
my_data_long1$ymax_std <- with(my_data_long1, (ymax / total) * 100)
p1 <- ggplot(my_data_long, aes(x = time, y = value)) +
geom_bar(stat = "identity") +
scale_y_continuous(breaks = seq(0, 150, 50), expand = c(0, 0)) +
theme_classic() +
theme(axis.title = element_blank(),
axis.ticks.x = element_blank())
p1
p2 <- ggplot(my_data_long, aes(x = time, y = value, fill = variable)) +
geom_bar(stat = "identity", show_guide = FALSE) +
scale_fill_manual(values = c("grey20", "grey50", "grey80")) +
scale_y_continuous(breaks = seq(0, 150, 50), expand = c(0, 0)) +
theme_classic() +
theme(axis.title = element_blank(),
axis.ticks.x = element_blank())
p2
p3 <- ggplot(my_data_long1,
aes(ymin = ymin_std, ymax = ymax_std, xmin = xmin,
xmax = xmax, fill = variable)) +
geom_rect(colour = "black", show_guide = FALSE, width = 0.5) +
scale_fill_manual(values = c("grey20", "grey50", "grey80")) +
scale_x_continuous(breaks = c(60, 200, 295), labels = c("S", "M", "L")) +
scale_y_continuous(breaks = c(3, 30, 75),
labels = c("Don't\nLike", "Neutral", "Likes")) +
theme_bw() +
theme(panel.border = element_blank(),
axis.ticks = element_blank())
p3
p4 <- arrangeGrob(
p1, p2, ncol = 2, heights = c(0.5, 0.5))
p5 <- arrangeGrob(
p4, p3, nrow = 2, heights = c(0.4, 0.6),
main = textGrob(
"Fig 5.8 Montery Bay Aquarium Data:\nMosaic Plot Preliminaries",
just = "top", vjust = 0.75, gp = gpar(fontsize = 14, lineheight = 1,
fontface = "bold")))
p5
ggsave(paste0(this_base, ".png"),
p5, width = 6, height = 6)
## pedantic: missing space between bars
|
e104a9e7dc4812572686434103897704de528205
|
7ae05409cd3e2c2224500c9ea2f0aa37987c1984
|
/results/star_salmon/rseqc/read_duplication/rscript/WT_REP1.DupRate_plot.r
|
f9cf7df0243c68ca8b53f9a6fa61545de687cfc4
|
[] |
no_license
|
Mufeedmk4/ag-intro
|
9aceaf34bf9068e7faa6025c525c86cdecbfe76e
|
d07ea026c11ee4833e88704b6c92d523759187e2
|
refs/heads/main
| 2023-06-04T22:50:10.211141
| 2021-06-22T16:54:51
| 2021-06-22T16:54:51
| 372,893,244
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,525
|
r
|
WT_REP1.DupRate_plot.r
|
pdf('WT_REP1.DupRate_plot.pdf')
par(mar=c(5,4,4,5),las=0)
seq_occ=c(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,97,99,100,101,102,103,104,105,106,109,111,112,113,114,115,116,118,119,120,124,126,130,132,136,139,145,147,149,153,155,163,170,191,205,241,245,296,325,360)
seq_uniqRead=c(33628,10043,2337,1829,1004,846,635,543,394,381,300,231,205,182,165,172,123,121,108,104,89,77,87,66,74,62,63,44,44,36,38,47,32,38,29,30,21,29,30,23,25,21,21,16,18,13,16,17,16,18,10,13,7,9,8,7,9,5,4,6,10,6,5,13,5,3,5,12,12,3,7,5,4,5,5,3,3,1,4,2,1,2,1,4,1,4,2,1,4,1,3,2,2,2,1,2,4,1,2,1,1,2,1,1,2,1,3,1,1,1,1,2,1,2,1,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1)
pos_occ=c(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,109,110,111,112,113,114,115,117,118,120,122,123,124,125,127,128,129,130,131,133,136,137,139,141,144,148,150,151,158,160,161,164,167,171,173,176,179,180,184,213,216,239,260,280,321,356,381)
pos_uniqRead=c(19066,9045,2021,1661,924,795,564,483,399,356,291,246,210,209,155,159,153,127,130,108,100,73,88,69,82,58,79,67,46,45,48,47,32,40,34,44,39,39,25,29,26,26,30,24,25,20,14,20,14,14,18,31,15,15,8,15,13,14,12,12,11,9,1,9,4,6,9,8,12,4,8,4,8,9,4,7,6,4,2,8,3,5,4,3,3,3,7,2,3,5,4,5,5,1,4,1,4,3,3,3,2,2,1,3,2,2,2,2,2,1,1,1,1,2,1,1,2,1,1,2,2,1,2,2,2,1,3,2,1,1,1,1,2,2,1,1,1,1,1,1,2,1,2,1,1,1,1,1,1,1,1,1,1)
plot(pos_occ,log10(pos_uniqRead),ylab='Number of Reads (log10)',xlab='Occurrence of read',pch=4,cex=0.8,col='blue',xlim=c(1,500),yaxt='n')
points(seq_occ,log10(seq_uniqRead),pch=20,cex=0.8,col='red')
ym=floor(max(log10(pos_uniqRead)))
legend(300,ym,legend=c('Sequence-based','Mapping-based'),col=c('blue','red'),pch=c(4,20))
axis(side=2,at=0:ym,labels=0:ym)
axis(side=4,at=c(log10(pos_uniqRead[1]),log10(pos_uniqRead[2]),log10(pos_uniqRead[3]),log10(pos_uniqRead[4])), labels=c(round(pos_uniqRead[1]*100/sum(pos_uniqRead*pos_occ)),round(pos_uniqRead[2]*100/sum(pos_uniqRead*pos_occ)),round(pos_uniqRead[3]*100/sum(pos_uniqRead*pos_occ)),round(pos_uniqRead[4]*100/sum(pos_uniqRead*pos_occ))))
mtext(4, text = "Reads %", line = 2)
dev.off()
|
dfbc7ddfc8da147eed765bb0a622cf40f0a15ae5
|
c484d8f11ab6af74f8da0303dffac019f1aacdb2
|
/script/aggregate_clean_phys_data.R
|
31c2459d3fb2ab81f574a1978437f0b0d3ea5f81
|
[] |
no_license
|
nthun/nightmare_and_ans
|
ea343f485e5c15111f0e130f426e02a4e8212aa3
|
d473a34f4be3a2e948f23843b4621a607c2cdf33
|
refs/heads/master
| 2021-04-03T03:01:12.116418
| 2018-09-28T16:38:25
| 2018-09-28T16:38:25
| 124,545,367
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,264
|
r
|
aggregate_clean_phys_data.R
|
# Create one single clean file for the aggregated ECG and SCR data
dir = "all_data/converted/"
ecg_pattern = "_ECG.txt"
scr_pattern = "_SCR.txt"
# Aggregate all ECG data and write it in a single file
all_ecg <-
tibble(file = fs::dir_ls(dir, regexp = ecg_pattern)) %>%
mutate(name = str_replace(file, str_glue("{dir}(\\d+.*){ecg_pattern}$"),"\\1")) %>%
separate(name, into = c("id","session"), extra = "merge") %>%
mutate( data = map(file, ~read_csv(file = .x,
col_names = c("time", "ecg"),
col_types = "dd")))
all_ecg %>%
unnest() %>%
select(-file) %>%
write_csv("all_data/clean/all_ecg.csv")
# Aggregate all SCR data and write it in
all_scr <-
tibble(file = fs::dir_ls(dir, regexp = scr_pattern)) %>%
mutate(name = str_replace(file, str_glue("{dir}(\\d+.*){scr_pattern}$"),"\\1")) %>%
separate(name, into = c("id","session"), extra = "merge") %>%
mutate( data = map(file, ~read_csv(file = .x,
col_names = c("time", "scr"),
col_types = "dd")))
all_scr %>%
unnest() %>%
select(-file) %>%
write_csv("all_data/clean/all_scr.csv")
|
1e8947b71d0d6e8ec51c8f3b25a8e22cbb7bbeb5
|
9562a04eab487d910505119a8d82c6c202273fb8
|
/man/wordmargin.Rd
|
af049f90d28675f132b7dad5112b80ccf18b1b4c
|
[] |
no_license
|
markwestcott34/austin
|
847217831ce01b292cee8bc0a232e71c9a79116f
|
df1fa1271e01fce0a0d94e9ca5ff53c56b0d52a2
|
refs/heads/master
| 2021-01-23T04:10:15.423556
| 2017-05-31T07:55:29
| 2017-05-31T07:55:29
| 92,919,749
| 0
| 0
| null | 2017-05-31T07:51:38
| 2017-05-31T07:51:38
| null |
UTF-8
|
R
| false
| true
| 511
|
rd
|
wordmargin.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wfm.R
\name{wordmargin}
\alias{wordmargin}
\title{Which margin holds the words}
\usage{
wordmargin(x)
}
\arguments{
\item{x}{a word frequency matrix}
}
\value{
1 if words are rows and 2 if words are columns.
}
\description{
Checks which margin (rows or columns) of a Word Frequency Matrix holds the
words
}
\details{
Changing the wordmargin by assignment just swaps the dimnames
}
\author{
Will Lowe
}
\seealso{
\code{\link{wfm}}
}
|
f04290c5dade19e3f8e88fc03a99d1fa348b81ef
|
dbbc22c755b067dc2a494d1a3db27ae72186cda7
|
/man/gRbase-utilities.Rd
|
532086b541203833303a4212cdf352baa8756b92
|
[] |
no_license
|
velanati/gRbase
|
084ca847256ad9e304847c2900e012130cf12d3f
|
8f2ba1cc85a429a424e4259d006d7610927c4793
|
refs/heads/master
| 2021-01-22T16:25:16.328910
| 2015-08-21T00:00:00
| 2015-08-21T00:00:00
| null | 0
| 0
| null | null | null | null |
ISO-8859-15
|
R
| false
| false
| 1,701
|
rd
|
gRbase-utilities.Rd
|
\name{gRbase-utilities}
\alias{colwiseProd}
\alias{is_subsetof_}
\alias{get_superset_}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Utility functions for gRbase}
\description{
Utility functions for gRbase package. Includes 'faster versions' of
certain standard R functions.
}
%%\usage{
%% uniquePrim(x)
%% setdiffPrim(x,y)
%% intersectPrim(x,y)
%% unlistPrim(l)
%%}
%- maybe also 'usage' for other objects documented here.
%\arguments{
% \item{x, y}{Vectors}
% \item{l}{A list (of vectors)}
% \item{recursive}{logical. Should unlisting be applied to list components of x?}
% \item{use.names}{logical. Should names be preserved?}
%}
\details{
\code{colwiseProd} multiplies a vector and a matrix columnwise (as
opposed to rowwise which is achieved by \code{v*M}). Hence
\code{colwiseProd} does the same as \code{t(v*t(M))} - but it does so
faster for numeric values.
}
\value{
A vector or a logical.
}
%\references{ ~put references to the literature/web site here ~ }
\author{Søren Højsgaard, \email{sorenh@math.aau.dk}}
%% \note{
%% Use the xxxxPrim functions with caution!
%% %~Make other sections like Warning with \section{Warning }{....} ~
%% }
%% \seealso{
%% \code{\link{unique}}, \code{\link{setdiff}}, \code{\link{unlist}}
%% }
\examples{
## colwiseProd
M <- matrix(1:16, nrow=4)
v <- 1:4
t(v*t(M))
colwiseProd(v,M)
system.time(for (ii in 1:100000) t(v*t(M)))
system.time(for (ii in 1:100000) colwiseProd(v,M))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{utilities}
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
1a4e29d4b1736f9709a8c11a02bb1aea1dcdac69
|
48d2c8117c4604e32bef0752f16447641bd82718
|
/electability/R/LinearElectionModel.R
|
035f92058932d59783625bac70d936a0f92451d1
|
[
"MIT",
"CC-BY-4.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
tmcintee/electability-2020-pub
|
b3334cf5ada9c74a43f5cdc9bbb5742cfef290d1
|
5dd97241c7551633890020b4a5ce92eff78dc468
|
refs/heads/master
| 2020-12-13T09:14:24.949548
| 2020-01-16T17:23:56
| 2020-01-16T17:23:56
| 234,372,855
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,595
|
r
|
LinearElectionModel.R
|
LinearElectionModel <- function(df, ev_frame)
{
answer <- ev_frame %>%
select(State, Total.EV,Total,Democratic,Republican,Year)
answer$Variability <- 0
answer$Share.Democratic <- 0.5
answer$Share.Republican <- 0.5
answer$Weight <- 0
for(state in ev_frame$State)
{
print(state)
temp_df <- df %>%
select(State,
Weight,
Share.Democratic,
Share.Republican,
Year) %>%
filter(State == state)
target <- which(ev_frame$State == state)
answer$Variability[target] <- var(temp_df$Share.Democratic-temp_df$Share.Republican,na.rm = TRUE)
answer$Share.Democratic[target] <- lm(data = temp_df,
Share.Democratic ~ Year)[[1]][[1]] +
answer$Year[target]*lm(data = temp_df,
Share.Democratic ~ Year)[[1]][[2]]
answer$Share.Republican[target] <- lm(data = temp_df,
Share.Republican ~ Year)[[1]][[1]] +
answer$Year[target]*lm(data = temp_df,
Share.Republican ~ Year)[[1]][[2]]
answer$Weight[target] = sum(temp_df$Weight,na.rm = TRUE)
}
answer <- answer %>%
inner_join(ev_frame %>%
select(State, Total.EV,Total,Democratic,Republican)) %>%
mutate(Democratic.EV = Total.EV*(Share.Democratic > Share.Republican),
Republican.EV = Total.EV*(Share.Republican > Share.Democratic),
Democratic = Share.Democratic*Total*(Total > 0),
Republican = Share.Republican*Total*(Total > 0))
return(answer)
}
|
0632660f3bc60c547e3f15e25e79fb6ae98b8be6
|
81c72a71a68de09c1c78b1091186f613fcbaae6e
|
/figure/etFlux.Fig.dT.Summer.USA.R
|
18ddcdc1b59f92b5c71ffbdd8f366b507a265190
|
[] |
no_license
|
planttrt/etFlux
|
4ff06ff45d7f7dfa5e59345e9c56a364f24c8d7e
|
11be25df8a34b4a6aaa0e4db34414cd056ecce8a
|
refs/heads/master
| 2020-04-20T09:58:27.754429
| 2018-06-15T20:56:06
| 2018-06-15T20:56:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,042
|
r
|
etFlux.Fig.dT.Summer.USA.R
|
physio <- shapefile('data/physioProvinceLatLon/physioProvinceLatLon.shp')
# source('transAuxFuncs.R')
r <- sapply(dir('~/Box Sync/Home Folder/Private/DATA/DT/4K/NORM//', recursive = T, full.names = T), raster)
rng <- c(5,20)
# colList <- rev(colList.FunAndTropical)
# colList <- colList.Ideas4Homes
# colList <- colList.WarmAndRustic[c(2,4,1)]
colList <- c('#4897D8', '#ffffff','#C60000')
r <- rMean(r[6:9])
r [r< rng[1]] <- rng[1]
r [r> rng[2]] <- rng[2]
png('figure/etFlux.Fig.dT.Summer.USA.png', width = 6.5, height = 3.5, res = 300, units = 'in')
par(mar=c(0,0,2,0), bty='n')
plot(r, axes=F,
# xlim=c(-90.5,-74.5), ylim=c(25,40),
zlim=rng, #breaks=bks,
col=colorRampPalette(colList)(100))
map('usa', add = T)
plot(physio, add=T)
mtext('Thermal stress (∆T)', cex=2, font=2, line = 0)
mtext('(b)', cex=2, font=2, line = 0, adj=0)
scalebar(d = 1000, xy = c(-122, 27),type = 'bar', below = 'kilometers', divs = 4)
northArrow(xb = -72, yb = 28, len=1.5, lab="N", tcol = 'black', font.lab = 2, col='black')
dev.off()
|
0bcc6b6b5fa9b3638116c42c834e6fb15d9b7232
|
43a688d2c8f4fa45ac8569f48e94cc1213ddebe5
|
/man/positions.Rd
|
24be1bd2768ed5a54db88d340e47021b78ff2b43
|
[] |
no_license
|
cran/MaskJointDensity
|
a0c5316667697e7f5b79b9a6820c160f5f202db0
|
929b9710f19e2552d6a7e629a27cf3b85e5bffa2
|
refs/heads/master
| 2020-03-18T06:41:41.441830
| 2018-05-22T11:13:25
| 2018-05-22T11:13:25
| 134,410,302
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,175
|
rd
|
positions.Rd
|
\name{positions}
\alias{positions}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Function for finding the positions of the node representing the points at which to sample from the kernel density estimate.
}
\description{
Purely used by EQsampleDensity and sampleDensity
}
\usage{
positions(x, vectorL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
Placeholder
}
\item{vectorL}{
Vector of lengths of the number of nodes for each dimension
}
}
\references{
no references
}
\author{
Jordan Morris
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (x, vectorL)
{
backwardNumber <- 1
forwardNumber <- 1
for (i in 1:length(vectorL)) {
backwardNumber <- backwardNumber * vectorL[i]
}
backwardNumber <- backwardNumber/vectorL[1]
y1 <- rep(x[1:vectorL[1]], each = backwardNumber)
y <- y1
forwardNumber <- forwardNumber * vectorL[1]
if (length(vectorL) - 2 <= 0) {
y1 <- rep(x[vectorL[length(vectorL) - 1] + 1:vectorL[length(vectorL)]],
forwardNumber)
y <- cbind(y, y1)
}
else {
a1 <- 0
b1 <- 0
for (i in 1:(length(vectorL) - 2)) {
backwardNumber <- backwardNumber/vectorL[i + 1]
a1 <- a1 + vectorL[i]
b1 <- a1 + vectorL[i + 1]
a <- a1 + 1
b <- b1
y1 <- rep(x[a:b], each = backwardNumber)
y1 <- rep(y1, forwardNumber)
forwardNumber <- forwardNumber * vectorL[i + 1]
y <- cbind(y, y1)
}
a1 <- a1 + vectorL[length(vectorL) - 1]
b1 <- a1 + vectorL[length(vectorL)]
a <- a1 + 1
b <- b1
y1 <- rep(x[a:b], forwardNumber)
y <- cbind(y, y1)
}
return(y)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
613325071e614ef184e3b340fef07879894d6830
|
9482668bd3d152147d920c8bb252c8bc4ee04ef7
|
/What.R
|
e5a0deb5e77c41291bdcbc6f4ef0f53a18f12c3f
|
[] |
no_license
|
fredcommo/FredScript
|
315ecc803a93649aa38f29d49e4ccb143be6c4cc
|
e618fdf893163e5a5516ae59652db1b7b9b5700d
|
refs/heads/master
| 2016-09-07T22:45:20.572252
| 2013-06-27T01:25:36
| 2013-06-27T01:25:36
| null | 0
| 0
| null | null | null | null |
ISO-8859-2
|
R
| false
| false
| 935
|
r
|
What.R
|
What <- function(train, ...) {
  # Estimate the pooled within-class covariance matrix W_hat of the
  # predictors in `train` (as used in linear discriminant analysis).
  #
  # Args:
  #   train: data.frame whose FIRST column is the class label `letter`;
  #          all remaining columns are numeric predictors.
  #   ...:   unused; kept for backward compatibility with existing callers.
  #
  # Returns:
  #   A p x p matrix W_hat = sum_k (n_k / n) * Sigma_k, where Sigma_k is the
  #   biased (1/n_k) covariance matrix of class k.
  n <- nrow(train)                    # number of observations
  p <- ncol(train) - 1L               # number of predictor variables
  class_sizes <- table(train$letter)  # n_k per class
  prop <- class_sizes / n             # class proportions n_k / n

  # Centre each class on its own mean: (x_i - mu_k).
  # split() and table() share the same (sorted) class order, so indices align.
  splitdata <- split(train, train$letter)
  centred <- lapply(splitdata, function(d) scale(d[-1], scale = FALSE))

  # Pooled within-class covariance:
  # W_hat = sum_k (n_k / n) * (1 / n_k) * t(X_k) %*% X_k
  W_hat <- matrix(0, p, p)
  for (i in seq_along(centred)) {
    Xc <- as.matrix(centred[[i]])
    sigma_i <- crossprod(Xc) / class_sizes[i]  # (1/n_k) * t(X) %*% X
    W_hat <- W_hat + prop[i] * sigma_i
  }
  W_hat
}
|
4ae41733e3aa808bcbb8c5079cae0ddc0bbfa24f
|
377a111fb7585caf110377625ad0e261a44e93ed
|
/herbie/rj/organ.r
|
e0f8d859040875bfdc56de455659dc51f9a67536
|
[] |
no_license
|
gasduster99/theses
|
ba385499ea0a502b9067c243f7100d82ff1ef4ce
|
9c5f8b5d7564292e2e36547ed8d5563a8639f158
|
refs/heads/master
| 2023-08-25T18:14:09.741032
| 2023-08-22T05:33:06
| 2023-08-22T05:33:06
| 217,375,576
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 243
|
r
|
organ.r
|
organ <- function(mixedList, n_groups = max(lengths(mixedList))) {
  # Group the elements of `mixedList` by their length.
  #
  # Args:
  #   mixedList: list of numeric vectors.
  #   n_groups:  number of length classes to allocate.  Defaults to the
  #              longest element's length.  (The original implementation read
  #              an undefined global `k` here, which made the function error
  #              whenever `k` was not set in the calling environment.)
  #
  # Returns:
  #   A list of length `n_groups` whose j-th element is a matrix with one row
  #   per input vector of length j (numeric(0) when no element has length j).
  out <- replicate(n_groups, numeric(0), simplify = FALSE)
  for (idx in seq_along(mixedList)) {
    j <- length(mixedList[[idx]])
    # stack vectors of equal length as matrix rows
    out[[j]] <- rbind(out[[j]], mixedList[[idx]])
  }
  out
}
|
8e5e3c63968810850ba405be5febbc7befb721e2
|
8e95e6f1e63e43abf6c37d1c30fc1dc511c68287
|
/Data_prep/remove_probe_id_from_gene_subsets.R
|
4b64c05b3d133d61477a6a7125a112375dc545c8
|
[] |
no_license
|
stcolema/ra_chris_wallace
|
d5f849e9734d0fdaf86fec99cd31ec020649cdf2
|
69bfcbc2ed9cb42039bcef45498a1357becee5a9
|
refs/heads/master
| 2022-12-07T12:12:39.033518
| 2019-08-01T16:10:48
| 2019-08-01T16:10:48
| 169,764,705
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 516
|
r
|
remove_probe_id_from_gene_subsets.R
|
# To remove the V1 column (probe id) from each gene subset data file.
# NOTE(review): every matching CSV is overwritten IN PLACE with its first
# column dropped -- re-running the script drops a further column each time.
library(magrittr)
library(data.table)
# machine-specific data root
home_dir <- "~/Desktop/ra_chris_wallace/Data/"
dirs_to_read <- c("Big_gene_set", "Med_gene_set", "Small_gene_set") %>%
  paste0(home_dir, .)
for(curr_dir in dirs_to_read){
  # NOTE(review): "vsn_*" is a regex, not a glob -- it matches "vsn" followed
  # by ZERO or more underscores anywhere in the name; "^vsn_" was probably
  # intended.  Confirm against the actual file names before changing.
  files_present <- list.files(path = curr_dir) %>%
    grep("vsn_*", ., value = T) %>%
    paste(curr_dir, ., sep = "/")
  for(f in files_present){
    # read, drop the first column, write back to the same path
    curr_dt <- fread(f, header = T) %>%
      .[, -1]
    fwrite(curr_dt, file = f)
  }
}
|
e219188877686c85470562487cb42a09b369be96
|
158af21f249555f32e12889c7344f06042120748
|
/man/convertEnsemblToGeneSymbol.Rd
|
d5370b63077345db31e5e000feaba1bbf37dbe18
|
[
"MIT"
] |
permissive
|
RubD/Giotto
|
cd441655913246190f5097d4fbff869a9e8d7d0a
|
3e6671a2512484a7b90b421b7e697d1abc2ec760
|
refs/heads/master
| 2023-09-01T06:21:39.024110
| 2023-04-19T14:34:40
| 2023-04-19T14:34:40
| 547,482,695
| 8
| 5
|
MIT
| 2023-04-04T17:56:36
| 2022-10-07T19:03:41
|
R
|
UTF-8
|
R
| false
| true
| 641
|
rd
|
convertEnsemblToGeneSymbol.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/general_help.R
\name{convertEnsemblToGeneSymbol}
\alias{convertEnsemblToGeneSymbol}
\title{convertEnsemblToGeneSymbol}
\usage{
convertEnsemblToGeneSymbol(matrix, species = c("mouse", "human"))
}
\arguments{
\item{matrix}{an expression matrix with ensembl gene IDs as rownames}
\item{species}{species to use for gene symbol conversion}
}
\value{
expression matrix with gene symbols as rownames
}
\description{
This function converts Ensembl gene IDs from a matrix to official gene symbols
}
\details{
This function requires that the biomaRt library is installed
}
|
3c25e51a51b7a7fbc5573291c19d4fb8208faf0f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/edgar/examples/get8KItems.Rd.R
|
094b3eeeeb8dc1b2f76dc75300cc591ffdbe48b8
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 413
|
r
|
get8KItems.Rd.R
|
# Extracted example script for edgar::get8KItems.  The examples themselves are
# wrapped in "Not run" blocks, so only the library() call executes.
library(edgar)
### Name: get8KItems
### Title: Retrieves Form 8-K event information
### Aliases: get8KItems
### ** Examples
## Not run:
##D
##D output <- get8KItems(cik.no = 38079, filing.year = 2005)
##D ## Returns 8-K events information for CIK '38079' filed in year 2005.
##D
##D output <- get8KItems(cik.no = c(1000180,38079),
##D                     filing.year = c(2005, 2006))
## End(Not run)
|
6a70b26595353a258977fed9702cb7590b1ef3df
|
49d98693d18439ea60fa69b4a9e517dab49ea4a5
|
/P1/nvolverorigen.R
|
bdc8a27428863e4f248e8bc3ed36193526feb01d
|
[] |
no_license
|
VictorOrtiz0320/SimulacionComputacional
|
24f2cec6c648bdc4dc1a461d272813d8f542059a
|
0dcfd683b1005ff531ef99ce4b0bd66764157b11
|
refs/heads/master
| 2021-01-15T16:47:30.521679
| 2017-12-12T17:39:06
| 2017-12-12T17:39:06
| 99,720,927
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 855
|
r
|
nvolverorigen.R
|
# Random-walk simulation: count how often a simple symmetric random walk
# returns to the origin in 1, 2 and 3 dimensions, over `dur` steps each.
dur=1000
# per-dimension origin-return counters
contador1=0
contador2=0
contador3=0
for (dim in 1:3) {
  # start at the origin of a `dim`-dimensional integer lattice
  pos=rep(0,dim)
  origen=c(pos)
  contador=0
  for (t in 1:dur) {
    # pick one coordinate and move it +1 or -1 with equal probability
    cambiar <- sample(1:dim, 1)
    cambio <- 1
    if (runif(1) < 0.5) {
      cambio <- -1
    }
    pos[cambiar]= pos[cambiar]+ cambio
    # count a return whenever the walk is back at the origin
    if (all (pos==origen)){
      contador=contador +1 }
    else{
      contador=contador
    }
    print (pos)
  }
  # "number of times the walk returned to the origin" (runtime string kept)
  print ("N. veces que vuelve al origen")
  print (contador)
  # accumulate the count for this dimensionality
  if (dim==1){
    contador1=contador1+ contador
  }
  if(dim==2){
    contador2=contador2+ contador
  }
  if (dim==3){
    contador3=contador3+ contador
  }
}
contador1
contador2
contador3
datos=data.frame()
# NOTE(review): after the loop `dim` is 3, so sample(1:dim) is a random
# permutation of 1:3 and sort() just recovers 1:3 -- `1:3` would be clearer.
dimensiones=sort(sample(1:dim))
norigen=c(contador1,contador2,contador3)
datos=rbind(norigen,dimensiones)
# bar chart of origin-return counts per dimension
barplot(norigen,main="Practica 1", xlab="Dimensiones", ylab="N. veces vuelve al origen",names.arg=dimensiones)
|
c665bc31b68702cfee00414524ec5db6182fd3d6
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/gets/examples/ES.Rd.R
|
7ab02c97de66ef4e50711222927db57046c16175
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 573
|
r
|
ES.Rd.R
|
# Extracted example script for gets::ES and gets::VaR: expected shortfall and
# value-at-risk from a fitted arx (AR-X with ARCH) model.
library(gets)
### Name: ES
### Title: Conditional Value-at-Risk (VaR) and Expected Shortfall (ES)
### Aliases: ES VaR
### Keywords: Statistical Models Time Series Econometrics Financial
###   Econometrics
### ** Examples
##generate random variates, estimate model:
y <- rnorm(50)
mymodel <- arx(y, arch=1)
##extract 99% expected shortfall:
ES(mymodel)
##extract 99%, 95% and 90% expected shortfalls:
ES(mymodel, level=c(0.99, 0.95, 0.9))
##extract 99% value-at-risk:
VaR(mymodel)
##extract 99%, 95% and 90% values-at-risk:
VaR(mymodel, level=c(0.99, 0.95, 0.9))
|
72969e7d70f906d8a54813159d2ab37d3c667fa7
|
8deba66229f92b0280e2a5281a71aff5a91ee8b4
|
/miscellany/R_scripts/amend_wintimes.R
|
112419c921f3322c9317bfd344f7b4e3874b05dc
|
[] |
no_license
|
hullboy73/deepmine
|
e57b63734de1cb41f3e3520592e85e52450b83bc
|
e2f3bac3e869b7def43f97296b50ee94f22e7ffa
|
refs/heads/master
| 2020-05-29T11:46:35.955508
| 2016-09-21T03:09:57
| 2016-09-21T03:09:57
| 68,774,609
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 519
|
r
|
amend_wintimes.R
|
# amend_wintimes.R
# Summarise race winning times by (distance, racecourse, raceclass):
# minimum, mean, and the 0%/5% quantiles of wintime.
# NOTE(review): `age_wintimes` must already exist in the calling environment,
# and library(dplyr) is loaded but unused here (aggregate/quantile are base R).
library(dplyr)
wintimes <- age_wintimes
# fastest winning time per group
min_wintimes <- aggregate(wintimes$wintime, by=list(wintimes$distance, wintimes$racecourse, wintimes$raceclass), FUN=min)
# average winning time per group
mean_wintimes <- aggregate(wintimes$wintime, by=list(wintimes$distance, wintimes$racecourse, wintimes$raceclass), FUN=mean)
# 0th and 5th percentile winning times per group (NAs dropped)
wintimes_95 <- aggregate(wintimes$wintime, by=list(wintimes$distance, wintimes$racecourse, wintimes$raceclass), function(x) quantile(c(x),probs=c(0,.05), na.rm=TRUE))
# write.csv(wintimes_95, 'wintimes_95.csv')
|
383b1d138bb04dde074fc5c5e433ab7fd08c0e6f
|
40924a950c155e3ba69b54b8e749a892cf0c1d84
|
/Getting and Cleaning data/Quiz 4/1.R
|
c10ee9bf3405cc1f508eddb691c1eb72a20b4878
|
[] |
no_license
|
giu87/datasciencecoursera
|
cc7e5e21539e53d5efbef4cc7c3126994a8d08dd
|
f54a4f681c81b05ffc87f295d8b031eca52f822e
|
refs/heads/master
| 2020-03-30T07:17:36.990255
| 2016-11-20T17:43:54
| 2016-11-20T17:43:54
| 19,123,973
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 239
|
r
|
1.R
|
# Download the ACS housing survey CSV and split the variable names on "wgtp".
# NOTE(review): the URL-encoded destination filename is kept as-is because it
# is reused verbatim by read.csv on the next line.
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv", "getdata%2Fdata%2Fss06hid.csv")
data <- read.csv("getdata%2Fdata%2Fss06hid.csv")
x <- names(data)
# split each column name on the literal "wgtp", e.g. "wgtp15" -> c("", "15")
x_splitted <- strsplit(x, "wgtp")
print(x_splitted[123])  # inspect the split of the 123rd variable name
|
7d035cfba88e4be97b0bb8b1403ad7b8f9839b13
|
e68e99f52f3869c60d6488f0492905af4165aa64
|
/man/linalg_eigvalsh.Rd
|
db1825035277d1e99853c79c4812340957c99480
|
[
"MIT"
] |
permissive
|
mlverse/torch
|
a6a47e1defe44b9c041bc66504125ad6ee9c6db3
|
f957d601c0295d31df96f8be7732b95917371acd
|
refs/heads/main
| 2023-09-01T00:06:13.550381
| 2023-08-30T17:44:46
| 2023-08-30T17:44:46
| 232,347,878
| 448
| 86
|
NOASSERTION
| 2023-09-11T15:22:22
| 2020-01-07T14:56:32
|
C++
|
UTF-8
|
R
| false
| true
| 2,919
|
rd
|
linalg_eigvalsh.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/linalg.R
\name{linalg_eigvalsh}
\alias{linalg_eigvalsh}
\title{Computes the eigenvalues of a complex Hermitian or real symmetric matrix.}
\usage{
linalg_eigvalsh(A, UPLO = "L")
}
\arguments{
\item{A}{(Tensor): tensor of shape \verb{(*, n, n)} where \code{*} is zero or more batch dimensions
consisting of symmetric or Hermitian matrices.}
\item{UPLO}{('L', 'U', optional): controls whether to use the upper or lower triangular part
of \code{A} in the computations. Default: \code{'L'}.}
}
\value{
A real-valued tensor containing the eigenvalues even when \code{A} is complex.
The eigenvalues are returned in ascending order.
}
\description{
Letting \teqn{\mathbb{K}} be \teqn{\mathbb{R}} or \teqn{\mathbb{C}},
the \strong{eigenvalues} of a complex Hermitian or real symmetric matrix \teqn{A \in \mathbb{K}^{n \times n}}
are defined as the roots (counted with multiplicity) of the polynomial \code{p} of degree \code{n} given by
}
\details{
\Sexpr[results=rd, stage=build]{torch:::math_to_rd("
p(\\\\lambda) = \\\\operatorname{det}(A - \\\\lambda \\\\mathrm{I}_n)\\\\mathrlap{\\\\qquad \\\\lambda \\\\in \\\\mathbb{R}}
")}
where \teqn{\mathrm{I}_n} is the \code{n}-dimensional identity matrix.
The eigenvalues of a real symmetric or complex Hermitian matrix are always real.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if \code{A} is a batch of matrices then
the output has the same batch dimensions.
The eigenvalues are returned in ascending order.
\code{A} is assumed to be Hermitian (resp. symmetric), but this is not checked internally, instead:
\itemize{
\item If \code{UPLO}\ \verb{= 'L'} (default), only the lower triangular part of the matrix is used in the computation.
\item If \code{UPLO}\ \verb{= 'U'}, only the upper triangular part of the matrix is used.
}
}
\examples{
if (torch_is_installed()) {
a <- torch_randn(2, 2)
linalg_eigvalsh(a)
}
}
\seealso{
\itemize{
\item \code{\link[=linalg_eigh]{linalg_eigh()}} computes the full eigenvalue decomposition.
}
Other linalg:
\code{\link{linalg_cholesky_ex}()},
\code{\link{linalg_cholesky}()},
\code{\link{linalg_det}()},
\code{\link{linalg_eigh}()},
\code{\link{linalg_eigvals}()},
\code{\link{linalg_eig}()},
\code{\link{linalg_householder_product}()},
\code{\link{linalg_inv_ex}()},
\code{\link{linalg_inv}()},
\code{\link{linalg_lstsq}()},
\code{\link{linalg_matrix_norm}()},
\code{\link{linalg_matrix_power}()},
\code{\link{linalg_matrix_rank}()},
\code{\link{linalg_multi_dot}()},
\code{\link{linalg_norm}()},
\code{\link{linalg_pinv}()},
\code{\link{linalg_qr}()},
\code{\link{linalg_slogdet}()},
\code{\link{linalg_solve}()},
\code{\link{linalg_svdvals}()},
\code{\link{linalg_svd}()},
\code{\link{linalg_tensorinv}()},
\code{\link{linalg_tensorsolve}()},
\code{\link{linalg_vector_norm}()}
}
\concept{linalg}
|
ebaa430e8736f4b19053011cde5de8bff1fb5bd9
|
b20d447cf7cfd90c3e295c35a21c168cd2b1c451
|
/man/expected_textures_5x7_2x3.Rd
|
74c7f39d80a6bf527786c38d9a56c69c36eaa151
|
[] |
no_license
|
azvoleff/glcm
|
e584c5c8200502d11d841baea867529de6c1ea4d
|
3fadf27e1b6120d05476d62fa1ce3b38c3249152
|
refs/heads/master
| 2021-01-02T09:16:00.558599
| 2020-02-26T21:44:22
| 2020-02-26T21:44:22
| 16,706,182
| 16
| 6
| null | 2015-11-23T13:14:24
| 2014-02-10T19:15:28
|
R
|
UTF-8
|
R
| false
| true
| 776
|
rd
|
expected_textures_5x7_2x3.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glcm-package.R
\docType{data}
\name{expected_textures_5x7_2x3}
\alias{expected_textures_5x7_2x3}
\title{GLCM textures calculated in EXELIS ENVI (for testing purposes)}
\description{
This is the output from running a "co-occurrence measures" calculation to
calculate GLCM textures in EXELIS ENVI from the \code{test_raster} included
in the \code{glcm} package. The following settings were used: window size
5x7; co-occurrence shift 2 rows (y in ENVI), 3 columns (x in ENVI);
greyscale textures to compute: mean, variance, homogeneity, contrast,
dissimilarity, entropy, second moment, correlation.
}
\seealso{
\code{\link{expected_textures_3x3_1x1}}
\code{\link{expected_textures_5x3_n1xn2}}
}
|
9cd5097a630e30db44ed701e3026e986995c9639
|
0bcb0836e2870b7be4072110f0b41c1b61afd4b3
|
/ProgrammingAssignment3/rankall.R
|
807399bfea658ec018c0812a0cc1e9737b247b48
|
[] |
no_license
|
sproddle/datasciencecoursera
|
17b57443f053d2b053b8c53352a53c18a1506d22
|
f8c782de3b2a6d1bf654e49d40b801d5c1485fb2
|
refs/heads/master
| 2021-01-21T13:11:53.754722
| 2016-05-15T01:31:09
| 2016-05-15T01:31:09
| 49,173,127
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,952
|
r
|
rankall.R
|
## rankall.R
## Ranks hospitals by 30-day mortality for a given outcome, in every state.
## Outcomes: "heart attack", "heart failure", "pneumonia".
## Relevant CSV columns: [2] Hospital.Name, [7] State,
## [11]/[17]/[23] 30-day mortality for heart attack / heart failure / pneumonia.
rankall <- function(outc, num = "best") {
  ## Args:
  ##   outc: outcome name; spaces/punctuation are normalised to "_" before
  ##         matching against the internal column names.
  ##   num:  rank to report per state: "best", "worst", or a numeric rank.
  ##
  ## Returns:
  ##   data.frame with columns `hospital` and `state`, one row per state in
  ##   alphabetical order, or NULL (with a message) on invalid arguments.
  ##   `hospital` is the string "NA" when the requested rank does not exist.

  ## Read outcome data and convert mortality columns to numeric (NA on
  ## "Not Available").
  outcome <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  outcome[outcome == "Not Available" | outcome == "NA"] <- NA
  df <- data.frame(hospital      = outcome[, 2],
                   state         = outcome[, 7],
                   heart_attack  = as.numeric(outcome[, 11]),
                   heart_failure = as.numeric(outcome[, 17]),
                   pneumonia     = as.numeric(outcome[, 23]))
  ## second cleaning pass kept from the original (catches literal "NA" cells)
  df[df == "Not Available" | df == "NA"] <- NA

  ## Normalise the outcome name, e.g. "heart attack" -> "heart_attack".
  outc <- gsub("([[:punct:]])|\\s+", "_", outc)

  ## Validate arguments (same messages / NULL returns as before).
  if (!outc %in% colnames(df)[3:5]) {
    message("Invalid outcome")
    return(NULL)
  }
  if (!(is.numeric(num) || num == "best" || num == "worst")) {
    message("Invalid num")
    return(NULL)
  }

  ## Hospital at rank `num` within one state, ordering by outcome value then
  ## hospital name.  (Replaces the original eval(parse(text = ...)) with `[[`.)
  rank_in_state <- function(st) {
    hosp <- df[df$state == st, ]
    ranked <- hosp$hospital[order(hosp[[outc]], hosp$hospital)]
    ## NOTE(review): hospitals with NA for this outcome sort last, so
    ## num = "worst" can return a hospital with no data -- behaviour kept
    ## from the original; confirm against the assignment specification.
    if (is.numeric(num)) {
      toString(ranked[num])            # yields "NA" when num exceeds rows
    } else if (num == "best") {
      toString(ranked[1])
    } else {                           # num == "worst"
      toString(ranked[length(ranked)])
    }
  }

  ## One row per state, alphabetical (replaces rbind-in-a-loop).
  states <- sort(unique(df$state))
  data.frame(hospital = vapply(states, rank_in_state, character(1),
                               USE.NAMES = FALSE),
             state = states)
}
|
dfd4613bb30b726f1d861ed3ac24ba576055bc05
|
5e3011b1de8bbb6e2a0e092eb01b0b1ce678a4d6
|
/tests/testthat/test_fill_in_zeros.R
|
7bcbfd3b973b3297e6c57fd07ae55b870b47dc5f
|
[
"MIT"
] |
permissive
|
evanjflack/cfo.behavioral
|
a95a81bd89903f4b876522331860b607fa08b83b
|
b10e451026910c48a08c3bdda011bde039250959
|
refs/heads/master
| 2023-02-23T09:08:59.557128
| 2020-10-06T19:13:57
| 2020-10-06T19:13:57
| 227,146,208
| 0
| 0
|
NOASSERTION
| 2020-08-03T16:20:28
| 2019-12-10T14:50:50
|
R
|
UTF-8
|
R
| false
| false
| 448
|
r
|
test_fill_in_zeros.R
|
# ------------------------------------------------------------------------------
# Tests the fill_in_zeros function
# `fill_in_zeros` (project function) should add a zero-valued row for every id
# present in DT_id but missing from DT -- presumably; confirm against its docs.
# universe of ids
DT_id <- data.table(id = c("1", "2", "3"))
# observed data is missing id "3"
DT <- data.table(id = c("1", "2"),
                 x = rnorm(2))
# expected result: id "3" filled in with x = 0, rows ordered by id
DT_exp <- DT %>%
  rbind(data.table(id = "3", x = 0)) %>%
  .[order(id), ]
DT_new <- fill_in_zeros(DT, DT_id, "id") %>%
  .[order(id), ]
test_that("Test fill_in_zeros", {
  # mean(... == ...) == 1 means every cell matches
  expect_equal(mean(DT_new == DT_exp), 1)
})
|
68557740fe2df48519972024fe7c7eb6dbe35221
|
364c6bafaa9529fcf15851f859fdbc6cc45f7fc5
|
/r_package/AlloHubMat/R/generics.R
|
cd1b5893035f86041bd48583a216355b63c18dcf
|
[] |
no_license
|
jamieAmacpherson/allosteric_signal
|
842a5b446924a5b84b2aff589bcb6f51beabc8a1
|
297ef7cd79748d864a2a8f12ff5924064ebd4ed2
|
refs/heads/master
| 2021-10-12T04:49:27.252794
| 2019-02-01T18:13:05
| 2019-02-01T18:13:05
| 95,106,128
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,269
|
r
|
generics.R
|
#! /usr/bin/R
#===============================================================================
# Bayes variant detection
# (C) 2018 Jens Kleinjung
#===============================================================================
#_______________________________________________________________________________
#' sadata: An S4 class for Structural Alphabet (SA) data.
#' @slot fragment_letters: Prototype fragment letters forming SA
#' @slot fragment_coordinates: Coordinates of prototype fragments
#' @slot sa_trajectory: Stacked sequences (alignment) of SA encoded trajectory
#' @slot sa_blocks: Blocks of SA trajectory in a list
sadata <- setClass(
  "sadata",
  slots = c(
    fragment_letters = "vector",
    fragment_coordinates = "list",
    sa_trajectory = "matrix",
    sa_blocks = "list"
  )
)
#_______________________________________________________________________________
#' sector: An S4 class for trajectory sectors.
#' @slot contingency_pos_ver: Contingency table of variant positions in vertical model
sector <- setClass(
  "sector",
  slots = c(
    contingency_pos_ver = "vector"
  )
)
#_______________________________________________________________________________
#' hub: An S4 class for hub residues.
#' NOTE(review): the slot doc below is identical to `sector`'s -- presumably
#' copied as a template; confirm the intended semantics for hub residues.
#' @slot contingency_pos_ver: Contingency table of variant positions in vertical model
hub <- setClass(
  "hub",
  slots = c(
    contingency_pos_ver = "vector"
  )
)
#_______________________________________________________________________________
## generic functions from read_str_traj.R
#_______________________________________________________________________________
#
#' Read input structure
#'
#' \code{read_str_file}
#' reads a molecular structure file in PDB or GRO format.
#'
#' @param x Input file name (including path).
#' @param ... Further arguments, e.g. the format ("pdb" or "gro").
#' @return List of class "pdb".
#' @examples
#' pdb_object = read_str_file(str_filename, str_format)
#'
setGeneric("read_str_file", function(x, ...) standardGeneric("read_str_file"));
#_______________________________________________________________________________
#
#' Read input trajectory
#'
#' \code{read_traj_file}
#' reads a molecular trajectory file in DCD or XTC format.
#'
#' @param x Input file name (including path).
#' @param y Trajectory format ("dcd" or "xtc") -- presumably; confirm in the
#'   method implementation.
#' @param a Start frame.
#' @param b End frame.
#' @return .
#' @examples
#' trajectory_object = read_traj_file(traj_filename, traj_format, start, end)
#'
setGeneric("read_traj_file", function(x, y, a, b, ...) standardGeneric("read_traj_file"));
#_______________________________________________________________________________
## generic functions from MI.R
#_______________________________________________________________________________
#
#' Compute Mutual Information matrix for pairs of alignment columns
#'
#' \code{compute_mi_ali}
#' returns the metrics MI (Mutual Information), FSE (Finite Size Error), JE (Joint Entropy)
#' and nMI (normalised Mutual Information) for column pairs of a given alignment.
#' nMI is derived from the equation $nMI = (MI - FSE) / JE$.
#'
#' @param x A character matrix (the alignment).
#' @return A MI matrix.
#' @examples
#' variBay_object = compute_mi_ali(matrix)
#'
setGeneric("compute_mi_ali", function(x, ...) standardGeneric("compute_mi_ali"));
#===============================================================================
|
218dbf7a8f575fa0d991edc89bb27a51b1a9fedc
|
3fe1517654896fb0e0e821380c907660195b2e0f
|
/tests/testthat/test_plot_sampling.R
|
a509e605dbaad6afe0eaa4765402a280c2e107ce
|
[] |
no_license
|
eliotmiller/metricTester
|
9462035d8342e49d766ec37463cd27c2090c85c1
|
976d9b43773f1a06bc0254d3f355d2ee9f4be659
|
refs/heads/master
| 2020-04-06T06:30:42.528226
| 2019-12-12T20:38:53
| 2019-12-12T20:38:53
| 11,936,920
| 8
| 5
| null | 2017-03-21T15:00:28
| 2013-08-06T23:25:26
|
R
|
UTF-8
|
R
| false
| false
| 1,754
|
r
|
test_plot_sampling.R
|
library(metricTester)
context("Functions related to sampling plots from arenas")
# --- fixture: simulate a tree, traits and a 300x300 spatial arena -------------
tree <- geiger::sim.bdtree(b=0.1, d=0, stop="taxa", n=50)
temp <- evolveTraits(tree)
prepped <- prepSimulations(tree, arena.length=300, mean.log.individuals=4,
	length.parameter=5000, sd.parameter=50, max.distance=20, proportion.killed=0.2,
	competition.iterations=3)
singleArena <- filteringArena(prepped)
# place 20 square plots of area 1000 in the arena and sample their contents
bounds <- plotPlacer(no.plots=20, arena.length=300, plot.length=sqrt(1000))
cdm <- plotContents(singleArena$arena, bounds)
#make a simple for loop to run through the bounds and see if any are overlapping.
#make an empty vector to save errors into
error <- c()
for(i in 1:dim(bounds$plot.bounds)[1])
{
	for(j in 1:dim(bounds$plot.bounds)[1])
	{
		#if X1 is bigger than another X1 and less than the corresponding X2, and if
		#Y1 is bigger than another Y1 and less than the corresponding Y2, then there is
		#a problem
		# NOTE(review): this only tests whether plot i's (X1, Y1) corner falls
		# STRICTLY inside plot j; other partial overlaps are caught only via the
		# symmetric (j, i) pass, and edge-touching plots are never flagged.
		# Confirm that is the intended definition of "non-overlapping".
		if(any(bounds$plot.bounds[i,1] > bounds$plot.bounds[j,1] &
			bounds$plot.bounds[i,1] < bounds$plot.bounds[j,2] &
			bounds$plot.bounds[i,3] > bounds$plot.bounds[j,3] &
			bounds$plot.bounds[i,3] < bounds$plot.bounds[j,4]))
		{
			#turn error to TRUE and break the for loop, or else it will get written over
			error[i] <- TRUE
			break;
		}
		else
		{
			error[i] <- FALSE
		}
	}
}
test_that("Plots are sampled and returned in appropriate format",
{
	#cdm should be in matrix format
	expect_is(cdm$picante.cdm, "matrix")
	#plots without any species are cut, so just confirm there are at least some rows
	#species that do not occur are still in cdm, so there should be fifty columns
	expect_true(dim(cdm$picante.cdm)[1] > 1)
	expect_true(dim(cdm$picante.cdm)[2] == 50)
})
test_that("Plots are non-overlapping",
{
	expect_false(any(error))
})
37f3f2699402b3d431d7b18fc8ceeda67ddec811
|
c986e8d48f8c5142392d48df0575901fdf62894b
|
/ScientificDataAnalysis/CPS/cps12k.R
|
e027d510a38d7da81532356ceff36d9ea1e084d5
|
[
"MIT"
] |
permissive
|
subond/Temperature12k
|
2d2ee515d16fc103f149ed169abcae841163492e
|
ea854570be681680dc724fd6b2f31c0f4667483c
|
refs/heads/master
| 2022-10-07T02:37:14.488906
| 2020-06-10T16:54:30
| 2020-06-10T16:54:30
| 276,101,973
| 0
| 1
| null | 2020-06-30T13:09:43
| 2020-06-30T13:09:42
| null |
UTF-8
|
R
| false
| false
| 8,363
|
r
|
cps12k.R
|
# --- Setup: load the Temp12k LiPD database and filter its timeseries ----------
# NOTE(review): geoChronR/lipdR/compositeR are GitHub packages and the setwd()
# path is machine-specific; this script is not portable as-is.
library(geoChronR) #devtools::install_github("nickmckay/geoChronR")
library(lipdR) #devtools::install_github("nickmckay/lipd-utilities",subdir = "R")
library(purrr)
library(magrittr)
library(ggplot2)
library(compositeR)#devtools::install_github("nickmckay/compositeR")
library(foreach)
library(doParallel)
#load database
setwd("/Users/npm4/GitHub/Temperature12k/ScientificDataAnalysis/CPS")
td <- getwd()
#load("~/Dropbox/Temp12kSerialization/Temp12k/expandedMasterDatabase/newEnsemblesIn.RData")
D <- readLipd("../lipdFilesWithEnsembles/")
setwd(td)
#extract timeseries
TS <- extractTs(D)
#filter timeseries
sg <- pullTsVariable(TS,variable = "interpretation1_seasonalityGeneral")
ic <- pullTsVariable(TS,"paleoData_inCompilation")
#filter by compilation and seasonality: keep Temp12k-ensemble records whose
#interpretation is annual, summer-only or winter-only
te <- which(tolower(ic) == "temp12kensemble")
gsg <- which(tolower(sg) == "annual" | tolower(sg) == "summeronly" | tolower(sg) == "winteronly")
tu <- intersect(te,gsg)
fTS <- TS[tu]
#quick quality control, shouldn't be necessary
# ls <- map_dbl(fTS,function(x) sum(!is.na(x$paleoData_values) & !is.na(x$age)))
# ls2 <- map_dbl(fTS,function(x) length(x$paleoData_values))
# fTS <- fTS[which(ls > 10 & ls2 >10)]
#bin the TS: 100-yr bins from -50 to 12050 BP; binAges are the bin midpoints
binvec <- seq(-50, to = 12050, by = 100)
binAges <- rowMeans(cbind(binvec[-1],binvec[-length(binvec)]))
# # Latitudinal gradients ---------------------------------------------------
#
#
#setup ensemble: nens ensemble members, composited per latitude band and then
#scaled to PAGES2k reconstructions read from the working directory
nens <- 500
#composite lat bins
latbins <- seq(-90,90,by = 30)
lat <- pullTsVariable(fTS,"geo_latitude")
#load in scaling data
targets <- list.files(".",pattern = "PAGES2k",full.names = TRUE)
targetsShort <- list.files(".",pattern = "PAGES2k",full.names = FALSE)
sw <- 100 #pages 2k scaling window
targ <- purrr::map(targets,read.csv)
scaled <- comps <- counts <- c()
# NOTE(review): ensOut is sized by nens but indexed below by the lat-bin
# configuration `alb` (only [[1]] is ever used) -- confirm intended layout.
ensOut <- vector(mode = "list",length = nens)
registerDoParallel(4)
# four alternative latitude-band configurations; only the first is run
allLatBins <- vector(mode = "list",length = 4)
allLatBins[[1]] <- seq(-90,90,by = 30)
allLatBins[[2]] <- c(-90,-30,0,30,90)
allLatBins[[3]] <- c(-90,0,90)
allLatBins[[4]] <- c(-90,90)
#for(alb in 1){
alb <- 1
latbins <- allLatBins[[alb]]
# each foreach iteration builds one ensemble member: a composite per band,
# scaled to its PAGES2k target, plus an area-weighted global mean
ensOut[[alb]] <- foreach(i = 1:nens) %dopar% {
  scaled <- c()
  for(lb in 1:(length(latbins)-1)){
    fi <- which(lat > latbins[lb] & lat <= latbins[lb+1])
    tc <- compositeEnsembles(fTS[fi],binvec,spread = TRUE,duration = 3000, searchRange = c(0,7000),gaussianizeInput = FALSE,ageVar = "ageEnsemble")
    #tc <- compositeEnsembles(fTS[fi],binvec,spread = spread,...)
    comps <- cbind(comps,tc$composite)
    counts <- cbind(counts,tc$count)
    #  thisTarget <- which(grepl(targets,pattern = paste0(latbins[lb],"to",latbins[lb+1])))
    # match the one PAGES2k file for this band by filename prefix
    thisTarget <- which(stringr::str_starts(string = targetsShort, paste0(latbins[lb] ,"to", latbins[lb+1],"-scaleWindow",sw,"-PAGES2k.csv")))
    if(length(thisTarget) != 1){
      stop("target matching problem")
    }
    # scale the composite onto the 2k target over 0-2000 AD (ages in BP)
    thisScaled <- scaleComposite(composite = tc$composite,binvec = binvec,scaleYears = 1950-targ[[thisTarget]][,1],scaleData = targ[[thisTarget]][,-1],scaleWindow = 1950-c(0,2000))
    scaled <- cbind(scaled,thisScaled)
  }
  #weight by areas (proportional to the sine difference of band edges)
  zonalWeights <- sin(latbins[-1]*pi/180)-sin(latbins[-length(latbins)]*pi/180)
  zonalWeights <- zonalWeights/sum(zonalWeights)
  # NOTE(review): names are "upper to lower" (e.g. "-60 to -90"), opposite of
  # the "lower to upper" labels used in the plots further down -- confirm.
  zonalNames <- stringr::str_c(latbins[-1]," to ",latbins[-length(latbins)])
  scaledDf <- as.data.frame(scaled)
  names(scaledDf) <- zonalNames
  scaledDf$year <- binAges
  scaledDf$GlobalMean <- rowSums(t(t(scaled)*zonalWeights))
  #scaledDf$counts <- scaled[,ncol(scaled)]
  return(scaledDf)
  # ensOut[[i]] <- scaledDf
  # print(i)
}
save(list = c("ensOut"),file = "12kensOut.RData")
# sanity check: element-wise sum across ensemble members (result unused below)
test <- ensOut[[1]][[1]]
for(i in 2:nens){
  test <- test + ensOut[[1]][[i]]
}
#plotting!
allLatMeans <- map_dfc(ensOut[[alb]],extract2,"GlobalMean")
#allCounts <- map_dfc(ensOut[[alb]],extract2,"counts")
#write out data
settings <- paste0(nens,"-",length(latbins)-1,"bands-",sw,"yr2kwindow")
# NOTE(review): readr::write_csv(path=) is the deprecated argument name
# (current is file=); works with a warning on recent readr versions.
readr::write_csv(path = paste0("globalMean",settings,".csv"),x = allLatMeans)
# one CSV per latitude band: bin midpoints + all ensemble members
for(lb in 1:(length(latbins)-1)){
  lbn <- paste0(latbins[lb],"to",latbins[lb+1])
  out <- cbind(binAges,as.matrix(map_dfc(ensOut[[alb]],extract2,lb)))
  write.csv(file = paste0(lbn,settings,".csv"),x = out)
}
# global-mean ensemble ribbon plot over the full 12k window
globMean <- plotTimeseriesEnsRibbons(X = ensOut[[1]][[1]]$year,Y = as.matrix(allLatMeans),x.bin = seq(-1,12000,by = 10),alp = 0.5,colorHigh = "red",colorLow = "white",lineColor = "maroon")+
  scale_x_reverse(name = "Year (BP)",breaks = seq(0,12000,2000),oob = scales::squish)+
  scale_y_continuous(name = "Temperature (deg C) (wrt 1000-2000 AD)",limits = c(-5,2.5),oob = scales::squish)+
  theme_bw()+
  ggtitle("Global Mean Temperature (Composite Plus Scale)")
# NOTE(review): BUG -- this ggsave references `globMeanNew` BEFORE it is
# defined on the next statement; it only works if a stale `globMeanNew`
# exists in the session.  Likewise `globMeanOrig` below is never defined in
# this file.  Reorder/define before relying on this section.
ggsave(filename = "oldvnew.png",globMeanNew,width = 5,height = 4 )
globMeanNew <- globMeanOrig %>% plotTimeseriesEnsRibbons(X = ensOut[[1]][[1]]$year,Y = as.matrix(allLatMeans),x.bin = seq(-1,2000,by = 10),alp = 0.5,colorHigh = "red",colorLow = "white",lineColor = "maroon")+
  scale_x_reverse(name = "Year (BP)",breaks = seq(0,12000,2000),oob = scales::squish,limits = c(2000,0))+
  scale_y_continuous(name = "Temperature (deg C) (wrt 1000-2000 AD)",limits = c(-1,1),oob = scales::squish)+
  theme_bw()+
  ggtitle("Global Mean Temperature (Composite Plus Scale)")
ggsave(filename = "oldvnew2.png",globMeanNew,width = 5,height = 4 )
# globMean <- plotTimeseriesEnsRibbons(X = ensOut[[1]][[1]]$year,Y = as.matrix(allLatMeans),x.bin = seq(-1,12000,by = 10))+
#   scale_x_reverse(name = "Year (BP)",breaks = seq(0,12000,2000),oob = scales::squish)+
#   scale_y_continuous(name = "Temperature (deg C) (wrt 1000-2000 AD)",limits = c(-10,5),oob = scales::squish)+
#   theme_bw()+
#   ggtitle("Global Mean Temperature (Composite Plus Scale)")
# globMean
ggsave(filename = paste0("GlobalMean12k-2k-",alb,".pdf"),globMean )
#plot bands: one ribbon per latitude band, stacked on a single 12k-long panel
colorsHi <- RColorBrewer::brewer.pal(6,"Dark2")
plot12k <- ggplot()
for(lb in 1:(length(latbins)-1)){
  out <- as.matrix(map_dfc(ensOut[[alb]],extract2,lb))
  plot12k <- plotTimeseriesEnsRibbons(plot12k,X = binAges, Y = out,alp = .5,colorHigh = colorsHi[lb],lineColor = colorsHi[lb],lineWidth = 1,x.bin = seq(-1,12000,by = 10))+
    geom_text(aes(x = 6000), y = (lb * 1.5) - 11 ,label = paste(latbins[lb],"to",latbins[lb+1]),color = colorsHi[lb])
}
# NOTE(review): the chain below ends after ggtitle(), so the following
# theme_bw() is a no-op on its own line rather than added to plot12k --
# a missing trailing "+" on the ggtitle line, presumably.
plot12k <- plot12k +
  scale_x_reverse(name = "Year (BP)",breaks = seq(0,12000,2000))+
  scale_y_continuous(name = "Temperature (deg C) (wrt 1000-2000 AD)",oob = scales::squish)+
  ggtitle("Zonal Mean Temperature (Composite Plus Scale)")
theme_bw()
ggsave(filename = paste0("LatBands12k-2k-",alb,".pdf"),plot12k )
plot12k
#plot 2k reconstructions: per-band comparison of the PAGES2k target against
#the last-2000-yr slice of the Temp12k composites
targets <- list.files(".",pattern = "PAGES",full.names = TRUE)
targetsShort <- list.files(".",pattern = "PAGES",full.names = FALSE)
targ <- purrr::map(targets,read.csv)
plot2k <- ggplot()
for(lb in 1:(length(latbins)-1)){
  plotlb <- ggplot()
  thisTarget <- which(stringr::str_starts(string = targetsShort, paste0(latbins[lb],"to",latbins[lb+1])))
  if(length(thisTarget) != 1){
    stop("target matching problem")
  }
  out <- as.matrix(targ[[thisTarget]])
  out2 <- as.matrix(map_dfc(ensOut[[alb]],extract2,lb))
  # convert bin midpoints from BP to year AD and keep only the AD portion
  ba2 <- 1950-binAges
  out2 <- out2[which(ba2 > 0), ]
  ba2 <- ba2[which(ba2 > 0) ]
  #plot this band
  plotlb <- plotTimeseriesEnsRibbons(plotlb,X = out[,1], Y = scale(out[,-1],scale = FALSE),alp = .8,colorHigh = colorsHi[lb],lineColor = colorsHi[lb],lineWidth = 1,x.bin = seq(0,2000,by=2)) %>%
    #plotTimeseriesEnsRibbons(X = ba2, Y = scale(out2,scale = FALSE),alp = .4,colorHigh = colorsHi[lb],lineColor = colorsHi[lb],lineWidth = 1,x.bin = seq(0,2000,by = 10))+
    plotTimeseriesEnsLines(X = ba2, Y = scale(out2,scale = FALSE))+
    scale_x_continuous(name = "Year (AD)",breaks = seq(0,2000,500))+
    scale_y_continuous(name = "Temperature (deg C) (wrt 1-2000 AD)",oob = scales::squish)+
    ggtitle( paste(latbins[lb],"to",latbins[lb+1]))+
    theme_bw()
  ggsave(filename = paste0("12k2kcompLat_",latbins[lb],"to",latbins[lb+1],"-",alb,".pdf"),plot = plotlb)
  #plot all of them on the shared plot2k panel
  plot2k <- plotTimeseriesEnsRibbons(plot2k,X = out[,1], Y = out[,-1],alp = .5,colorHigh = colorsHi[lb],lineColor = colorsHi[lb],lineWidth = 1,x.bin = seq(0,2000,by=2))+
    geom_text(aes(x = 1500), y = (lb * .35), label = paste(latbins[lb],"to",latbins[lb+1]),color = colorsHi[lb])
  # NOTE(review): this trailing reassignment of `out` is dead code -- the loop
  # recomputes `out` at the top of the next iteration.
  out <- as.matrix(map_dfc(ensOut[[alb]],extract2,lb))
}
|
1c12e946293c50a333c81534d74d258fa8f408e8
|
cb4d7327f61fbc91c3eeced9998fc8cd96eef85f
|
/Q6.R
|
f5f3166ba2e9c7f3fb7581c466cb9af3c7a6e4fe
|
[] |
no_license
|
digust10/Ad2_Lab1
|
0a016026e8ce03107d9dc210a9b8b292c800f779
|
a1276771f16a011c096cab53a8ca2cf4e906f793
|
refs/heads/master
| 2020-12-24T13:28:33.547666
| 2014-10-18T17:54:16
| 2014-10-18T17:54:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 465
|
r
|
Q6.R
|
# Median gross salary by state (UF) in the Southeast region, highest first.
dados <- read.csv("salario.csv")
profissionaisSudeste <- subset(dados, Regiao == "Sudeste")
salarioMediana <- aggregate(Salario.Bruto ~ UF, data = profissionaisSudeste, FUN = median)
salarioMediana <- salarioMediana[order(salarioMediana[, 2], decreasing = TRUE), ]
salarioMediana
# Median gross salary by city within Rio de Janeiro (RJ), highest first.
profissionaisRJ <- subset(dados, UF == "RJ")
salarioMediana <- aggregate(Salario.Bruto ~ Cidade, data = profissionaisRJ, FUN = median)
salarioMediana <- salarioMediana[order(salarioMediana[, 2], decreasing = TRUE), ]
salarioMediana
|
62c11e65669ed5259155b8b478d0106d1751a7e3
|
5718519f59c4cb41439d48171215ef284feae197
|
/plot1.R
|
e77ab86b6e95df2b47d73d9851221452ed43b47c
|
[] |
no_license
|
CriscelyLP/Assignment1
|
3f4e3c9f5982f7a4c048afb0c385d69fbf9d06af
|
70d89ff21f46230eebec396a6ce62b2ab3986b23
|
refs/heads/master
| 2021-01-10T18:19:48.162226
| 2015-11-08T17:43:48
| 2015-11-08T17:43:48
| 45,756,435
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 327
|
r
|
plot1.R
|
# Plot 1: histogram of household global active power -----------------------
# Load the prepared dataset (defines `data`).
source("loadData.R")
# Write the histogram to a 480x480 pixel PNG device.
png(filename = "plot1.png", units = "px", width = 480, height = 480)
hist(data$Global_active_power,
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)",
     col = "red")
dev.off()
|
2c45fe9b664a7d5dd00f1ac50fabb258b9f4202e
|
2a62e3d95e54c54a3ddcdcfb1c94e8337be1d32c
|
/bioinformatics/optic_calib.r
|
b021efc14b262321fbfc69eec089b43d5f4d052e
|
[
"Apache-2.0"
] |
permissive
|
chaibio/chaipcr
|
e5d45899f614b43bbf244fba58f4169dd15f6702
|
8153be9add46f1b29b89a4dde72ed3ad907321f3
|
refs/heads/master
| 2023-03-15T20:55:35.404867
| 2023-03-11T16:54:08
| 2023-03-11T16:54:08
| 59,871,130
| 76
| 34
| null | 2023-03-11T16:54:10
| 2016-05-28T01:07:56
|
C++
|
UTF-8
|
R
| false
| false
| 17,217
|
r
|
optic_calib.r
|
# calib
# Optical-calibration configuration: which steps of a calibration experiment
# hold the background (water) and dye (signal) readings.
# old default: step ids for calibration data
# Step id of the water (background) reading.
oc_water_step_id <- 2
# Step ids of the dye (signal) readings, named by channel number.
oc_signal_step_ids <- c('1'=4, '2'=4)
# # default: preset calibration experiment(s) # not set up yet
# preset_calib_id <- 1
# dye2chst <- list( # mapping from dye to channel and step_id.
# 'FAM'=c('channel'=1, 'step_id'=4),
# 'HEX'=c('channel'=2, 'step_id'=6),
# 'JOE'=c('channel'=2, 'step_id'=8)
# )
# NOTE(review): development-time configuration region. Most alternatives are
# commented out; the only live assignments are `preset_calib_ids` and
# `dye2chst` in the "mapping from factory to user dye data" sub-block below,
# which override/define the globals used by the functions in this file.
{ # for testing: 'test_pc1.r' 'amp_2ch' option 26
{ # pre-heating
# # db_name_ <- '20160328_chaipcr'
# # filter not pre-heated
# # calib_id_ <- list('water'=76, 'signal'=c('1'=77, '2'=78))
# oc_water_step_id <- 177
# oc_signal_step_ids <- c('1'=178, '2'=179)
# # 60C pre-heated filter
# # calib_id_ <- list('water'=79, 'signal'=c('1'=81, '2'=80))
# oc_water_step_id <- 182
# oc_signal_step_ids <- c('1'=186, '2'=184)
# # 80C pre-heated filter
# # calib_id_ <- list('water'=84, 'signal'=c('1'=86, '2'=85))
# oc_water_step_id <- 193
# oc_signal_step_ids <- c('1'=197, '2'=195)
} # end: pre-heating
{ # dyes
# db_name_ <- '20160406_chaipcr'
{ # FAM, HEX
# # 0.1 ml new factory. FAM 115, HEX 116.
# # calib_id_ <- list('water'=114, 'signal'=c('1'=115, '2'=116))
# oc_water_step_id <- 264
# oc_signal_step_ids <- c('1'=266, '2'=268)
# # 0.2 ml. FAM 119, HEX 120.
# # calib_id_ <- list('water'=118, 'signal'=c('1'=119, '2'=120))
# oc_water_step_id <- 272
# oc_signal_step_ids <- c('1'=274, '2'=276)
# # 0.1 ml new user. FAM 123, HEX 124.
# # calib_id_ <- list('water'=122, 'signal'=c('1'=123, '2'=124))
# oc_water_step_id <- 280
# oc_signal_step_ids <- c('1'=282, '2'=284)
}
{ # FAM, JOE
# # 0.1 ml new factory. FAM 115, JOE 117.
# # calib_id_ <- list('water'=114, 'signal'=c('1'=115, '2'=117))
# oc_water_step_id <- 264
# oc_signal_step_ids <- c('1'=266, '2'=270)
# # 0.2 ml. FAM 119, JOE 121.
# # calib_id_ <- list('water'=118, 'signal'=c('1'=119, '2'=121))
# oc_water_step_id <- 272
# oc_signal_step_ids <- c('1'=274, '2'=278)
# # 0.1 ml new user. FAM 123, JOE 125.
# # calib_id_ <- list('water'=122, 'signal'=c('1'=123, '2'=125))
# oc_water_step_id <- 280
# oc_signal_step_ids <- c('1'=282, '2'=286)
}
} # end: dyes
{ # mapping from factory to user dye data
# db_name_ <- '20160406_chaipcr'
# Live definitions: preset (factory) calibration experiment ids ...
preset_calib_ids <- list('water'=114,
'signal'=c('FAM'=115, 'HEX'=116, 'JOE'=117))
# ... and the dye -> (channel, step_id) mapping used throughout this file.
dye2chst <- list( # mapping from dye to channel and step_id.
'FAM'=c('channel'=1, 'step_id'=266),
'HEX'=c('channel'=2, 'step_id'=268),
'JOE'=c('channel'=2, 'step_id'=270)
)
# # 0.1 ml new user. FAM 123.
# # calib_id_ <- list('water'=122, 'signal'=c('1'=123))
# oc_water_step_id <- 280
# oc_signal_step_ids <- c('1'=282)
}
} # end: for testing
# process preset calibration data
# Channels present in the preset (factory) calibration mapping.
dye2chst_channels <- unique(sapply(dye2chst, function(ele) ele['channel']))
# Reference set used by `check_subset` validations below.
dye2chst_ccsl <- list('set'=dye2chst_channels, 'description'='all channels in the preset calibration data') # ccsl=channels_check_subset_list
# Assert that one described set is contained in another.
# Both arguments are lists with elements 'set' (the values) and
# 'description' (human-readable name used in the error message).
# Stops with an informative message when containment fails; otherwise
# returns NULL invisibly.
check_subset <- function(list_small, list_big) {
  missing_members <- setdiff(list_small[['set']], list_big[['set']])
  if (length(missing_members) > 0) {
    stop(sprintf('%s is not a subset of %s.',
                 list_small[['description']],
                 list_big[['description']]))
  }
}
# function: get calibration data for adjusting well-to-well variation in absolute fluo values
#
# Args:
#   calib_id_s: a single experiment id, or a named vector/list of ids
#     (one per dye or channel -- see `calib_id_name_type`).
#   step_id_s: step id(s) within the experiment(s) holding the readings.
#   db_conn: open database connection passed to `dbGetQuery`.
#   calib_id_name_type: in the multi-id branch, how the names of
#     `calib_id_s` are interpreted: 'dye' (mapped to a channel via the
#     global `dye2chst`) or 'channel'. Not used in the single-id branch.
#
# Returns: list of named numeric vectors (fluorescence value per well),
#   one element per channel found (single-id branch) or per name of
#   `calib_id_s` (multi-id branch).
get_calib_data <- function(calib_id_s, step_id_s,
db_conn,
calib_id_name_type=c('dye', 'channel')) {
# Single experiment and single step: one query fetches all channels at once.
if (length(calib_id_s) == 1 && length(unique(step_id_s)) == 1) {
calib_id <- calib_id_s
step_id <- unique(step_id_s)
calib_qry <- sprintf('SELECT fluorescence_value, well_num, channel
FROM fluorescence_data
WHERE experiment_id=%d
AND step_id=%d
AND cycle_num=1
AND step_id is not NULL
ORDER BY well_num',
calib_id,
step_id)
calib_df <- dbGetQuery(db_conn, calib_qry)
channels_in_df <- as.character(unique(calib_df[,'channel']))
well_names <- unique(calib_df[,'well_num'])
# Split the result by channel; assumes every channel has one reading per
# well in the same well order -- TODO confirm against the schema.
calib_list <- lapply(channels_in_df,
function(channel_in_df) {
calib_vec <- c(calib_df[calib_df[,'channel'] == channel_in_df, 'fluorescence_value']) # Subsetting both row and column of a data frame results to make one of the dimension equal 1 results in a vector instead of a data frame; but subsetting only row or only column to make one of the dimension equal 1 results in a data frame. `c()` is to explicitly ensure output to be a vector, though input is already a vector.
names(calib_vec) <- well_names
return(calib_vec) } )
names(calib_list) <- channels_in_df
} else if (length(calib_id_s) > 1 || length(unique(step_id_s)) > 1) { # for testing with different experiments for calibration
# Recycle a single experiment id across all requested steps.
if (length(calib_id_s) == 1 && length(unique(step_id_s)) > 1) {
calib_id_s <- rep(calib_id_s, times=length(step_id_s))
names(calib_id_s) <- names(step_id_s) }
# One query per named entry; the channel comes from `dye2chst` ('dye')
# or directly from the entry name ('channel').
calib_list <- lapply(names(calib_id_s), function(name_calib_id) {
channel <- switch(calib_id_name_type, 'dye'=dye2chst[[name_calib_id]]['channel'], 'channel'=name_calib_id)
# message('calib_id_s[name_calib_id] :', calib_id_s[name_calib_id])
# message('step_id_s[name_calib_id] :', step_id_s[name_calib_id])
# message('as.numeric(channel) :', as.numeric(channel))
calib_qry <- sprintf('SELECT fluorescence_value, well_num
FROM fluorescence_data
WHERE experiment_id=%d
AND step_id=%d
AND channel=%d
AND cycle_num=1
AND step_id is not NULL
ORDER BY well_num',
calib_id_s[name_calib_id],
step_id_s[name_calib_id],
as.numeric(channel))
calib_df <- dbGetQuery(db_conn, calib_qry)
well_names <- unique(calib_df[,'well_num'])
calib_vec <- c(calib_df[,'fluorescence_value']) # `c()` is to transform col_df into a vector
names(calib_vec) <- well_names
return(calib_vec) } )
names(calib_list) <- names(calib_id_s) }
return(calib_list)
}
# function: check whether the data in optical calibration experiment is valid; if yes, prepare calibration data
#
# Args:
#   db_conn: open database connection.
#   calib_id_s: either a single experiment id (water and signal share it),
#     a list in 'xqrm' format (list(water=..., signal=c(...))), or in
#     'chai' format (list(water=list(calibration_id=, step_id=),
#     channel_1=list(...), ...)).
#   dye_in: dye whose channel anchors the factory-to-user scaling when
#     extrapolating calibration data for missing channels.
#   dyes_2bfild: dyes whose calibration data must be filled in from the
#     preset (factory) calibration; NULL/empty disables extrapolation.
#
# Returns: list with matrices 'water' and 'signal' (rows = channels,
#   columns = wells). Stops with an informative error when lengths are
#   inconsistent or any signal reading does not exceed its water reading.
prep_optic_calib <- function(db_conn, calib_id_s, dye_in='FAM', dyes_2bfild=NULL) {
length_calib_id_s <- length(calib_id_s)
# Work out the water/signal experiment ids (and, for 'chai' format, the
# step ids) from whichever input format was supplied.
if (length_calib_id_s == 1) { # `calib_id_s` is an integer
water_calib_id <- calib_id_s
signal_calib_id_s <- calib_id_s
} else { # calib_id_s is a list with > 1 elements
calib_id_s_names <- names(calib_id_s)
if (calib_id_s_names[2] == 'signal') { # xqrm format
water_calib_id <- calib_id_s[['water']]
signal_calib_id_s <- calib_id_s[['signal']]
} else { # chai format: "list(water=list(calibration_id=..., step_id=...), channel_1=list(calibration_id=..., step_id=...), channel_2=list(calibration_id=...", step_id=...)"
water_cs_list <- calib_id_s[['water']]
water_calib_id <- water_cs_list[['calibration_id']]
# Local assignments shadow the globals of the same names for this call.
oc_water_step_id <- water_cs_list[['step_id']]
ci_channel_is <- calib_id_s_names[2:length_calib_id_s]
# Name each 'channel_<n>' entry by its channel number <n>.
names(ci_channel_is) <- sapply(calib_id_s_names[2:length_calib_id_s],
function(calib_id_s_name) strsplit(calib_id_s_name, split='_')[[1]][2])
signal_calib_id_s <- sapply(ci_channel_is,
function(ci_channel_i) calib_id_s[[ci_channel_i]][['calibration_id']])
oc_signal_step_ids <- sapply(ci_channel_is,
function(ci_channel_i) calib_id_s[[ci_channel_i]][['step_id']]) }}
# Fetch water (background) and signal readings and validate their channels
# against the preset calibration channel set.
calib_water_list <- get_calib_data(water_calib_id, oc_water_step_id, db_conn, NULL)
channels_in_water <- names(calib_water_list)
check_subset(list('set'=channels_in_water, 'description'='Input water channels'), dye2chst_ccsl)
names(channels_in_water) <- channels_in_water
calib_signal_list <- get_calib_data(signal_calib_id_s, oc_signal_step_ids, db_conn, 'channel')
channels_in_signal <- names(calib_signal_list)
check_subset(list('set'=channels_in_signal, 'description'='Input signal channels'), dye2chst_ccsl)
names(channels_in_signal) <- channels_in_signal
# check data length: every channel must report the same number of wells.
water_lengths <- sapply(calib_water_list, length)
signal_lengths <- sapply(calib_signal_list, length)
if (length(unique(c(water_lengths, signal_lengths))) > 1) {
stop(sprintf('data length not equal across all the channels and/or between water and signal. water: %s. signal: %s', paste(water_lengths, collapse=', '), paste(signal_lengths, collapse=', '))) }
# check whether signal > water, collecting one message per bad channel.
well_names <- names(calib_water_list[[1]])
stop_msgs <- c()
for (channel_in_signal in channels_in_signal) {
calib_invalid_vec <- (calib_signal_list[[channel_in_signal]] - calib_water_list[[channel_in_signal]] <= 0)
if (any(calib_invalid_vec)) {
ci_well_nums_str <- paste(paste(well_names[calib_invalid_vec], collapse=', '), '. ', sep='')
stop_msgs[channel_in_signal] <- paste(
sprintf('Invalid calibration data in channel %s: ', channel_in_signal),
'fluorescence value of water is greater than or equal to that of dye in the following well(s) - ', ci_well_nums_str,
sep='')
} }
if (length(stop_msgs) > 0) {
stop(paste(stop_msgs, collapse='\n')) }
if (length(dyes_2bfild) > 0) { # extrapolate calibration data for missing channels
message('Preset calibration data is used to extrapolate calibration data for missing channels.')
channels_missing <- setdiff(channels_in_water, channels_in_signal)
dyes_2bfild_channels <- sapply(dyes_2bfild, function(dye) dye2chst[[dye]]['channel'])
check_subset(list('set'=channels_missing, 'description'='Channels missing calibration data'),
list('set'=dyes_2bfild_channels, 'description'='channels corresponding to the dyes of which calibration data is needed'))
# process preset calibration data
preset_step_ids <- sapply(dye2chst, function(ele) ele['step_id'])
names(preset_step_ids) <- names(dye2chst)
preset_calib_signal_list <- get_calib_data(preset_calib_ids[['signal']],
preset_step_ids,
db_conn,
'dye')
# Scale the preset (factory) signal to this instrument using the per-well
# ratio observed on the anchor dye `dye_in`.
pivot_preset <- preset_calib_signal_list[[dye_in]]
pivot_in <- calib_signal_list[[dye2chst[[dye_in]]['channel']]]
in2preset <- pivot_in / pivot_preset
for (dye_2bfild in dyes_2bfild) {
calib_signal_list[[dye2chst[[dye_2bfild]]['channel']]] <- preset_calib_signal_list[[dye_2bfild]] * in2preset }
}
# Stack each per-channel vector into a channels-by-wells matrix.
oc_data <- lapply(list('water'=calib_water_list,
'signal'=calib_signal_list),
function(ele) { calib_mtx <- do.call(rbind, ele)
rownames(calib_mtx) <- names(ele)
return(calib_mtx) } )
return(oc_data)
}
# function: perform optical (water) calibration on fluo
#
# Args:
#   fluo: matrix/data frame of fluorescence values with one column per
#     well -- TODO confirm orientation against callers.
#   oc_data: output of `prep_optic_calib` ('water' and 'signal' matrices,
#     rows = channels, columns = wells).
#   channel: channel whose calibration row should be used.
#   minus_water: if TRUE, subtract the water background before scaling.
#   show_running_time: if TRUE, report the elapsed time of this call.
#
# Returns: list with 'fluo_calib' (calibrated values; `adply` may prepend
#   an index column, see note below) and 'signal_water_diff' (scaled,
#   mean-normalized per-well signal-minus-water). Relies on the global
#   `scaling_factor_optic_calib`, defined elsewhere in the project.
optic_calib <- function(fluo, oc_data, channel, minus_water=FALSE, show_running_time=FALSE) {
# start counting for running time
func_name <- 'calib'
start_time <- proc.time()[['elapsed']]
# perform calibration
if (minus_water) {
oc_water <- oc_data[['water']][as.character(channel),]
} else oc_water <- 0
oc_signal <- oc_data[['signal']][as.character(channel),]
# Per-well signal-minus-water, normalized so its mean across wells is 1.
signal_water_diff <- oc_signal - oc_water
swd_normd <- signal_water_diff / mean(signal_water_diff)
fluo_calib <- adply(fluo, .margins=1,
function(row1) scaling_factor_optic_calib * (row1 - oc_water) / swd_normd) # if ist argument is a matrix (mc), adply automatically create a column at index 1 of output from rownames of input array (1st argument); else if 1st argument is data frame (amp), that column is not added.
# report time cost for this function
end_time <- proc.time()[['elapsed']]
if (show_running_time) message('`', func_name, '` took ', round(end_time - start_time, 2), ' seconds.')
return(list('fluo_calib'=fluo_calib,
'signal_water_diff' = scaling_factor_optic_calib * swd_normd))
}
# function: get calibration data for all the steps and channels in a calibration experiment
#
# Args:
#   db_conn: open database connection.
#   calib_info: list whose first element describes the water step and whose
#     remaining elements are named 'channel_<n>'; every element carries
#     'calibration_id' and 'step_id'.
#
# Returns: list (one element per entry of `calib_info`) of matrices with
#   one row per channel and one column per well.
get_full_calib_data <- function(db_conn, calib_info) {
calib_names <- names(calib_info)
# Entries after the first ('water') are the 'channel_<n>' steps; extract
# the channel number from each name.
channel_names <- calib_names[2:length(calib_names)]
channels <- sapply(channel_names, function(channel_name) strsplit(channel_name, '_')[[1]][2])
# num_channels <- length(channels)
# names(channel_names) <- channels
calib_list <- lapply(calib_info, function(calib_ele) {
calib_qry <- sprintf('
SELECT fluorescence_value, well_num, channel
FROM fluorescence_data
WHERE experiment_id=%d AND step_id=%d AND cycle_num=1 AND step_id is not NULL
ORDER BY well_num, channel
',
calib_ele[['calibration_id']],
calib_ele[['step_id']]
)
calib_df <- dbGetQuery(db_conn, calib_qry)
# One row per channel; columns follow the query's well ordering.
calib_data <- do.call(rbind, lapply(
channels,
function(channel) calib_df[calib_df[, 'channel'] == as.numeric(channel), 'fluorescence_value']
))
colnames(calib_data) <- unique(calib_df[,'well_num'])
return(calib_data)
})
return(calib_list)
}
# function: perform deconvolution and adjustment of well-to-well variation on
# calibration experiment 1 using the k matrix `wva_data` made from calibration
# experiment 2.
#
# Args:
#   db_conn_1, db_conn_2: database connections for the two calibration runs.
#   calib_info_1, calib_info_2: calibration info lists ('water' plus one
#     'channel_<n>' entry per channel), as consumed by `get_full_calib_data`
#     and `prep_optic_calib`.
#   dye_in, dyes_2bfild: forwarded to `prep_optic_calib` for extrapolating
#     calibration data for missing channels.
#   dye_names: labels used to rename the dye rows of the output matrices;
#     pass an empty vector to keep the original step names.
#
# Returns: list with the raw per-channel data ('ori_swvad_1'), the
#   water-subtracted array fed to `deconv` ('ary2dcv_1'), the deconvolution
#   k matrix ('k_list_temp_2'), the well-to-well adjustment data from
#   experiment 2 ('wva_data_2'), and the fully adjusted per-channel
#   matrices ('wvad_list_1').
calib_calib <- function(
    db_conn_1,
    db_conn_2,
    calib_info_1,
    calib_info_2,
    dye_in = 'FAM', dyes_2bfild = NULL,
    dye_names = c('FAM', 'HEX')
) {
    full_calib_data_1 <- get_full_calib_data(db_conn_1, calib_info_1)
    step_names <- names(full_calib_data_1)
    # Element 1 is 'water'; the remaining elements are the dye/channel steps.
    dye_idc <- 2:length(step_names)
    channel_names <- step_names[dye_idc]
    channels <- sapply(channel_names, function(channel_name) strsplit(channel_name, '_')[[1]][2])
    names(channels) <- channels
    num_channels <- length(full_calib_data_1) - 1
    well_nums <- colnames(full_calib_data_1[[1]])
    num_wells <- length(well_nums)
    # `full_calib_data_1` rearranged into the same format as `wvad_list`.
    ori_swvad_1 <- lapply(channels, function(channel)
        array(NA, dim = c(length(step_names), num_wells), dimnames = list(step_names, well_nums))
    )
    ary2dcv_1 <- array(NA, dim = c(num_channels, length(step_names), num_wells),
                       dimnames = list(channels, step_names, well_nums))
    wva_data_2 <- prep_optic_calib(db_conn_2, calib_info_2, dye_in, dyes_2bfild)
    # Fill the per-channel raw data and the water-subtracted deconvolution
    # input. `seq_len` (not 1:num_channels) is safe when num_channels == 0.
    for (channel_i in seq_len(num_channels)) {
        for (step_name in step_names) {
            fcd1_unit <- full_calib_data_1[[step_name]][channel_names[channel_i], ]
            ori_swvad_1[[channels[channel_i]]][step_name, ] <- fcd1_unit
            ary2dcv_1[channels[channel_i], step_name, ] <-
                fcd1_unit - wva_data_2[['water']][channels[channel_i], ]
        }
    }
    dcvd_out_1 <- deconv(ary2dcv_1, db_conn_2, calib_info_2)
    dcvd_array_1 <- dcvd_out_1[['dcvd_array']]
    # Adjust the deconvolved data for well-to-well variation, channel by
    # channel; columns 2:(num_wells+1) drop the index column that `adply`
    # prepends for matrix input (see note in `optic_calib`).
    wvad_list_1 <- lapply(channels, function(channel) {
        wva <- optic_calib(
            matrix(dcvd_array_1[channel, , ], ncol = num_wells),
            wva_data_2,
            channel,
            minus_water = FALSE
        )$fluo_calib[, 2:(num_wells + 1)]
        rownames(wva) <- step_names
        return(wva)
    })
    # Optionally relabel the dye rows with human-readable dye names.
    if (length(dye_names) > 0) {
        for (channel in channels) {
            rownames(ori_swvad_1[[channel]])[dye_idc] <- dye_names
            rownames(wvad_list_1[[channel]])[dye_idc] <- dye_names
        }
    }
    return(list(
        'ori_swvad_1' = ori_swvad_1,
        'ary2dcv_1' = ary2dcv_1,
        'k_list_temp_2' = dcvd_out_1[['k_list_temp']],
        'wva_data_2' = wva_data_2,
        'wvad_list_1' = wvad_list_1
    ))
}
|
8f79b3b7e8eab899d3c7edb2bade99cb80e28afa
|
b3f764c178ef442926a23652c4848088ccd40dca
|
/man/Lda-class.Rd
|
61a6169caeedd4deb9f4bab92d3b3f5963b0468c
|
[] |
no_license
|
armstrtw/rrcov
|
23e7642ff2fd2f23b676d4ad8d5c451e89949252
|
684fd97cdf00750e6d6fd9f9fc4b9d3d7a751c20
|
refs/heads/master
| 2021-01-01T19:51:52.146269
| 2013-07-24T18:18:24
| 2013-07-24T18:18:24
| 11,597,037
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,527
|
rd
|
Lda-class.Rd
|
\name{Lda-class}
\docType{class}
\alias{Lda-class}
\alias{predict,Lda-method}
\alias{show,Lda-method}
\alias{summary,Lda-method}
\title{Class "Lda" - virtual base class for all classic and robust LDA classes
}
\description{ The class \code{Lda} serves as a base class for deriving
all other classes representing the results of classical
and robust Linear Discriminant Analysis methods }
\section{Objects from the Class}{A virtual Class: No objects may be created from it.}
\section{Slots}{
\describe{
\item{\code{call}:}{the (matched) function call.}
\item{\code{prior}:}{prior probabilities used, default to group proportions}
\item{\code{counts}:}{number of observations in each class}
\item{\code{center}:}{the group means}
\item{\code{cov}:}{the common covariance matrix}
\item{\code{ldf}:}{a matrix containing the linear discriminant functions}
\item{\code{ldfconst}:}{a vector containing the constants of each linear discriminant function}
\item{\code{method}:}{a character string giving the estimation method used}
\item{\code{X}:}{the training data set (same as the input parameter x of the constructor function)}
\item{\code{grp}:}{grouping variable: a factor specifying the class for each observation.}
}
}
\section{Methods}{
\describe{
\item{predict}{\code{signature(object = "Lda")}: calculates prediction using the results in
\code{object}. An optional data frame or matrix in which to look for variables with which
to predict. If omitted, the training data set is used. If the original fit used a formula or
a data frame or a matrix with column names, newdata must contain columns with the
same names. Otherwise it must contain the same number of columns,
to be used in the same order. }
\item{show}{\code{signature(object = "Lda")}: prints the results }
\item{summary}{\code{signature(object = "Lda")}: prints summary information }
}
}
\references{
Todorov V & Filzmoser P (2009),
An Object Oriented Framework for Robust Multivariate Analysis.
\emph{Journal of Statistical Software}, \bold{32}(3), 1--47.
URL \url{http://www.jstatsoft.org/v32/i03/}.
}
\author{ Valentin Todorov \email{valentin.todorov@chello.at} }
\seealso{
\code{\link{LdaClassic}}, \code{\link{LdaClassic-class}}, \code{\link{LdaRobust-class}}
}
\examples{
showClass("Lda")
}
\keyword{classes}
\keyword{robust}
\keyword{multivariate}
|
e37f0aeb7c577aed2844d141f97fcf29baf3a098
|
b16359a14fd84fa9bc81f9b4d1d6d259fe42f770
|
/scripts/INFO 201 map.R
|
dd53d1d75e79eb18b8c8b75ca82ce47ce3bdae3c
|
[] |
no_license
|
Ethan-McGregor/seattle-collisions
|
daf3ba35708b6af2e88e3664f8813b981426c806
|
0b7236c4289e8c3c8bd8eb08a69b9e22ea2b89a4
|
refs/heads/master
| 2021-01-23T00:57:00.484769
| 2017-06-02T00:57:31
| 2017-06-02T00:57:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 857
|
r
|
INFO 201 map.R
|
# Load required packages.
library(shiny)
library(leaflet)
library(plotly)
library(dplyr)
library(knitr)

# Read the collision records and drop rows with missing values.
data <- read.csv('data/SDOT_Collisions.csv')
acc.data <- na.omit(data)

# Count occurrences per full date/time string, then collapse those strings
# to month/day and to month alone.
freq <- as.data.frame(table(acc.data$INCDTTM))
month.day <- sub('/([^/]*)$', '', freq[, 1])
month.day <- as.data.frame(table(month.day))
month.data <- gsub("/.*$", "", freq[, 1])
month <- as.data.frame(table(month.data))

# Bar chart of counts per month; one bar is highlighted in blue.
bar_graph <- plot_ly(
  month,
  x = month$month.data,
  y = month$Freq,
  type = 'bar',
  marker = list(color = c('black', 'blue', rep('black', 10)))
) %>%
  layout(title = "Collisions by month",
         xaxis = list(title = "Month number"),
         yaxis = list(title = "Total collisions occured"))

# Render the chart.
bar_graph
|
a7eeca91f04d8531baae3e4ba3604dec3bcc66a9
|
58759602aafec49cd8d854beb9b366e0af3f69c3
|
/man/regularize.Rd
|
d8ab1e6ce63153ef1f63e2cda48f9165bf637d37
|
[
"MIT"
] |
permissive
|
macheng94/bioRad
|
2ce38553b5f145ca046d90f5a173e44064976449
|
38719d9e7a15b009b9adfef5cb2415e4b6c5f7df
|
refs/heads/master
| 2020-03-10T08:20:47.285310
| 2018-03-27T21:10:20
| 2018-03-27T21:10:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,452
|
rd
|
regularize.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bioRad.R
\name{regularize}
\alias{regularize}
\title{Regularize a time series}
\usage{
regularize(ts, interval = "auto", t.min = ts$daterange[1],
t.max = ts$daterange[2], units = "mins", fill = F, verbose = T)
}
\arguments{
\item{ts}{an object inheriting from class \code{vpts}, see \link{vpts} for details.}
\item{interval}{time interval grid to project on. When '\code{auto}' the median interval in the time series is used.}
\item{t.min}{start time of the projected time series, as a POSIXct object. Taken from \code{ts} when '\code{auto}'.}
\item{t.max}{end time of the projected time series, as a POSIXct object. Taken from \code{ts} when '\code{auto}'.}
\item{units}{optional units of \code{interval}, one of 'secs', 'mins', 'hours','days', 'weeks'. Defaults to 'mins'.}
\item{fill}{logical. Whether to fill missing timesteps with the values of the closest neighbouring profile.}
\item{verbose}{logical. When \code{TRUE} prints text to console.}
}
\value{
an object of class \code{vpts} with regular time steps
}
\description{
Projects objects of class \code{vpts} on a regular time grid
}
\examples{
# locate example file:
VPtable <- system.file("extdata", "VPtable.txt", package="bioRad")
# load time series:
ts=readvp.table(VPtable,radar="KBGM", wavelength='S')
# regularize the time series on a 5 minute interval grid
tsRegular=regularize(ts, interval=5)
}
|
e7397d8ca9f5e990b310b48f7537cc627c6dbd96
|
b29b91f1c84be419c2135cb8283d34890437ef48
|
/man/derivative.Rd
|
41bc741ed53167d4c1f83e89bc4b7328f863efb3
|
[] |
no_license
|
Susarro/arqastwb
|
8ededc0e8c5f8d74e0dd8df5cd196b3082d377db
|
0dd802ec946d25f306d6018c0f0f3d52f6bddfc8
|
refs/heads/master
| 2020-05-04T12:45:17.341662
| 2019-04-03T20:57:14
| 2019-04-03T20:57:14
| 179,133,627
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 616
|
rd
|
derivative.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/roots.R
\name{derivative}
\alias{derivative}
\title{First derivative for the function and value given}
\usage{
derivative(fnc, x, incrx = 0.01)
}
\arguments{
\item{fnc}{Function}
\item{x}{Value in which the derivative should be calculated}
\item{incrx}{increment in x to calculate the derivative}
}
\value{
derivative value at the value given
}
\description{
First derivative for the function and value given
}
\examples{
fnc<-function(x) apparent_equatorial_position(x,"sun")$declination
x <- str2jd("2014-03-08")
derivative(fnc,x)
}
|
4fa0fb8d81378425577350058da9dbb19da054cd
|
7139dd72c06d321690853dca65e5ad5da68ec3f5
|
/revenue/notepad.R
|
efc4ee440462720a10048a828ee485eb2a2e920b
|
[] |
no_license
|
stylianos-kampakis/kaggle_competitions
|
94a2668789276de8c9863d09dad42e52ed69ad70
|
7b0073481c783437330542ede168a0ec2a7af28c
|
refs/heads/master
| 2021-01-19T11:48:20.745415
| 2015-05-21T13:03:10
| 2015-05-21T13:03:10
| 35,339,553
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,166
|
r
|
notepad.R
|
# Restaurant-revenue modelling script. Loads the training data, reduces
# Open.Date to the opening year, log-transforms revenue, selects terms by
# stepwise AIC, then evaluates a fixed interaction formula with
# leave-one-out cross-validation.
d<-read.csv("C:\\train.csv")
# Keep only the opening year as a numeric predictor.
d$Open.Date=as.Date(d$Open.Date,"%m/%d/%Y")
d$Open.Date=format(d$Open.Date,"%Y")
d$Open.Date=as.numeric(d$Open.Date)
# Drop identifier and categorical columns.
d$Id=NULL
d$City<-NULL
d$City.Group<-NULL
d$Type<-NULL
# Model log revenue to reduce skew.
d$revenue=log(d$revenue)
library(MASS)
# Stepwise AIC over all main effects.
fit=lm(revenue~.,d)
step <- stepAIC(fit, direction="both")
step$anova # display results
# Stepwise AIC over a large interaction model of selected P-features.
m1=lm(revenue~Open.Date*P2*P8*P9*P20*P24*P25*P26*P28,d)
step<-stepAIC(m1,direction="both")
h=summary(step)
h=coef(h)
# Build a formula string from terms with p < 0.01 (column 4 of coef table).
formula="revenue~"
for(i in seq(2,nrow(h))){
if(h[i,4]<0.01){
formula=paste(formula,rownames(h)[i],"+")
}
}
#remove the last '+'
formula=substr(formula,1,nchar(formula)-1)
m2=lm(as.formula(formula),d)
summary(m2)
library(epiR)
# Hand-picked formula from the selection above, evaluated by leave-one-out
# cross-validation (one model fit per held-out row).
formula="revenue~ P20+P28+P2+P9+Open.Date+P20:P28 + Open.Date:P20:P28 + P2:P20:P28 + P9:P20:P28 + Open.Date:P2:P20:P28 + Open.Date:P9:P20:P28 + P2:P8:P9:P20:P28 + Open.Date:P2:P8:P9:P20:P28 "
predvalues=numeric(nrow(d))
for(i in seq(1,nrow(d))){
train=d[-i,]
test=d[i,]
model=lm(as.formula(formula),train)
res=predict(model,test)
predvalues[i]=res
}
# Concordance correlation between observed and predicted log revenue.
epi.ccc(d$revenue,predvalues)$rho.c
plot(predvalues,d$revenue)
# RMSE on the original (unlogged) revenue scale.
sqrt(mean((exp(predvalues)-exp(d$revenue))^2))
|
de618019a2d9306101c6f04fd6acb1f471897ae0
|
bbde47b8c2b383416bf7f3ddbb46923e6d583fa2
|
/R/PedToNum.R
|
d35d35292d365e111081f066c2ad260d5a304e8b
|
[] |
no_license
|
JiscaH/sequoia
|
51e2988aae6273ea7af21806d8b4753b1a2eec72
|
0a7bbad4ad6afd44ded9a1743d30e4aea5e27b8f
|
refs/heads/master
| 2023-06-26T19:22:31.694572
| 2023-06-20T13:44:26
| 2023-06-20T13:44:26
| 52,889,270
| 24
| 6
| null | 2021-05-03T15:23:18
| 2016-03-01T15:51:52
|
Fortran
|
UTF-8
|
R
| false
| false
| 8,090
|
r
|
PedToNum.R
|
#=============================================================================
#' @title Turn Character Pedigree into Numeric Pedigree
#'
#' @description Genotyped individuals get rownumber in genotype matrix,
#' non-genotyped individuals either all get an arbitrary negative number
#' (\code{DoDummies = 'new'}) or only individuals with a dummy ID get the
#' corresponding negative number (\code{DoDummies = 'old'}). Note that the
#' number series will overlap for dummy males and dummy females.
#'
#' @param Pedigree dataframe with id - dam - sire. It is assumed
#' \code{\link{PedPolish}} has been called beforehand so that column names are
#' correct and all columns are as.character.
#' @param gID vector with IDs of SNP-genotyped individuals.
#' @param DoDummies 'new', 'old', or 'no' (ignore all non-genotyped
#' individuals).
#' @param DumPrefix Prefix to identify dummies when \code{DoDummies = 'old'}
#'
#' @return a list with
#' \item{PedPar}{An nInd x 2 matrix with the numeric IDs of parents of
#' genotyped individuals}
#' \item{DumPar}{A matrix with parents of dummies, see
#' \code{\link{FoldSibGPs}}}
#' \item{Renamed}{a length-2 list (dams, sires) with each element a dataframe
#' with columns: 'name' (original character ID), 'num' (number ID, negative)
#' for each dummified individual}
#' \item{Nd}{a length 2 vector, no. dummies found/created for dams and sires}
#'
#' @details If \code{DoDummies='new'}, \code{\link{GetDummifiable}} is used
#' with \code{minSibSize = "1sib"}, and any existing dummy coding is ignored
#' (F0001, F0002 may become -3, -6). If \code{DoDummies='old'}, the existing
#' dummy coding is respected (F0001, F0002 will become -1, -2), but other
#' non-genotyped individuals are ignored.
#'
#' @keywords internal
PedToNum <- function(Pedigree = NULL,
gID = NULL,
DoDummies = "new",
DumPrefix = c("F0", "M0"))
{
if (is.null(Pedigree)) {
if (is.null(gID)) stop("PedToNum needs Pedigree and/or gID")
return( list(PedPar = rep(0, 2*length(gID)),
DumPar = rep(0, 4*as.integer(length(gID)/2)),
Renamed = NA,
Nd = 0) )
} else if (!is.null(gID) && !all(gID %in% Pedigree$id)) {
Pedigree <- rbind(Pedigree,
data.frame(id = gID[!gID %in% Pedigree$id],
dam = NA,
sire = NA))
}
if (is.null(gID)) stop("Please provide 'gID'")
if (length(DumPrefix) > 2 & DoDummies=="old" & !all(Pedigree$id %in% gID))
warning(">2 DumPrefixes not supported by PedToNum", immediate.=TRUE)
if (!DoDummies %in% c("old", "new", "no"))
stop("'DoDummies' must be 'old', 'new', or 'no'")
DPnc <- nchar(DumPrefix)
# Dummy renaming tables ----
Renamed <- list(dam = data.frame(), sire=data.frame())
if (DoDummies == "new") {
Dummifiable <- GetDummifiable(Pedigree[,1:3], gID, minSibSize = "1sib")
for (k in 1:2) {
Renamed[[k]] <- data.frame(name = Dummifiable[[k]],
num = -seq_along(Dummifiable[[k]]),
stringsAsFactors = FALSE)
}
} else if (DoDummies == "old") {
for (k in 1:2) {
UniqueDummies <- sort(unique(Pedigree[substr(Pedigree[,k+1], 1,
DPnc[k]) == DumPrefix[k], k+1]))
if (length(UniqueDummies)==0) next
Renamed[[k]] <- data.frame(name = UniqueDummies,
num = -as.numeric(substr(UniqueDummies,
DPnc[k]+1, nchar(UniqueDummies))),
stringsAsFactors = FALSE)
}
}
Nd <- sapply(Renamed, nrow)
# names to numbers ----
NumPed <- matrix(0, nrow(Pedigree), 4,
dimnames=list(Pedigree$id, c("id", "dam", "sire", "sex")))
# sex used by FoldSibsGPs() to tell female/male dummies apart
# genotyped
GenoNums <- setNames(seq_along(gID), gID)
for (x in 1:3) {
NumPed[, x] <- GenoNums[Pedigree[, x]]
}
# dummies
if (DoDummies %in% c("old", "new")) {
for (k in 1:2) { # female, male
if (Nd[k] == 0) next
if (Nd[k] > 9999) stop("Too many dummies")
for (x in 1:3) { # pedigree column
if (x!=1 & x!=k+1) next
these <- Pedigree[,x] %in% Renamed[[k]]$name
NumPed[these, x] <- Renamed[[k]][match(Pedigree[these,x], Renamed[[k]]$name), "num"]
if (x==1) NumPed[these, "sex"] <- k
}
}
}
NumPed[is.na(NumPed)] <- 0
# fold GPs & out ----
PedPar <- NumPed[match(gID, Pedigree$id), 2:3]
if (DoDummies %in% c("old", "new")) {
DumPar <- FoldSibGPs(PedNum = NumPed, Ng = length(gID), Nd = Nd)
} else {
DumPar <- rep(0, 4*as.integer(length(gID)/2))
}
return( namedlist(PedPar, DumPar, Renamed, Nd) )
}
#=============================================================================
#=============================================================================
#' @title Fold IDs of Sibship Grandparents
#'
#' @description Fold IDs of sibship grandparents into a 2 x nInd/2 x 2 array, as
#' they are stored in Fortran, and then stretch this into a vector that can be
#' passed to Fortran and easily be transformed back into said 3D array.
#'
#' @param PedNum pedigree, ids replaced by numbers, dummies negative.
#' @param Ng no. genotyped indivs.
#' @param Nd length 2 vector, no. female & male dummies.
#'
#' @return An integer vector, with missing values as 0.
#'
#' @keywords internal
FoldSibGPs <- function(PedNum, Ng, Nd)
{
DumParRF <- rep(0, 4*as.integer(Ng/2))
if (any(Nd > 0)) {
SibshipGPs <- array(0, dim=c(2,max(Nd),2),
dimnames=list(c("grandma", "granddad"),
1:max(Nd), c("mat", "pat")))
for (k in 1:2) {
if (Nd[k] == 0) next
# pedigree subset: parents of dummy dams (k=1) or dummy sires (k=2)
PedDum.k <- PedNum[PedNum[,"id",drop=FALSE] < 0 & PedNum[,"sex"] == k, , drop=FALSE]
SibshipGPs[,1:Nd[k],k] <- t(PedDum.k[order(-PedDum.k[,"id"]), 2:3])
for (s in 1:Nd[k]) {
for (g in 1:2) {
x <- (k-1)*2*as.integer(Ng/2) + (s-1)*2 + g
DumParRF[x] <- SibshipGPs[g,s,k]
}
}
}
}
return( DumParRF )
}
#=============================================================================
#=============================================================================
#' @title Change Numeric Pedigree back to Character Pedigree
#'
#' @description Reverse \code{\link{PedToNum}}, 1 column at a time.
#'
#' @param x vector with numbers.
#' @param k 1=dam, 2=sire, needed to distinguish dummy females from dummy males.
#' @param gID vector with IDs of SNP-genotyped individuals; rownames of
#' genotype matrix in the exact order.
#' @param DumPrefix length-2 character vector to make dummy IDs; length-3 in
#' case of hermaphrodites.
#'
#' @return A character vector with IDs.
#'
#' @keywords internal
NumToID <- function(x, k=0, gID=NULL, DumPrefix = c("F", "M"))
{
if (length(x)==0) return()
if (any(is.na(x) | !is.wholenumber(x)))
stop("x must be whole numbers, something went wrong")
xv <- x
xv[xv < -1e6] <- xv[xv < -1e6] + 1e6 # hermaphrodite dummy clones
Nd.k <- ifelse(all(xv >= 0), 0, abs(min(xv, na.rm=TRUE)))
if (Nd.k > 9999) stop("\nMore than 9999 dummies! Cannot parse output.")
if (Nd.k > 999 && any(nchar(DumPrefix)==1))
warning("\nMore than 999 dummies! Please use DummyPrefix of >1 character to avoid ambiguity",
immediate. = TRUE)
if (length(k)==1) k <- rep(k, length(x))
if (!all(k[x<0] %in% 1:2)) stop("Invalid k")
ID <- sapply(seq_along(x), function(i) {
ifelse(x[i] > 0,
gID[x[i]],
ifelse(x[i] < -1e6,
paste0(DumPrefix[3], formatC(- (x[i] + 1e6), width=4, flag=0)),
ifelse(x[i] < 0,
paste0(DumPrefix[k[i]], formatC(-x[i], width=4, flag=0)),
NA)))
})
return( ID )
}
|
342b1d5cffbbd10293d40c5a34cd47a9bb3697e4
|
fd0ab0f09d3c07f03e0af82bf93875524c44a0e9
|
/tmp-tests/test-clumping-bed.R
|
1ec0c078417c66d8b8cda6b6e0abe6c3422eeec5
|
[] |
no_license
|
privefl/bigsnpr
|
b05f9e36bcab6d8cc86fb186c37fe94a6425960a
|
83db98f974b68132a9a3f3ee7ca388159a4c12b5
|
refs/heads/master
| 2023-08-02T13:31:18.508294
| 2023-06-30T12:15:55
| 2023-06-30T12:15:55
| 62,644,144
| 162
| 47
| null | 2022-10-12T16:46:15
| 2016-07-05T14:36:34
|
R
|
UTF-8
|
R
| false
| false
| 1,151
|
r
|
test-clumping-bed.R
|
library(bigsnpr)
fake <- snp_fake(200, 100)
fake$genotypes[] <- sample(0:3, length(fake$genotypes), replace = TRUE,
prob = c(1, 1, 1, 3))
(n_bad <- sum(big_counts(fake$genotypes)[4, ] > 100))
bed <- snp_writeBed(fake, tempfile(fileext = ".bed"))
library(testthat)
expect_warning(ind.keep <- bed_clumping(bed, thr.r2 = 0.01),
sprintf("%d variants have >50%% missing values.", n_bad))
plink <- download_plink("tmp-data")
tmp <- sub("\\.bed$", "", bed)
counts <- big_counts(fake$genotypes)
af <- drop(crossprod(counts[1:3, ], 0:2)) / colSums(counts[1:3, ]) / 2
maf <- pmin(af, 1 - af)
write.table(data.frame(SNP = fake$map$marker.ID, P = 1 - maf),
file = paste0(tmp, ".frq"), row.names = FALSE, quote = FALSE)
# Clumping
library(glue)
system(glue("{plink} --bfile {tmp} --out {tmp}",
" --clump {tmp}.frq",
" --clump-p1 1 --clump-p2 1 --clump-r2 0.2"))
ind <- match(read.table(glue("{tmp}.clumped"), header = TRUE)$SNP, fake$map$marker.ID)
2 * length(intersect(ind, ind.keep)) / (length(ind) + length(ind.keep))
length(intersect(ind, ind.keep)) / length(union(ind, ind.keep))
|
e2af9ab687247fed9aef33cd44e88f52393fa97b
|
742987e658baec8f280792b07253b8e1d7d00bf4
|
/R/Prob.branch.R
|
7e4bfdb20570f38eb40c97b1d6813f42d7e5d259
|
[] |
no_license
|
ZRChao/LRTT
|
ab083de0a8d64f688ac68ff7e2cd2cff39ff10ae
|
47ea5c46adf326e101b86b80a95019182980f178
|
refs/heads/master
| 2020-03-06T14:47:49.748541
| 2018-09-12T11:52:33
| 2018-09-12T11:52:33
| 126,942,320
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 591
|
r
|
Prob.branch.R
|
## function to simulate the probability along the tree branch
Prob.branch = function(tree, seed, dif.taxa){
p <- min(tree$edge[, 1] - 1)
prob1 <- rep(0, nrow(tree$edge))
prob2 <- rep(0, nrow(tree$edge))
for(j in (p + 1) : (p + tree$Nnode)){
set.seed(j*seed)
pi0 <- runif(1, 0.1, 0.9)
prob1[which(tree$edge[, 1] == j)] <- c(pi0, 1 - pi0)
}
prob1 -> prob2 ###case_probability
for(i in 1:length(dif.taxa)){
parent <- dif.taxa[i]
temp_diff <- which(tree$edge[, 1] == parent)
prob2[temp_diff] <- prob1[rev(temp_diff)]
}
return(cbind(prob1, prob2))
}
|
248bbbe1a224fe2f5c23d24abad3e18268b36655
|
f12cfd4a9c89ae6ae5c9d65d46c4535a75520570
|
/inst/doc/FateID.R
|
df4c819d47998f8dc3180a5a47f6ece26c95c260
|
[] |
no_license
|
dgrun/FateID
|
946747f3eed9f853bb65e6ed9e0c1bc4e2bbff31
|
f2ea6f9b4981ae47cb1e5fd55d38ae9de144ad52
|
refs/heads/master
| 2022-06-24T02:21:20.578956
| 2022-06-14T12:17:40
| 2022-06-14T12:17:40
| 94,758,534
| 20
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,073
|
r
|
FateID.R
|
## ----echo=FALSE---------------------------------------------------------------
knitr::opts_chunk$set(fig.width=8, fig.height=8)
## ----eval = FALSE-------------------------------------------------------------
# install.packages("FateID")
## -----------------------------------------------------------------------------
library(FateID)
## -----------------------------------------------------------------------------
data(intestine)
## -----------------------------------------------------------------------------
x <- intestine$x
head(x[,1:5])
## -----------------------------------------------------------------------------
y <- intestine$y
head(y)
## -----------------------------------------------------------------------------
tar <- c(6,9,13)
## -----------------------------------------------------------------------------
FMarker <- list(c("Defa20__chr8","Defa24__chr8"), "Clca3__chr3", "Alpi__chr1")
xf <- getPart(x,FMarker,fthr=NULL,n=5)
head(xf$part)
head(xf$tar)
tar <- xf$tar
y <- xf$part
## -----------------------------------------------------------------------------
rc <- reclassify(x, y, tar, clthr=.75, nbfactor=5, use.dist=FALSE, seed=12345, nbtree=NULL, q=0.9)
y <- rc$part
## -----------------------------------------------------------------------------
v <- intestine$v
rc <- reclassify(v, y, tar, clthr=.75, nbfactor=5, use.dist=FALSE, seed=12345, nbtree=NULL, q=0.9)
y <- rc$part
## -----------------------------------------------------------------------------
x <- rc$xf
## -----------------------------------------------------------------------------
x <- getFeat(v,y,tar,fpv=0.01)
## -----------------------------------------------------------------------------
tar <- c(6,9,13)
x <- intestine$x
y <- intestine$y
fb <- fateBias(x, y, tar, z=NULL, minnr=5, minnrh=10, adapt=TRUE, confidence=0.75, nbfactor=5, use.dist=FALSE, seed=12345, nbtree=NULL)
## -----------------------------------------------------------------------------
dr <- compdr(x, z=NULL, m=c("tsne","cmd","umap"), k=2, tsne.perplexity=30, seed=12345)
## -----------------------------------------------------------------------------
plotFateMap(y,dr,k=2,m="umap")
## ----eval=FALSE---------------------------------------------------------------
# plotFateMap(y,dr,k=3,m="umap")
## -----------------------------------------------------------------------------
plotFateMap(y,dr,k=2,m="umap",fb=fb,g="t6")
## -----------------------------------------------------------------------------
pr <- plotFateMap(y,dr,k=2,m="umap",trthr=.33,fb=fb,prc=TRUE)
## -----------------------------------------------------------------------------
v <- intestine$v
pr <-plotFateMap(y, dr, k=2, m="umap", g=c("Defa20__chr8", "Defa24__chr8"), n="Defa", x=v)
## -----------------------------------------------------------------------------
E <- plotFateMap(y,dr,k=2,m="umap",g="E",fb=fb)
head(E)
## -----------------------------------------------------------------------------
pr <- prcurve(y,fb,dr,k=2,m="umap",trthr=0.33,start=3)
## -----------------------------------------------------------------------------
n <- pr$trc[["t6"]]
## -----------------------------------------------------------------------------
v <- intestine$v
fs <- filterset(v,n=n,minexpr=2,minnumber=1)
## -----------------------------------------------------------------------------
s1d <- getsom(fs,nb=50,alpha=.5)
## -----------------------------------------------------------------------------
ps <- procsom(s1d,corthr=.85,minsom=3)
## -----------------------------------------------------------------------------
fcol <- sample(rainbow(max(y)))
## -----------------------------------------------------------------------------
plotheatmap(ps$nodes.z, xpart=y[n], xcol=fcol, ypart=unique(ps$nodes), xgrid=FALSE, ygrid=TRUE, xlab=FALSE)
## -----------------------------------------------------------------------------
plotheatmap(ps$all.z, xpart=y[n], xcol=fcol, ypart=ps$nodes, xgrid=FALSE, ygrid=TRUE, xlab=FALSE)
## -----------------------------------------------------------------------------
plotheatmap(ps$all.e, xpart=y[n], xcol=fcol, ypart=ps$nodes, xgrid=FALSE, ygrid=TRUE, xlab=FALSE)
## -----------------------------------------------------------------------------
plotheatmap(ps$all.b, xpart=y[n], xcol=fcol, ypart=ps$nodes, xgrid=FALSE, ygrid=TRUE, xlab=FALSE)
## -----------------------------------------------------------------------------
g <- names(ps$nodes)[ps$nodes == 1]
## -----------------------------------------------------------------------------
plotexpression(fs, y, g, n, col=fcol, name="Node 1", cluster=FALSE, alpha=.5, types=NULL)
## -----------------------------------------------------------------------------
plotexpression(fs, y, "Clca4__chr3", n, col=fcol, cluster=FALSE, alpha=.5, types=NULL)
## -----------------------------------------------------------------------------
plotexpression(fs, y, g, n, col=fcol, name="Node 1", cluster=FALSE, types=sub("\\_\\d+","",n))
## -----------------------------------------------------------------------------
group <- head(g,6)
plotexpressionProfile(fs, y, group, n, name="Node 1", cluster=FALSE)
## -----------------------------------------------------------------------------
thr <- .5
a <- "t13"
b <- "t6"
cl <- c(3,4,5)
A <- rownames(fb$probs)[fb$probs[,a] > thr]
A <- A[y[A] %in% cl]
B <- rownames(fb$probs)[fb$probs[,b] > thr]
B <- B[y[B] %in% cl]
de <- diffexpnb(v,A=A,B=B,DESeq=FALSE,norm=FALSE,vfit=NULL,locreg=FALSE)
## -----------------------------------------------------------------------------
plotdiffgenesnb(de,mthr=-4,lthr=0,Aname=a,Bname=b,padj=FALSE)
## -----------------------------------------------------------------------------
gene2gene(intestine$v,intestine$y,"Muc2__chr7","Apoa1__chr9")
## -----------------------------------------------------------------------------
gene2gene(intestine$v, intestine$y, "Muc2__chr7", "Apoa1__chr9", fb=fb, tn="t6", plotnum=FALSE)
## -----------------------------------------------------------------------------
k <- impGenes(fb,"t6")
|
439b416335dcd8d7423894cf3bb317e7d5fe5b35
|
e19b396f0dc7c8e8b489a02e650f4fbee6606d5e
|
/docs/visualisation/exercise_ggplot_09.R
|
eb3a81ca365bae3d49f54c74a8328df3b53251c4
|
[] |
no_license
|
djnavarro/robust-tools
|
0886db364f7048906d0513091e406b5d369ea3a1
|
e3c9f445bd426f687022f928879adfcda24af958
|
refs/heads/master
| 2020-12-21T00:01:27.017012
| 2020-05-30T08:03:12
| 2020-05-30T08:03:12
| 236,247,979
| 24
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,849
|
r
|
exercise_ggplot_09.R
|
# Exercise 5
#
# Luckily for us, there is an upside to my daughter's emoji terrorism.
# What we now have is a kind of "template" that we can use to draw quite
# a lot of new plots. Let's see if we can apply it to some real data!
#
# The "data_forensic.csv" file contains data published by Kristy Martire,
# Bethany Growns and myself, looking at expert and non expert judgments
# about handwriting (which is related to Kristy & Bethany's work in
# forensic science). Let's take a look.
# Getting started ---------------------------------------------------------
# The first part of the exercise is just to look at the data. Select these
# three lines of code (e.g., by clicking and dragging withe mouse, the usual
# way) and then run JUST THE SELECTED LINES. Look in the "Code" menu above
# and choose "Run selected line(s)". Also notice that there is a keyboard
# shortcut to do this... CTRL-ENTER (or COMMAND-ENTER on a Mac).
library(tidyverse)
forensic <- read_csv("data_forensic.csv")
print(forensic)
# There's a good chance R will tell you (in grey at the bottom) that it's
# only showing you some of the columns. If you want to look at the data a
# different way, try using the glimpse() fuction. Again, highlight the line
# and then run JUST THIS LINE
glimpse(forensic)
# Scatterplot: true (x-axis) versus est (y-axis) --------------------------
# Okay, now we can see what all the variables are. Now what we want to do
# is create a scatter plot. The plot we want should be the exact same
# style as the one we drew for the dino data, but we want to use
# the forensic data instead. On the x-axis we want to plot the true value
# (true frequency of a feature), and on the y-axis we want to plot what
# the participant guessed:
picture <- 🎨(🙂 = 💖) +
🎨(🙂 = 🎨(🙂 = 💖, 🙂 = 💖))
plot(picture)
|
e107fa0b8132a5fc9d77784d302c7f81a50d36bd
|
5885524cbba3f812c1eddc1912ec33304b54c1d4
|
/man/theme_bailey.Rd
|
b71d38d1d02e585c9fe57889bba17506b370c593
|
[
"MIT"
] |
permissive
|
jackobailey/jbmisc
|
a198ab9855800dfcd8bd2c9d639f6dc70413ddef
|
960ce7852fe3144359972500b2858a2976323dff
|
refs/heads/master
| 2021-12-14T19:01:18.073680
| 2021-12-09T20:29:04
| 2021-12-09T20:29:04
| 142,127,918
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 414
|
rd
|
theme_bailey.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/theme_bailey.R
\name{theme_bailey}
\alias{theme_bailey}
\title{Jack Bailey's Custom ggplot Theme.}
\usage{
theme_bailey()
}
\description{
This is a convenience function that makes it easier for me to make nice plots. It is an extension of theme_minimal() in ggplot2. It removes some lines, adds others, and changes the font to Cabin.
}
|
bfaa9535aad14304535eb8d06022e6ab95d391f3
|
feeaffa6c69c7d4b93ea404a5fc7065f4b7ff6c6
|
/TidyTuesday_TaylorSwift.R
|
72526cfd725568d948f82bc39a951b6f721bb71a
|
[] |
no_license
|
anhnguyendepocen/TidyTuesday
|
c84adfd1558fe60bcce34f4474efa77f7eec941d
|
6a14f8f9a9fbef28102ec5e0bd3911ee5bd06b8d
|
refs/heads/master
| 2023-02-02T15:26:01.128129
| 2020-12-15T16:09:03
| 2020-12-15T16:09:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,259
|
r
|
TidyTuesday_TaylorSwift.R
|
#TidyTuesday
#===============================================================================
#Taylor Swift
#@sil_aarts
#===============================================================================
library(tidyverse)
library(cowplot)
library(showtext)
library(showtextdb)
library(stringr)
library(reshape2)
library(gghighlight)
#Read file
data <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-09-29/sales.csv')
#Get date
data1 <- data %>%
separate(released, ",", into = c("month", "year"), remove = FALSE)%>%
separate(year, " ", into = c("extra", "year2"), remove = FALSE)
#Choose font
font_add_google("Faster One", "D")
font_add_google("Just Another Hand", "K")
font_add_google("Cabin Sketch", "L")
showtext_auto()
#Theme
theme_sil <- theme_classic()+
theme(
legend.background = element_rect(fill="#AE7EA8", linetype="dashed", colour ="#AE7EA8"),
legend.title= element_blank(),
plot.background = element_rect(fill = "#AE7EA8", colour = "#AE7EA8"),
panel.background = element_rect(fill = "#AE7EA8", colour = "#AE7EA8"),
plot.caption = element_text(colour = "black", size = 7, hjust= 0.7, margin = margin(4, 0, 0, 0), family="L"),
axis.ticks = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_text(colour="black", size=17, family="L"),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.line.y = element_blank(),
axis.line.x = element_blank(),
panel.grid.major.y = element_line(colour="#FFE6FF"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
plot.margin = unit(c(0.5,1,1,1), "cm")) #trbl
#Title
title <- ggplot() +
labs(title = "Taylor Swift",
subtitle = "Record sales in $ per year. Bigger 'records' are indicative for more $.\nHighlighted are the records with more than 1 million $ in sales.")+
theme_sil+
theme(
plot.background = element_rect(fill = "#AE7EA8", colour = "#AE7EA8"),
panel.background = element_rect(fill = "#AE7EA8", colour = "#AE7EA8"),
plot.title = element_text(color="black", size=60, hjust=0.5, margin = margin(2, 2, 3, 0), family="K"),
plot.subtitle = element_text(color="black", size=20, hjust=0.6, margin = margin(0, 0, 0, 0), family="L"),
plot.margin = unit(c(1,0,0,0), "cm")) #trbl
#Make some colours
col_numb <- 14
mycols <- c("#B85208","#B93947","#9A3C6E","#3E4C72", "#00937D")
mycolors <- colorRampPalette(mycols)(col_numb)
#GGplot
p <- ggplot(data1, aes(x=year2, y=sales)) +
geom_point(aes(size = sales, colour=title), alpha=0.9, stroke=12, colour="black")+
geom_point(aes(size = sales, colour=title), alpha=0.8, fill="gray70", colour="gray70", size=8)+
geom_point(aes(size = sales, colour=title), alpha=0.7, fill="black", colour="white", size=0.7)+
gghighlight(sales > 1000000, unhighlighted_params = list(colour = alpha("gray70", 0.7)))+
labs(caption = "Source: Rosie Baillie & Dr. Sara Stoudt | Plot by @sil_aarts")+
scale_size_continuous(range = c(2,20))+
guides(size = FALSE)+
coord_flip(clip = "off")+
theme_sil
#Quartz
quartz(type="Cocoa")
#Run it
p
#Combine plots
plots <- plot_grid(title, p, nrow=2, ncol=1, rel_widths=c(15,15), rel_heights=c(8,25))
plots
|
0fcc44f3876000701ff4f9c51f5f4bb3a7932a7b
|
8ef27de17d0110828d77ca91b4f4e71af73fc12f
|
/man/wormsPE.Rd
|
8d9097e62fcf04e8d3cd669d34d5aebca7470dea
|
[] |
no_license
|
marcpaga/pulsedSilac
|
95537ce75dc65a9573186708b2917ac700c7cbe6
|
23e5e48083b5edfc99c5dbc42bef487610bec5af
|
refs/heads/master
| 2020-05-17T09:29:16.236700
| 2020-03-07T12:58:50
| 2020-03-07T12:58:50
| 183,634,007
| 2
| 0
| null | 2019-12-05T09:54:02
| 2019-04-26T13:31:31
|
R
|
UTF-8
|
R
| false
| true
| 1,887
|
rd
|
wormsPE.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{wormsPE}
\alias{wormsPE}
\title{ProteomicsExperiment with pulsed silac data from C. elegans strains}
\format{A \code{SilacProteomicsExperiment} object with 250 proteins and 3574
peptides in a total of 14 samples.
\describe{
\item{colData}{A \code{DataFrame} with the design of the experiment:
samples, timepoints, replicates...}
\item{assaysProt}{A list of matrices with quantification data at protein
level: total intensity (int_total), light isotope intensity (int_light),
heavy isotope intensity (int_heavy) and heavy/light isotope intensty ratio
(ratio).}
\item{rowDataProt}{A \code{DataFrame} with 22 columns that contains general
protein information: ids, gene names, molecular weight...}
\item{assaysPep}{A list of matrices with quantification data at peptide
level: total intensity (int_total), light isotope intensity (int_light),
heavy isotope intensity (int_heavy) and heavy/light isotope intensty ratio
(ratio).}
\item{rowDataPept}{A \code{DataFrame} with 46 columns that contains general
protein information: ids, amino acids counts, length...}
\item{linkerDf}{A \code{data.frame} with 3574 rows and 4 columns. It
contains the relationships between proteins and peptides in the
ProteomicsExperiment object.}
}}
\usage{
data(wormsPE)
}
\description{
A pre-built \code{SilacProteomicsExperiment} object with data from a pulsed
silac experiment done in \emph{C. elegans} by Visscher et al. 2016. It only
contains the data from the first 250 priteins and two old worms strains
(OW40 and OW450).
}
\details{
It is used as example in the pulsed silac vignette to illustrate the main
data analysis functions and in the examples of the documentation.
}
\references{
\url{https://www.ncbi.nlm.nih.gov/pubmed/28679685}
}
\keyword{datasets}
|
e18ecc598152320677d7a132f1d177a0f2397266
|
9431f2901a1f0efa409ef6ee81ce5e825dcf1193
|
/man/saveInterpolatedPanels.Rd
|
692c3734887cd4adf916c38d95b3693d1b1c4002
|
[] |
no_license
|
piotrek-orlowski/impliedCF
|
3304069a1fd6b5ff07b8ff6c281eb841316c87ec
|
27ed2e5947b6855c72156d62987a54719f342450
|
refs/heads/master
| 2021-03-19T11:22:14.143712
| 2018-07-17T20:46:23
| 2018-07-17T20:46:23
| 123,603,931
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,253
|
rd
|
saveInterpolatedPanels.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interpolateAndSavePanels.R
\name{saveInterpolatedPanels}
\alias{saveInterpolatedPanels}
\title{Calculate the implied GFT for a series of option panels, using a square-root frequency scaling and for fixed maturities.}
\usage{
saveInterpolatedPanels(panels.list, out.mat, output.name, doParallel = 2,
lib.dir = "test.lib.dir", ...)
}
\arguments{
\item{panels.list}{A list version of the option panels, as outputted by \code{\link{panelsToList}}}
\item{out.mat}{Sx2 matrix, where first column specifies target maturities and the second specifies the number of options available at each maturity (for now they have to be equal).}
\item{output.name}{The file name where the interpolated igft-s will be saved}
\item{doParallel}{If integer, specifies the number of cores to be used by starting a cluster with the \code{parallel} package. If \code{foreach}, an extant parallel backend will be used with the \code{foreach} package.}
\item{k.shrink}{what?!}
}
\description{
This function takes as input a vector of strings corresponding to option panels and then calculates the implied GFT for a given frequency rule. Then the implied GFT is interpolated for fixed maturities.
}
|
f28de65cfb36e09e369176b3bb4fc48af2cb4b80
|
3365692614767c738e38448f3a5d022cbe0ca45a
|
/trait_correlations.R
|
1724f310c68247ebe271e6962bcd2c891f0550a3
|
[] |
no_license
|
LanceStasinski/Dryas2
|
89ac26c1a19cd3dabc9636b26967e24d1df75928
|
049f4461b0251174be4f0e208795333ea9a3c935
|
refs/heads/master
| 2023-06-02T02:32:56.480834
| 2021-06-17T17:33:59
| 2021-06-17T17:33:59
| 268,616,307
| 2
| 3
| null | 2021-03-22T20:17:30
| 2020-06-01T19:43:25
|
R
|
UTF-8
|
R
| false
| false
| 1,403
|
r
|
trait_correlations.R
|
library(corrplot)
library(plyr)
library(vegan)
library(ggplot2)
library(ggpubr)
setwd("C:/Users/istas/OneDrive/Documents/Dryas Research/Dryas 2.0")
leaf = read.csv('morphology.csv', stringsAsFactors = F)
#data sets
traits = leaf[, -c(1,2,3,4,10,11,12)]
parent = traits[!traits$Species == 'DX',]
parent = parent[,-5]
hyb = traits[traits$Species == 'DX',]
hyb = hyb[,-5]
#parent trait correlations
p.c = cor(parent, method = 'pearson')
colnames(p.c) <- c('Glands', 'Scales', 'Length', 'Tomentum')
rownames(p.c) <- c('Glands', 'Scales', 'Length', 'Tomentum')
#hybrid trait correlations
hyb.c = cor(hyb, method = 'pearson')
colnames(hyb.c) <-c('Glands', 'Scales', 'Length', 'Tomentum')
rownames(hyb.c) <- c('Glands', 'Scales', 'Length', 'Tomentum')
#corrplots
cols = colorRampPalette(c('#2166ac', '#d73027'))
par(mfrow = c(1,2))
corrplot::corrplot(p.c, method = "number", type = "lower", diag = F, cl.cex = 1,
addCoef.col = 'black', tl.col = 'black', cl.length = 5,
col = cols(15), number.cex = 1.25)
mtext('Parent Leaves', side = 2, line = -1, at = 2, cex = 1.5)
corrplot::corrplot(hyb.c, method = "number", type = 'lower', diag = F,
cl.cex = 1, addCoef.col = 'black',
tl.col = 'black', cl.length = 5, col = cols(15),
number.cex = 1.25)
mtext('Hybrid Leaves', side = 2, line = -1, at = 2, cex = 1.5)
|
7eda8daee67c79594b8f241134a3a04442dc47fa
|
7a4fe2d2d49906518c381d30591bef944ac2b9f7
|
/Bike Riders - Week 10/Bike Rider Subscription.R
|
2d51e3834ecac58c601e5f5a3a3bd588eff1833f
|
[] |
no_license
|
Choke77/Tidy-Tuesday-Projects
|
93f942c566882a719b4773b0b307837b34ee38ca
|
cf66f8db03f49b0798cb7aaa77c047bdb7a5d7d0
|
refs/heads/master
| 2020-08-08T09:21:36.849847
| 2019-10-03T21:44:24
| 2019-10-03T21:44:24
| 213,803,786
| 0
| 0
| null | 2019-10-09T02:38:24
| 2019-10-09T02:38:24
| null |
UTF-8
|
R
| false
| false
| 1,297
|
r
|
Bike Rider Subscription.R
|
library(tidyverse)
library(lubridate)
filesToLoad = list.files()
fileList = lapply(filesToLoad,read.csv)
dataset = do.call("rbind",fileList)
rm(fileList)
summary(dataset)
#Clean the dataset
df <- dataset %>%
filter(Duration != "",
Distance_Miles != "",
!is.na(StartLatitude),
!is.na(StartLongitude),
Distance_Miles < 100
) %>%
mutate(StartDate = mdy(StartDate),
EndDate = mdy(EndDate),
StartTime = hm(StartTime),
EndTime = hm(EndTime),
RouteID = as.character(RouteID),
BikeID = as.character(BikeID),
Duration = as.numeric(Duration))
df_heatmap <- df %>%
mutate(year = year(StartDate),
month = month(StartDate, label=TRUE),
day = day(StartDate),
hour = hour(StartTime) ) %>%
group_by(day, month) %>%
summarise(count = n())
head(df_heatmap)
ggplot(df_heatmap,aes(day,month, fill = count))+
geom_tile(color= "white",size=0.1) +
scale_y_continuous(trans = "reverse", breaks = unique(df_heatmap$month))+
scale_x_continuous(breaks =c(1,10,20,31))
#Start hub with highest number of trips (map)
#Most common end trip stations
#Trips by Day and Hour (1 month)
#Change in Bike share subscribers(ove all months)
|
106013393eb0e5bf230e0d1d11889a3cac59c2e3
|
46e0d1707d8c24168b6837071168ed8714e48770
|
/R/Data Science/Statistic Inference/Project/Simulation.R
|
bb5d3a75cca9364e0685a3501e7f8a56651a08fd
|
[] |
no_license
|
hyqLeonardo/Hacker
|
fd7c112198adfa9278f978dcef36d6e7a82ebc77
|
9e9b6384c3d8d05cd9ab56cba11ec2c1357426d5
|
refs/heads/master
| 2020-04-28T23:20:44.778614
| 2015-02-22T09:38:39
| 2015-02-22T09:38:39
| 30,788,527
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 462
|
r
|
Simulation.R
|
nosim <- 1000
lambda <- 0.2
cfunc <- function(x, n) sqrt(n) * (mean(x) - 1/lambda) / (1/lambda)
dat <- data.frame(
x = c(
apply(matrix(rexp(nosim * 40, lambda),
nosim), 1, cfunc, 40)
),
size = factor(rep(c(40), rep(nosim, 1))))
g <- ggplot(dat, aes(x = x, fill = size)) + geom_histogram(alpha = .20, binwidth=.3, colour = "black", aes(y = ..density..))
g <- g + stat_function(fun = dnorm, size = 2)
g + facet_grid(. ~ size)
|
cae80258aea906a823b3d5b74c6b5ef5aea5413c
|
543f60c0dd71a6eb227c0d8176b38d7e3554b00e
|
/Femalefertility Linear Regression.R
|
8223905565d4c5e3f0cc7b72d3772fb5511d2275
|
[] |
no_license
|
ChandanNaik24/Data-Science-
|
b9e60e31f8b33211708e1f69691b1bdfabfd3714
|
f6dcf43d57a37f81bb28b54eb1ebd508943ff941
|
refs/heads/master
| 2023-04-16T10:12:37.658918
| 2021-04-15T06:01:35
| 2021-04-15T06:01:35
| 287,471,833
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 733
|
r
|
Femalefertility Linear Regression.R
|
### Loading the CVS file to R Studio cloud ###
data <- SLR2
### EDA ###
summary(data)
dataframe1 <- summary
### Plot ###
plot(data)
dataframe2 <- plot
### Checking for outliers ###
boxplot(data$worker.percent,horizontal = T)
boxplot(data$fertility.rate, horizontal = T)
boxplot(data$fertility.rate,data$worker.percent, horizontal = T)
### CHecking the Normality ###
qqnorm(data$fertility.rate)
qqnorm(data$worker.percent)
### Assigning Variables ###
y <- data$worker.percent
x <- data$fertility.rate
### Linear Regression ###
model <- lm(y~x, data = data)
summary(model)
### Prediction ###
new_data = data.frame(x=c(3,9,1,2,8))
pred <- predict.lm(model,newdata = new_data)
pred
|
cc7643f1e67df75d58e423bb9acb1fc3f7b79536
|
98eaa4e4992701d44c3637f292ded9b26017f9f1
|
/man/cubehelix.Rd
|
c594d474a6ee975620dbdeb73c37ef157707eaf1
|
[] |
no_license
|
nplatonov/ursa
|
4e946f4bddea1b947200953f0570c01a0b8734c1
|
ecb0b4693b470a9bc4df34188b4925589b8d3988
|
refs/heads/master
| 2023-06-26T12:12:58.134465
| 2023-06-13T10:01:34
| 2023-06-13T10:01:34
| 46,657,847
| 7
| 2
| null | 2023-09-10T18:03:27
| 2015-11-22T11:34:23
|
R
|
UTF-8
|
R
| false
| false
| 6,446
|
rd
|
cubehelix.Rd
|
\name{cubehelix}
\alias{cubehelix}
\title{
Generate "cubehelix" palette.
}
\description{
\code{cubehelix} returns set of RGB colours, which are screen display of intensity images
}
\usage{
cubehelix(n, value = numeric(), weak = NA, rich = NA, rotate = NA, hue = NA, gamma = 1,
dark = NA, light = NA, bright = NA, inv = NA, verbose = NA)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{n}{
Positive integer. Length of returned color vector. If \code{n} is \link[base:missing]{missing} and length of \code{value} is positive, then length of \code{value}. If missing \code{n} and empty \code{value}, then \code{n=256}.
}
\item{value}{
Numeric vector of values, which are associated with a palette. If both positive and negative values are in this vector, then divergence color palette is returned. Default in numeric of length zero (unspecified).
}
\item{weak}{
Numeric. The angle (in degrees) of the helix for color with \code{light} intensity. If both \code{rich} and \code{weak} are specified, the \code{rotate} is defined as difference between \code{rich} and \code{weak}. If all \code{weak}, \code{rich} and \code{rotate} are unspecified, then random values are used. Default is \code{NA} (unspecified).
}
\item{rich}{
Numeric. The angle (in degrees) of the helix for color with \code{dark} intensity. If both \code{rich} and \code{weak} are specified, the \code{rotate} is defined as difference between \code{rich} and \code{weak}. If all \code{weak}, \code{rich} and \code{rotate} are unspecified, then random values are used. Default is \code{NA} (unspecified).
}
\item{rotate}{
Numeric. The angle of rotation (in degrees) of the helix over the scale; can be negative. If \code{rotate} and \code{weak} are specified, then \code{rich} is defined as sum of \code{weak} and \code{rotate}. If \code{rotate} and \code{rich} are specified, then \code{weak} is defined as difference between \code{rotate} and \code{weak}. If all \code{weak}, \code{rich} and \code{rotate} are unspecified, then random values are used. Default is \code{NA} (unspecified).
}
\item{hue}{
Non-negative numeric. Saturation of color. \code{hue=0} gives pure greyscale. If unspecified, then random value in interval [0.9, 1.5] is used. Default is \code{NA} (unspecified).
}
\item{gamma}{
Numeric. Power of intensity. Intensity is between \code{dark} and \code{light}, which are normalized to interval [0, 1]. \code{gamma} changes normalized intensity to intensity\code{^gamma}. Default is 1.
}
\item{dark}{
Positive numeric in interval between 0 and 255. The intensity of the darkest color in the palette. For light backgrounds default is 63. For dark backgrounds default is 14 (inverse order with \code{light}).
}
\item{light}{
Positive numeric in interval between 0 and 255. The intensity of the lightest color in the palette. For light backgrounds default is 241, for dark backgrounds default is 192 (inverse order with \code{dark}).
}
\item{bright}{
Positive numeric in interval between 0 and 255. Value for equal intensity for code{dark}and \code{light} in the palette. Applied only for both \code{dark=NA} and \code{light=NA}.
}
\item{inv}{
Logical. Inversion of color intensity. If \code{TRUE} then color vector is \link[base:rev]{reversed} before return. Default is \code{FALSE}.
}
\item{verbose}{
Logical. Value \code{TRUE} provides information about cube helix on console. Default is \code{NA}, which is interpreted as \code{FALSE}.
}
}
\details{
This is modified source code of function \code{cubeHelix()} from package \pkg{rje} under GPL>=2 license.
The palette design is oriented that figures can be printed on white paper. Under this assumption, light color is for small values, and dark color is for big values. In some computer vision and GIS software black background is used, and in this case light color for big values, and dark color of small values looks more naturally. For some thematic maps big values are light, and small values are small (for example, sea ice concentration: open water is blue, close ice is white). RGB and Grayscale remote sensing and photo imagery use light colors for strong signal, and dark colors for weak signal.
Light background is default for figure (specified by argument \code{background} in function \code{\link[ursa:compose_open]{compose_open}}).
%%~ and for image panels (specified by argument \code{fill} in function \code{\link[ursa:panel_new]{panel_new}}).
The palette divergency can be defined only if \code{value} is specified. If all values are positive, or all values are negative, then returned palette is not drivergent. For divergent palettes the helix sequence is continuous.
If \code{dark} and \code{lihgt} are unspecified, the color contrast bewteen \code{dark} and \code{light} drops on reducing number of colors in returned vector.
}
\value{
Vector of RGB color specification.
}
\references{
\href{https://www.mrao.cam.ac.uk/~dag/CUBEHELIX/}{Dave Green's `cubehelix' colour scheme.}
Green, D. A., 2011, `A colour scheme for the display of astronomical intensity images', Bulletin of the Astronomical Society of India, 39, 289. http://astron-soc.in/bulletin/11June/289392011.pdf \href{https://arxiv.org/pdf/1108.5083.pdf}{(pre-print at 'arxiv.org')}
\code{rje::cubeHelix()}; \pkg{rje} at CRAN: \url{https://CRAN.R-project.org/package=rje}
}
\author{
Dave Green
Robin Evans
Nikita Platonov \email{platonov@sevin.ru}
}
%%~ \note{
%%~ %% ~~further notes~~
%%~ }
\section{Acknowledgements}{
Dave Green, Robin Evans
}
%%~ \seealso{
%%~ Original source code \code{\link[rje:cubeHelix]{rje::cubeHelix}} (valid link if package \pkg{rje} is installed), or see CRAN reference.
%%~ }
\examples{
session_grid(NULL)
set.seed(352)
session_grid(regrid(mul=1/16))
a <- ursa_dummy(3,min=0,max=255)
b4 <- b3 <- b2 <- b1 <- vector("list",length(a))
for (i in seq_along(b1)) {
b1[[i]] <- colorize(a[i],pal=cubehelix(11,weak=45*i,rotate=+270),ncolor=11)
b2[[i]] <- colorize(a[i],pal=cubehelix(11,weak=45*i,rotate=-270),ncolor=11)
b3[[i]] <- colorize(a[i]-127,pal=cubehelix)
hue <- sample(seq(2)-1,1)
s <- ifelse(hue==0,NA,runif(1,min=91,max=223))
b4[[i]] <- colorize(a[i]-127,pal=cubehelix,pal.hue=hue,pal.dark=s,pal.light=s)
}
display(c(b1,b2),layout=c(2,NA),decor=FALSE)
display(c(b3,b4),layout=c(2,NA),decor=FALSE)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{color}
|
fae11929262d4cb46a8cd58cd1011d1b4893191a
|
0ccb2ef7d5d608d9c33ec1b68c176c17a7a3d888
|
/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/code/2_whole_genome_ERPRHER2Grade_fixed_baseline.R
|
8e9c4ba58b50bece78f2260bb07dd51472f2d507
|
[] |
no_license
|
andrewhaoyu/breast_cancer_data_analysis
|
dce6788aa526a9a35fcab73564a457e8fabb5275
|
d84441e315e3ce135149e111014fa9807228ee7c
|
refs/heads/master
| 2023-05-31T20:59:44.353902
| 2023-05-15T19:23:30
| 2023-05-15T19:23:30
| 103,444,023
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,618
|
r
|
2_whole_genome_ERPRHER2Grade_fixed_baseline.R
|
rm(list=ls())
#i1 represent the index of genotype file
#i1 ranges from 1 to 596
arg <- commandArgs(trailingOnly=T)
i1 <- as.numeric(arg[[1]])
print(i1)
library(R.utils)
library(data.table)
library(devtools)
library(withr)
library(gtools)
library(doParallel)
library(foreach)
#install R package
#bc2 is a development version of TOP package
#I used bc2 in my previous analyses
#the function of bc2 and TOP are almost the same
#TOP has more documentation
#to install bc2 or TOP, one needs to use install_github function
#you can specify the directory to your local directory
#with_libpaths(new = "/home/zhangh24/R/x86_64-pc-linux-gnu-library/4.2/", install_github('andrewhaoyu/bc2'))
library(bc2,
lib.loc ="/home/zhangh24/R/x86_64-pc-linux-gnu-library/4.2/")
setwd("/data/zhangh24/breast_cancer_data_analysis/")
#imputation file subject order
if(i1<=564){
subject.file <- "/data/NC_BW/icogs_onco/genotype/imputed2/icogs_order.txt.gz"
Icog.order <- read.table(gzfile(subject.file))
}else{
subject.file <- "/data/NC_BW/icogs_onco/genotype/imputed2/icogs_order_23.txt.gz"
Icog.order <- read.table(gzfile(subject.file))
}
setwd("/data/zhangh24/breast_cancer_data_analysis/")
#load the phenotypes data
data1 <- fread("./data/iCOGS_euro_v10_10232017.csv",header=T)
data1 <- as.data.frame(data1)
y.pheno.mis1 <- cbind(data1$Behaviour1,data1$ER_status1,data1$PR_status1,data1$HER2_status1,data1$Grade1)
colnames(y.pheno.mis1) = c("Behavior","ER","PR","HER2","Grade")
#x.test.all.mis1 <- data1[,c(27:206)]
SG_ID <- data1$SG_ID
#load the covariates for the model: PC1-10, age
x.covar.mis1 <- data1[,c(5:14,204)]
age <- data1[,204]
#find the people with missing ages
idx.incomplete <- which(age==888)
table(y.pheno.mis1[idx.incomplete,1])
idx.complete <- which(age!=888)
#remove people with missing age
y.pheno.mis1 <- y.pheno.mis1[idx.complete,]
x.covar.mis1 <- x.covar.mis1[idx.complete,]
SG_ID <- SG_ID[idx.complete]
#number of subject in the genotype file is n
n <- length(Icog.order[,1])
#creat a intial value for snpvalue
snpvalue <- rep(0,n)
#the phenotype data is a subset a the genotype data
#find the correponding subset
idx.fil <- Icog.order[,1]%in%SG_ID
#match the phenotype data with genotype data
idx.match <- match(SG_ID,Icog.order[idx.fil,1])
#idx.fil and idx.match will be used in later step for matching phenotype and genotype
#load the null hypothesis results for other covariates
#this component will be needed in later ScoreTest
load("./whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/score.test.support.icog.ERPRHER2Grade.Rdata")
#load all the imputed files
Filesdir <- "/data/NC_BW/icogs_onco/genotype/imputed2/icogs_imputed/"
Files <- dir(Filesdir,pattern="icogs_merged_b1_12.",full.names=T)
#order the imputed files
Files <- mixedsort(Files)
#specific one filegeno.file
geno.file <- Files[i1]
#count the number of variants in the file
num <- as.integer(system(paste0("zcat ",geno.file,"| wc -l"),intern=T))
#num = 10
#number of tumor characteristis is four
num.of.tumor <- ncol(y.pheno.mis1)-1
#number of subject in the phenotype files
n.sub <- nrow(y.pheno.mis1)
idx.control <- which(y.pheno.mis1[,1]==0)
#count the number of control in the data
n.control <- length(idx.control)
#get the three different z design matrix
z.design.list = GenerateZDesignCombination(y.pheno.mis1)
z.additive = z.design.list[[1]]
z.interaction = z.design.list[[2]]
z.saturated = z.design.list[[3]]
#number of second stage parameters
#if use additive model
n.second = ncol(z.additive)
#if use pair-wise interaction model
#n.second = ncol(z.interaction)
#if use saturated model
#n.second = ncol(z.saturated)
#parallel computing with foreach function
#the default of biowulf job allocation is two cores
#without parallel, we are only using 50% of the computing resources
#the job is running on two cores simultaneously
#parallel computing is faster, but sometimes also hard to debug
#it's also okay to just use a single for loop
#single for loop is easier to debug
#here I am splitting the jobs onto two cores
no.cores <- 2
inner.size <- 2
registerDoParallel(no.cores)
result.list <- foreach(job.i = 1:inner.size)%dopar%{
print(job.i)
#startend is a function in bc2 package
#specific the total loop number, the number of inner jobs
#startend will equally split the total loop
#startend will return with the start and the end of the job line
#job.i is the index of the inner jobs
#for example, if num = 10, inner.size =2, job.i = 1, then start = 1, end = 5
#for example, if num = 10, inner.size =2, job.i = 2, then start = 6, end = 10
start.end <- startend(num,inner.size,job.i)
start <- start.end[1]
end <- start.end[2]
inner.num <- end-start+1
#score_matrix, each row is the score vector for a genetic marker
score_result <- matrix(0,inner.num,n.second)
#information matrix, each row is the as.vector(information matrix) for a genetic marker
infor_result <- matrix(0,inner.num,(n.second)^2)
#snpid information
snpid_result <- rep("c",inner.num)
#frequencies of the genetic marker
freq.all <- rep(0,inner.num)
temp <- 0
#open the file
con <- gzfile(geno.file)
open(con)
for(i in 1:num){
#print the index every 500 SNPs
#if(i%%500==0){
print(i)
#}
#read one line of genetic file
oneLine <- readLines(con,n=1)
#the total number of SNPs are split into two sub-jobs
#only start run the test after the start location
if(i>=start){
temp <- temp+1
#the readLine result is a vector
myVector <- strsplit(oneLine," ")
#load the SNP ID
snpid <- as.character(myVector[[1]][2])
snpid_result[temp] <- snpid
snpvalue <- rep(0,n)
#load the imputed score for the genetic marker
#3 * number of subjects length
#every three columns are the probality for aa, Aa, AA for one subject
snppro <- as.numeric(unlist(myVector)[6:length(myVector[[1]])])
if(length(snppro)!=(3*n)){
break
}
#calculate the expected genotype score of the subject. Value between 0 to 2.
snpvalue <- convert(snppro,n)
#match the genotype to the phenotype data
snpvalue <- snpvalue[idx.fil][idx.match]
#calculate the allele frequencies only use controls
snpvalue.control <- snpvalue[idx.control]
freq <- sum(snpvalue.control)/(2*n.control)
freq.all[temp] <- freq
#print(paste0("freq",freq))
#only keep SNPs with allele frequency between 0.006 to 0.994
if(freq<0.006|freq>0.994){
#if the SNP is too rare, just keep as score 0.
score_result[temp,] <- 0
infor_result[temp,] <- 0.1
}else{
#fit the ScoreTest
#change second.stage.structure to second.stage.structure = pairwise.interaction for interaction model
#change second.stage.structure to second.stage.structure = saturated for saturated
score.test.icog<- ScoreTest(y=y.pheno.mis1,
x=snpvalue,
second.stage.structure="additive",
score.test.support=score.test.support.icog.ERPRHER2Grade,
missingTumorIndicator=888)
#the first element is score
score_result[temp,] <- score.test.icog[[1]]
#the second element is the efficient information matrix
infor_result[temp,] <- as.vector(score.test.icog[[2]])
}
}
if(i==end){
break
}
}
close(con)
result <- list(snpid_result,score_result,infor_result,freq.all)
return(result)
}
stopImplicitCluster()
#the output of foreach is saved as two list
#the attached code combine the two list as one
score_result <- matrix(0,num,n.second)
infor_result <- matrix(0,num,(n.second)^2)
snpid_result <- rep("c",num)
freq.all <- rep(0,num)
total <- 0
for(i in 1:inner.size){
result.temp <- result.list[[i]]
temp <- length(result.temp[[1]])
snpid_result[total+(1:temp)] <- result.temp[[1]]
score_result[total+(1:temp),] <- result.temp[[2]]
infor_result[total+(1:temp),] <- result.temp[[3]]
freq.all[total+(1:temp)] <- result.temp[[4]]
total <- total+temp
}
result <- list(snpid_reuslt=snpid_result,score_result=score_result,infor_result=infor_result,freq.all=freq.all)
#change the directory to your local directory
save(result,file=paste0("./whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/ERPRHER2Grade_fixed_baseline",i1))
|
1e18c24039275ba67a4f96d61f1c45486638aa8e
|
df562e5ef9ea2846cb05319114009c3de7e4dee1
|
/MasterR/MachineLearning.R
|
961c41b767e90e3baad74d247043aad25b1375bc
|
[] |
no_license
|
SCelisV/R
|
a05d9dc1b0bcb2bfabfbe83703db8364edd8a9ab
|
0aa0a984dae0c0466addbf6dc0dd629d863f7cf5
|
refs/heads/master
| 2022-12-23T23:23:40.878996
| 2020-09-30T20:10:21
| 2020-09-30T20:10:21
| 286,298,641
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 42,559
|
r
|
MachineLearning.R
|
# ML.R
# http://archive.ics.uci.edu/ml/datasets.php => dataSets
# Machine learning => automatiza la construcción de un módelo analítico, uso de algoritmos que aprende de los datos.
# Algoritmos: Supervisado, NO Supervisado, de Refuerzo
# Supervisado => necesita datos previamente etiquetados(lo que es correcto y lo que no es correcto) para aprender a realizar el trabajo. En base a esto, el algoritmo aprenderá a resolver problemas futuros similares.
# 1. -> Regresión líneal,
# 2. -> Regresión logística
# 3. -> Máquinas de Soporte Vectorial -> SVM
# NO Supervisado => necesita indicaciones previas, No necesita datos previamente etiquetados. Aprende a comprender y a analizar la información. Práctica sobre los datos que tiene.
# 1. -> K vecinos más cercanos - KNN
# 2. -> Árboles de decisión : https://web.fdi.ucm.es/posgrado/conferencias/JorgeMartin-slides.pdf
# 3. -> Random Forest -> Bosques Aleatorios
# 4. -> k-medias -> no supervisado, usado para clusterización
# de Refuerzo => aprende por su cuenta, en base a conocimientos introducidos previamente, aprende en función del éxito ó fracaso.
# 1. -> Regresión líneal - Supervisado - Aproximación que modela una relación entre una variable escalar dependiente ("Y") y una o más variables explicativas ("X")
# Dibujará una recta que nos indicará la tendencia del conjunto de datos, y nos ayudará a predecir en función de un valor X un valor Y.
# http://archive.ics.uci.edu/ml/datasets/Student+Performance => Student Performance => hadoop@ubuntu-hokkaido-3568:~/R/Data/workSpace/MasterR$
datos.18 <- read.csv('student-mat.csv', sep=";")
datos.18
# names(datos.18) # con esto vemos los nombres de las variables
# summary(datos.18)
# str(datos.18)
# 'data.frame': 395 obs. of 33 variables:
# head(datos.18)
# length ( datos.18 )
# [1] 33
# n = length ( datos.18 [[1]])
# n
# [1] 395
# Estimar el valor de la variable " G3 -> final grade (numeric: from 0 to 20, output target) ", en función de ToDos los valores del dataset ( G3 ~ .)
# limpieza de datos
# verificar los null's
NULOS <- any(is.na(datos.18))
print (NULOS)
# si NULOS => FALSE => NO hay null's
library(ggplot2)
# library(ggthemes)
# library(dplyr)
help(lm)
# para saber si una columna de un set de datos es numerica
# is.numeric(datos.18$age)
# [1] TRUE
# crear un df con las columnas que son numericas en el dataSet
columnas.numericas <- sapply(datos.18, is.numeric)
# calculamos el coeficiente de CORrelación
# NO Diagonales => Correlación —> entran escalados entre -1 y 1.
# Si las variables están muy correlacionadas R tendrá valor entre -1 y 1.
# Existen dos tipos de correlaciones tanto negativas como positivas
# Correlación positiva => > 0 , aumenta una variable por tanto aumenta la otra.
# ex: Estatura - Peso - crecen a la vez.
# Correlación negativa => < 0, aumenta una variable y disminuye la otra.
# ex: Velocidad vs Autonomía. > aceleración < autonomía
# Cuando están más cercanos a 1, la nube de puntos va a ser más perfecta a una línea.
# si hay alguna correlación que me dé 1.00000000 podemos decir que es una
# correlación perfecta., ó que esas dos variables son la misma variable, independiente de las
# unidades en que este medida.
# correlación baja está entre -0,2 y +0,2
# Valores altos => cercano a -1 ó +1 —> en la primera fila y primera columna.
# Valores bajos => cercanos a 0 => resto de elementos entre la segunda fila y la diagonal.
# R: = 0 => Indica que no tienen nada de correlación.
# R: = 1 => Correlación Perfecta.
coef.correlacion <- round(cor(datos.18[,columnas.numericas]),2)
print(coef.correlacion)
hist(datos.18$G3)
# calculamos la matriz de VARianzas - En este caso no es necesario
# Diagonales => Varianza
# NO Diagonales => COVarianza
mat.varianzas <- round(var(datos.18[,columnas.numericas]),2)
print(mat.varianzas)
# Gráfico de correlación múltiple = Cuanto más definida está el gráfico hay más correlación
cor_multiple <- pairs(datos.18[,columnas.numericas])
print(cor_multiple)
# install.packages("corrgram")
# install.packages("corrplot")
library(corrgram)
library(corrplot)
# Gráfico de correlación, en este caso las variables no numericas, las más correlacionadas son G1 vs G2 y G3 vs G2
cor.01 <- corrplot(coef.correlacion) # los pinta con circulos
cor.01 <- corrplot(coef.correlacion, method='color') # los pinta con cuadrados
pairs (datos.18[,31:33]) # vemos que la nube de puntos está muy definida
plot(datos.18$G3 ~ datos.18$G2, pch=19) # creamos la nube de puntos de las variables más correlacionadas (variable de estudio ~ variable dependiente)
plot(datos.18$G3 ~ datos.18$G1, pch=19) # creamos la nube de puntos de las variables más correlacionadas
# Gráfico de correlación, con todas las variables
cor.02 <- corrgram(datos.18)
# Cargar una función en memoria:
source('myImagePlot.R')
# Ejecuto la función:
myImagePlot(cor(datos.18[,columnas.numericas]))
correlacion<-round(cor(datos.18[,columnas.numericas]), 1)
corrplot(correlacion, method="number", type="upper")
# Creamos un histograma de la variable que queremos observar
histo.07 <- ggplot(datos.18, aes(x=G3)) + geom_histogram(bins=20)
histo.07 <- ggplot(datos.18, aes(x=G3)) + geom_histogram(bins=20, alpha=0.5, fill="blue")
print(histo.07)
# -O-J-O- Aunque parecen iguales histo.07 es diferente a histo.08
histo.08 <- hist(datos.18$G3, bins=20)
print(histo.08)
install.packages("caTools")
library(caTools)
# En estos modelos en los que tenemos muchas variables cualitativas, no se puede dibujar todas las variables.
# Lo que se suele hacer es dividir la muestra en dos partes, training(entrenamiento) y test.
# Para que un mismo código se ejecute siempre igual utilizando números aleatorios es
# utilizando la misma semilla: "seed" - vamos a tener un número aleatorio que será el mismo para todos.
# Dividimos la muestra en training y test
set.seed(80)
# Crea una variable de tal manera que de todos los datos de la columna G3 selecciona aleatoriamente el 70% y la marca como "TRUE" y el otro 30% lo marca como "FALSE"
sample.01 <- sample.split(datos.18$G3, SplitRatio = 0.7)
# train.01 => Creamos los datos que tienen TRUE para entrenamiento
train.01 <- subset(datos.18, sample.01 == TRUE)
# test.01 => Creamos los datos que tienen FALSE para test
test.01 <- subset(datos.18, sample.01 == FALSE)
# construimos el modelo de regresión lineal
# G3~. => Columna (G3) de la que se va a hacer la estimación vs (.) Todas las columnas del dataSet
# G3~Algunas variables => Columna (G3) de la que se va a hacer la estimación vs (Algunas variables) que queremos realizar la estimación
lm.01 <- lm(G3~. , train.01)
print(summary(lm.01)) # para revisar el modelo que hemos creado:
# R makes it easy to combine multiple plots into one overall graph, using either the
# par( ) or layout( ) function.
# With the par( ) function, you can include the option mfrow=c(nrows, ncols) to create a matrix of nrows x ncols plots that are filled in by row. mfcol=c(nrows, ncols) fills in the matrix by columns.
#Diagnosis
par(mfrow = c(2,2))
plot(lm.01, pch=19)
par(mfrow = c(1,1))
plot(residuals(lm.01) ~ fitted(lm.01), cex =3)
abline(c(0,0), lwd =3, col="red")
# summary(lm.01)
# Call:
# lm(formula = G3 ~ ., data = train.01) # con los datos de entrenamiento
# Residuals: # Diferencia entre los puntos actuales de las notas y la línea de regresión(las predicciones)
# Min 1Q Median 3Q Max
# -7.6187 -0.5742 0.2859 0.9561 4.6622
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) -0.604593 2.585771 -0.234 0.815332
# schoolMS 0.508004 0.432127 1.176 0.240949
# sexM 0.084558 0.284179 0.298 0.766309
# age -0.221760 0.125624 -1.765 0.078818 .
# addressU 0.300208 0.328761 0.913 0.362099
# famsizeLE3 0.013510 0.269399 0.050 0.960047
# PstatusT -0.176428 0.396888 -0.445 0.657070
# Medu 0.161011 0.175585 0.917 0.360083
# Fedu -0.056375 0.158238 -0.356 0.721960
# Mjobhealth 0.069912 0.622606 0.112 0.910690
# Mjobother 0.274769 0.382898 0.718 0.473714
# Mjobservices -0.039659 0.439512 -0.090 0.928177
# Mjobteacher 0.397866 0.582322 0.683 0.495128
# Fjobhealth 0.404921 0.765907 0.529 0.597525
# Fjobother 0.014856 0.538507 0.028 0.978014
# Fjobservices -0.245058 0.576221 -0.425 0.671018
# Fjobteacher -0.426524 0.715837 -0.596 0.551856
# reasonhome -0.258794 0.314191 -0.824 0.410955
# reasonother 0.184480 0.458785 0.402 0.687971
# reasonreputation 0.049474 0.313758 0.158 0.874842
# guardianmother 0.137886 0.312144 0.442 0.659085
# guardianother 0.186926 0.551567 0.339 0.734988
# traveltime 0.312642 0.182022 1.718 0.087186 .
# studytime 0.002298 0.165039 0.014 0.988903
# failures -0.191750 0.185512 -1.034 0.302374
# schoolsupyes 0.328873 0.384740 0.855 0.393536
# famsupyes 0.287557 0.273198 1.053 0.293624
# paidyes -0.045122 0.275074 -0.164 0.869844
# activitiesyes -0.364792 0.244613 -1.491 0.137223
# nurseryyes -0.495734 0.300716 -1.649 0.100584
# higheryes 0.198312 0.595546 0.333 0.739438
# internetyes -0.211749 0.344051 -0.615 0.538849
# romanticyes -0.570600 0.257288 -2.218 0.027529 *
# famrel 0.334560 0.143161 2.337 0.020283 *
# freetime -0.036975 0.135855 -0.272 0.785735
# goout 0.075163 0.121018 0.621 0.535143
# Dalc -0.172839 0.182867 -0.945 0.345547
# Walc 0.192967 0.135481 1.424 0.155683
# health 0.071997 0.090268 0.798 0.425916
# absences 0.070349 0.019353 3.635 0.000341 ***
# G1 0.119215 0.076776 1.553 0.121826
# G2 1.007317 0.066454 15.158 < 2e-16 ***
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
# Residual standard error: 1.879 on 235 degrees of freedom
# Multiple R-squared: 0.8585, Adjusted R-squared: 0.8338
# F-statistic: 34.78 on 41 and 235 DF, p-value: < 2.2e-16
# B0 -> Estimate -> 0.604593,
# Con el aumento de una unidad del regresor (age, traveltime , romanticyes, famrel, absences, G2), en promedio la variable de respuesta (G3) aumenta en un (-0.221760, 0.312642,-0.570600, 0.334560, 0.070349, 1.007317), respectivamente.
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) -0.604593 2.585771 -0.234 0.815332
# Signif. codes: uno ó mas asteriscos quiere decir que con un 95% de confianza puedo afirmar que el regresor es significativo,
# El p-value va a estar por debajo de 0.05.
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 => Indica si el regresor influye ó no.
# age -0.221760 0.125624 -1.765 0.078818 .
# traveltime 0.312642 0.182022 1.718 0.087186 .
# romanticyes -0.570600 0.257288 -2.218 0.027529 *
# famrel 0.334560 0.143161 2.337 0.020283 *
# absences 0.070349 0.019353 3.635 0.000341 ***
# G2 1.007317 0.066454 15.158 < 2e-16 ***
# Desviación típica = 2.585771 = Std. Error -> Precisión con la que se ha calculado
# (age, -0.221760, traveltime, 0.312642, romanticyes, -0.570600, famrel, 0.334560, absences, 0.070349, G2, 1.007317)
# Residual standard error: Desviación Típica Residual: 1.879 on 235 degrees of freedom => Importante con 95% de confianza puedo afirmar ... (G3 -> final grade) ..??
# R-squared: Coeficiente de Determinación - R^2 - cuadrado - R-squared: Me sirve para predecir "con mucha precisión”, cuál va a ser el error del modelo, R² cuantifica la proporción como un porcentaje.
# Multiple R-squared: 0.8585 => Cómo de bueno es el modelo de regresión. Coeficiente de Desviación. 0 <= R^2 <=1.
# Somos capaces de explicar el 85% de la variación de G3
# - Cual es la probabilidad de un evento mas desfavorable.
# p-value -> indica si el regresor influye ó no influye en la variable de respuesta.
# p-value: < 2.2e-16 si < 0.05 => indica si el regresor influye , es decir, influye con un 95%.
# Si p-value: < 0.05 Influye
# Si p-value > 0.05 No tenemos evidencia para decir que influye significativamente.
# Estoy muy segur@ de estos valores (age, -0.221760, traveltime, 0.312642, romanticyes, -0.570600, famrel, 0.334560, absences, 0.070349, G2, 1.007317)
# confint(lm.01) -> # Intervalo de Confianza para los B0 y B1 poblacionales.
# Con un 95% de Intervalo de confianza puedo decir que el B1
# Poblacional de cada uno de los regresores va a estar entre:
# age -0.46925288 0.02573386
# traveltime -0.04596167 0.67124665
# romanticyes -1.07748616 -0.06371407
# famrel 0.05251738 0.61660244
# absences 0.03222188 0.10847554
# G2 0.87639510 1.13823918
# Para comprobar el modelo, calculamos los residuos (diferencia entre el valor real y el estimado)
residuos <- residuals(lm.01)
class(residuos)
residuos <- as.data.frame(residuos)
class (residuos)
head(residuos)
ggplot(residuos, aes(residuos)) + geom_histogram(fill='blue', alpha=0.5)
# la mayor parte de los valores esta concentrado en el 0.0 (valor ideal, sin discrepancia entre el valor estimado y el valor real, el modelo se ajusta para calcular las estimaciones de G3)
# Una recta de regresión debe cumplir:
# Que la distancia entre los puntos a la recta de regresión sea mínima.
# Hipótesis de Partida - Diagnosis
# - Linealidad -> Se DA por supuesta - Las variables siguen una relación lineal.
# - Normalidad -> Se debe comprobar que los residuos siguen una distribución normal
# La Y para TODAS las X NO sigue una distribución normal.. PERO
# La variable Y para un determinado valor de X SI sigue una distribución normal.
# - Homocedasticidad -> varianza constante ó variabilidad constante - distribución de probabilidad de idéntica amplitud para cada variable aleatoria -
# La nube de puntos tenga un grosor constante.
# Heterocedastico -> varianza NO es constante ó variabilidad NO constante, a pesar de que la nube de puntos tiene forma lineal la varianza ó dispersión va aumentando:
# - Independencia -> Cuando las observaciones en sí no están relacionadas.
# https://rpubs.com/camilamila/correlaciones
#Cargar paquetes
# install.packages("readxl")
# install.packages("dplyr")
# install.packages("ggplot2")
# install.packages("GGally")
# install.packages("Hmisc")
# install.packages("corrplot")
# install.packages("PerformanceAnalytics")
#Cargar paquetes
library(readxl)
library(dplyr)
library(ggplot2)
library(GGally)
library(Hmisc)
library(corrplot)
library(PerformanceAnalytics)
correlacion<-round(cor(datos.18[,columnas.numericas]), 1)
corrplot(correlacion, method="number", type="upper")
# Calcula el pvalor para los datos y lo muestra como una matriz
# Este comando nos da la misma matriz de correlación y además nos da el p-value.
# En todos el casos el p-value es muy bajo
pvalue <-
rcorr(as.matrix(datos.18[,columnas.numericas]))
print (pvalue)
# p-value -> indica si el regresor influye ó no influye en la variable de respuesta.
# Si p-value: < 0.05 Influye con un 95% de confianza
# Si p-value > 0.05 No tenemos evidencia para decir que influye significativamente.
# También podemos hacer gráficos de dispersión, calcular los coeficientes de una sola vez y ver si son estadísticamente significativos, con un solo comando:
chart.Correlation(datos.18[,columnas.numericas], histogram = F, pch = 19)
# ===
predicciones <- predict(lm.01, test.01)
result <- cbind(predicciones, test.01$G3)
head(result)
colnames(result) <- c('prediccion', 'real')
head(result)
class (result)
# [1] "matrix"
result <- as.data.frame(result)
class (result)
# [1] "data.frame"
result$prediccion
# [1] 4.3771174 15.6568779 12.7715234 16.3655279 8.9631854 16.3820285
# [7] 9.1435439 13.1077328 9.1062061 13.4271280 15.3995283 16.1977627
# [13] 17.4702455 14.3544266 5.5530143 15.6970621 9.6885738 7.6024850
# [19] 6.4328098 7.4915172 17.4899080 12.3218790 7.5560755 6.2443488
# [25] 11.5012159 10.3432983 12.1191638 15.3745353 12.5415249 13.4109435
# [31] 0.6146943 12.4557150 0.4853426 16.1308560 9.6426976 13.0082216
# [37] 11.8844407 16.5595060 8.9278011 10.7956852 8.2745915 13.9869789
# [43] 3.8424954 10.2342860 8.8699591 9.2970282 17.3334563 11.5048783
# [49] 7.5513623 14.1430590 9.0588090 10.3726738 6.3582595 11.5719115
# [55] 13.3199785 10.5074965 15.4200688 5.0613810 7.7257296 9.0689039
# [61] 9.4408250 9.5712700 7.3036283 9.8130347 10.9934444 10.4573352
# [67] -1.4233843 19.4598327 6.0324620 3.4604733 5.7240723 11.1522398
# [73] 11.8401552 12.0369193 14.5013825 7.0452523 8.3336442 17.6024005
# [79] 8.8341655 10.3233054 9.0050324 8.4608129 19.9131016 13.5218370
# [85] 7.0954642 13.9010138 18.0228677 13.6203431 11.7089381 11.3337263
# [91] 10.4307172 10.6882871 8.3023398 14.7470793 8.8220641 5.4362661
# [97] 9.9246858 8.7264188 8.2918289 13.5295694 13.7973234 8.0457389
# [103] 11.7479906 14.1316461 14.7237754 10.0418208 7.2525971 11.0112219
# [109] 12.6943915 10.8127159 11.6031553 6.1181639 5.3383231 10.4908875
# [115] 8.3727427 2.9183605 16.0769137 13.0006790
result$real
# [1] 6 15 11 15 12 18 11 10 11 13 15 15 16 11 6 15 10 8 6 7 18 14 10 6 11
# [26] 13 13 16 14 13 0 12 0 15 11 14 13 15 7 12 10 16 0 9 9 11 17 11 8 14
# [51] 11 10 8 13 14 10 15 8 7 9 12 9 6 10 12 12 0 18 8 5 8 10 12 11 14
# [76] 9 0 17 9 10 8 10 19 14 8 16 18 13 12 10 11 11 9 15 9 0 10 0 0 14
# [101] 13 8 11 13 15 10 0 10 12 11 10 0 5 10 8 0 16 10
min(result$prediccion)
# [1] -1.423384
min(result)
# [1] -1.423384 => teniendo en cuenta que la nota mínima es cero =>
# creamos una función que reemplace los valores negativos del dataSet por 0
# Clamp predicted grades to the valid range: a grade cannot be negative.
#
# x: numeric value(s) — a predicted grade, or a whole vector of them.
# Returns x with every negative value replaced by 0.
# Generalized with pmax() so it now works elementwise on full vectors as
# well as on the scalars it receives through sapply() below.
cero <- function(x) {
  pmax(x, 0)
}
result$prediccion <- sapply(result$prediccion, cero) # aplicando, ejecutando la funcion cero sobre la columna predicción
result$prediccion
# [1] 4.3771174 15.6568779 12.7715234 16.3655279 8.9631854 16.3820285
# [7] 9.1435439 13.1077328 9.1062061 13.4271280 15.3995283 16.1977627
# [13] 17.4702455 14.3544266 5.5530143 15.6970621 9.6885738 7.6024850
# [19] 6.4328098 7.4915172 17.4899080 12.3218790 7.5560755 6.2443488
# [25] 11.5012159 10.3432983 12.1191638 15.3745353 12.5415249 13.4109435
# [31] 0.6146943 12.4557150 0.4853426 16.1308560 9.6426976 13.0082216
# [37] 11.8844407 16.5595060 8.9278011 10.7956852 8.2745915 13.9869789
# [43] 3.8424954 10.2342860 8.8699591 9.2970282 17.3334563 11.5048783
# [49] 7.5513623 14.1430590 9.0588090 10.3726738 6.3582595 11.5719115
# [55] 13.3199785 10.5074965 15.4200688 5.0613810 7.7257296 9.0689039
# [61] 9.4408250 9.5712700 7.3036283 9.8130347 10.9934444 10.4573352
# [67] 0.0000000 19.4598327 6.0324620 3.4604733 5.7240723 11.1522398
# [73] 11.8401552 12.0369193 14.5013825 7.0452523 8.3336442 17.6024005
# [79] 8.8341655 10.3233054 9.0050324 8.4608129 19.9131016 13.5218370
# [85] 7.0954642 13.9010138 18.0228677 13.6203431 11.7089381 11.3337263
# [91] 10.4307172 10.6882871 8.3023398 14.7470793 8.8220641 5.4362661
# [97] 9.9246858 8.7264188 8.2918289 13.5295694 13.7973234 8.0457389
# [103] 11.7479906 14.1316461 14.7237754 10.0418208 7.2525971 11.0112219
# [109] 12.6943915 10.8127159 11.6031553 6.1181639 5.3383231 10.4908875
# [115] 8.3727427 2.9183605 16.0769137 13.0006790
min(result$prediccion)
# [1] 0
# Hay varias formas de hacer la Evaluación del modelo:
# ME - Mean Error
# RMSE - Root Mean Square Error
# MAE - Mean Absolute Error
# MPE - Mean Percentage Error
# MASE - Mean Absolute Scaled Error
# MAPE - Mean Absolute Percentage Error = mean ( abs ( predicción - real / real ) )
# MSE - Mean of Square Error => mean ( ( predicción - real ) ^ 2 )
# SST - Sum of Square Total => es la diferencia al cuadrado entre la variable dependiente observada y su valor medio (media) => SST = Σ(actual-mean)²
# SSR - Sum of Squared Regression => La suma de las diferencias entre predicción y la media de la variable dependiente => Σ(predicción-mean)²
# SSE - Sum of Square Error => sum ( ( predicción - real ) ^ 2 ) => es la diferencia entre el valor observado y el valor de la predicción => SSE = Σ(real-predicción)²
# MSE - Mean of Square Error => mean ( ( predicción - real ) ^ 2 )
mse.01 <- mean( (result$real - result$prediccion)^2)
print(mse.01)
# SSE - Sum of Square Error => sum ( ( predicción - real ) ^ 2 ) => es la diferencia entre el valor observado y el valor de la predicción => SSE = Σ(real-predicción)²
sse.01 <- sum ( ( result$prediccion - result$real ) ^ 2 )
print(sse.01)
# TSS ó SST - Total Sum Square => la suma de todas las diferencias cuadradas entre la media de las observaciones y su conjunto
sst.01 <- sum ( ( mean(datos.18$G3) - result$real ) ^ 2 )
print(sst.01)
ssr.01 <- sum ( ( result$prediccion - mean(datos.18$G3) ) ^ 2 )
print(ssr.01)
# R² = 1-(SSE/SST)
rSquare.01 <- 1 - ( sse.01 / sst.01 )
print(rSquare.01)
# R² = SSR/SST
rSquare01.01 <- ( ssr.01 / sst.01 )
print(rSquare01.01)
# R-squared: # Somos capaces de explicar el 77% de la variación de G3.
# 2. -> Regresión logística - Supervisado - Predecir el resultado de una variable categorica en función de otras independientes.
# Modela la probabilidad de que un evento ocurra en función de otros factores, método de clasificación (binaria, 1 ó 0).
# Dibujará una curva que nos indicará la tendencia del conjunto de datos, y nos ayudará a predecir en función de un valor X un valor Y.
# Siempre será entre 0 ó 1
# Si resultado >= 0.5 => devuelve 1
# Si resultado < 0.5 => devuelve 0
# Matriz de Confusión => Compara el valor real con el valor de prediccion
# Real vs predicción
# SI vs SI => PC => Positivo Correcto
# NO vs NO => NC => Negativo Correcto
# SI vs NO => FP => Falsos Positivos => Error Tipo 1
# NO vs SI => FN => Falsos Negativos => Error Tipo 2
# La precisión sirve para saber la probabilidad de acierto en la predicción => (PC + NC ) / Total
# La tasa de error sirve para saber la probabilidad de error en la predicción => (FP + FN) / Total
# API Command => kaggle kernels pull alexisbcook/titanic-tutorial
datos.19 <- read.csv('titanic.csv')
datos.19
# names(datos.19) # con esto vemos los nombres de las variables
# summary(datos.19)
# str(datos.19)
# 'data.frame': 891 obs. of 12 variables:
# head(datos.19)
# length ( datos.19 )
# [1] 12
# n = length ( datos.19 [[1]])
# n
library(ggplot2)
help(glm)
ggplot(datos.19, aes(Survived)) + geom_bar()
ggplot(datos.19, aes(Pclass)) + geom_bar(aes(fill=factor(Pclass)))
ggplot(datos.19, aes(Survived,Pclass)) + geom_jitter(aes(color=factor(Pclass)))
ggplot(datos.19, aes(Age)) + geom_histogram(bins=20, alpha=0.5,fill='green')
ggplot(datos.19, aes(Survived,Pclass,Sex)) + geom_jitter(aes(color=factor(Sex)))
ggplot(datos.19, aes(Survived,Pclass,Sex)) + geom_jitter(aes(group=Sex, color=factor(Sex), alpha=0.0))
ggplot(datos.19, aes(Pclass, Age)) + geom_boxplot(aes(group=Pclass, fill=factor(Pclass), alpha=0.0))
ggplot(datos.19, aes(Survived,Pclass,Sex,Age)) + geom_jitter(aes(group=Sex, color=factor(Age), alpha=0.5))
# limpieza de datos
install.packages('Amelia')
library(Amelia)
# Crea un gráfico que nos permite Verificar si hay nulls en cada una de las columnas
missmap(datos.19, main="Verifica nulls", col=c('red', 'black')) # red => si hay nulls y black NO hay nulls
# Comprobación de que no se puede calcular la media de edad.
round(mean(datos.19$Age),0) # [1] NA
# Para limpiar estos datos lo que haremos será sustituir los valores NA de las edades por la media de edad en cada Pclass
# One Way
# creamos una función que reemplace los valores nulls del dataSet por 0, le pasamos la columna edad, y la columna clase
datos.19$Age
# Impute missing ages with the mean age of the passenger's class.
#
# Age:    numeric vector of ages, possibly containing NA.
# Pclass: vector of passenger classes (1, 2 or 3), same length as Age.
# Returns a copy of Age in which every NA has been replaced by the mean
# age of the corresponding class (33 / 28 / 18, precomputed earlier in
# this script). Ages whose class is outside 1:3 are left as NA.
# Rewritten as a vectorized lookup: no scalar loop, no unsafe
# 1:length(Age) sequence.
nulos <- function(Age, Pclass) {
  class_mean_age <- c(`1` = 33, `2` = 28, `3` = 18)
  newAge <- Age
  faltantes <- is.na(Age)
  newAge[faltantes] <- class_mean_age[as.character(Pclass[faltantes])]
  newAge
}
# Apply the multi-argument imputation function to the Age column: each NA
# is replaced with the mean age of that passenger's class (depends on the
# nulos(Age, Pclass) function defined immediately above).
datos.19$Age <- nulos(datos.19$Age, datos.19$Pclass)
# Second approach ("Two Way"): define a function that replaces NA values
# with 0 so that the per-class mean age can then be computed.
# NOTE(review): the factor() result below is printed but never stored.
factor(datos.19$Pclass)
# Replace a missing value with 0; non-missing values pass through
# unchanged. Used with sapply() below so that mean() can be computed on
# each per-class subset of ages.
nulos <- function(x) {
  if (is.na(x)) 0 else x
}
# Run the function once to sanity-check it.
nulos(NA)
nulos(38)
# Create one data subset per passenger class.
datos19.Pclass1 <- subset(datos.19, datos.19$Pclass == 1)
datos19.Pclass2 <- subset(datos.19, datos.19$Pclass == 2)
datos19.Pclass3 <- subset(datos.19, datos.19$Pclass == 3)
# Replace the NA ages with 0 using the function above.
datos19.Pclass1$Age <- sapply(datos19.Pclass1$Age, nulos)
datos19.Pclass2$Age <- sapply(datos19.Pclass2$Age, nulos)
datos19.Pclass3$Age <- sapply(datos19.Pclass3$Age, nulos)
# Compute the (rounded) mean age per class; these constants are the ones
# hard-coded in the imputation functions above.
round(mean(datos19.Pclass1$Age),0) # [1] 33
round(mean(datos19.Pclass2$Age),0) # [1] 28
round(mean(datos19.Pclass3$Age),0) # [1] 18
# Third approach: scalar imputation that receives both the age and the
# passenger class, returning the per-class mean age when the age is NA.
#
# x: a single age value (may be NA)  => datos.19$Age element
# y: the passenger class of that row => datos.19$Pclass element
# Returns x unchanged when it is not NA; otherwise the mean age of class
# y (33 / 28 / 18 for classes 1 / 2 / 3), or x (still NA) for any other
# class value.
# FIX: removed the debug print(x) / print(y) calls, which spammed the
# console on every row, and flattened the duplicated nested conditionals.
nulos <- function(x, y) {
  if (!is.na(x)) {
    return(x)
  }
  if (y == 1) {
    33
  } else if (y == 2) {
    28
  } else if (y == 3) {
    18
  } else {
    x
  }
}
# Spot-check the two-argument version: NA ages should map to the class
# mean (33 / 28 / 18 for classes 1 / 2 / 3).
nulos (NA, 1)
nulos (NA, 2)
nulos (NA, 3)
# Non-missing ages must pass through unchanged regardless of class.
nulos (38, 1)
nulos (38, 2)
nulos (38, 3)
# Classes outside 1:3 have no mean defined, so NA stays NA.
nulos (NA, 4)
nulos (NA, 5)
nulos (NA, 6)
# Non-missing ages with out-of-range classes also pass through.
nulos (38.00, 4)
nulos (35.00, 5)
nulos (54.00, 6)
nulos (38.00, 3)
nulos (35.00, 1)
nulos (54.00, 3)
# Ejecutar el modelo
library(dplyr)
# Eliminamos del df columnas que no necesitamos...como: (PassengerId, Name, Ticket, Cabin)
str(datos.19)
# 'data.frame': 891 obs. of 12 variables:
# $ PassengerId: int 1 2 3 4 5 6 7 8 9 10 ...
# $ Survived : int 0 1 1 1 0 0 0 0 1 1 ...
# $ Pclass : int 3 1 3 1 3 3 1 3 3 2 ...
# $ Name : Factor w/ 891 levels "Abbing, Mr. Anthony",..: 109 191 358 277 16 559 520 629 417 581 ...
# $ Sex : Factor w/ 2 levels "female","male": 2 1 1 1 2 2 2 2 1 1 ...
# $ Age : num 22 38 26 35 35 18 54 2 27 14 ...
# $ SibSp : int 1 1 0 1 0 0 0 3 0 1 ...
# $ Parch : int 0 0 0 0 0 0 0 1 2 0 ...
# $ Ticket : Factor w/ 681 levels "110152","110413",..: 524 597 670 50 473 276 86 396 345 133 ...
# $ Fare : num 7.25 71.28 7.92 53.1 8.05 ...
# $ Cabin : Factor w/ 148 levels "","A10","A14",..: 1 83 1 57 1 1 131 1 1 1 ...
# $ Embarked : Factor w/ 4 levels "","C","Q","S": 4 2 4 4 4 3 4 4 4 2 ...
datos.20 <- select(datos.19, -PassengerId, -Name, -Ticket, -Cabin)
print(datos.20)
str(datos.20)
# 'data.frame': 891 obs. of 8 variables:
# $ Survived: int 0 1 1 1 0 0 0 0 1 1 ...
# $ Pclass : int 3 1 3 1 3 3 1 3 3 2 ...
# $ Sex : Factor w/ 2 levels "female","male": 2 1 1 1 2 2 2 2 1 1 ...
# $ Age : num 22 38 26 35 35 18 54 2 27 14 ...
# $ SibSp : int 1 1 0 1 0 0 0 3 0 1 ...
# $ Parch : int 0 0 0 0 0 0 0 1 2 0 ...
# $ Fare : num 7.25 71.28 7.92 53.1 8.05 ...
# $ Embarked: Factor w/ 4 levels "","C","Q","S": 4 2 4 4 4 3 4 4 4 2 ...
# factorizar ó categorizar con valores concretos:
datos.20$Survived <- factor(datos.20$Survived)
datos.20$Survived
datos.20$Pclass <- factor(datos.20$Pclass)
datos.20$Pclass
datos.20$Parch <- factor(datos.20$Parch)
datos.20$Parch
datos.20$SibSp <- factor(datos.20$SibSp)
datos.20$SibSp
str(datos.20)
# 'data.frame': 891 obs. of 8 variables:
# $ Survived: Factor w/ 2 levels "0","1": 1 2 2 2 1 1 1 1 2 2 ...
# $ Pclass : Factor w/ 3 levels "1","2","3": 3 1 3 1 3 3 1 3 3 2 ...
# $ Sex : Factor w/ 2 levels "female","male": 2 1 1 1 2 2 2 2 1 1 ...
# $ Age : num 22 38 26 35 35 18 54 2 27 14 ...
# $ SibSp : Factor w/ 7 levels "0","1","2","3",..: 2 2 1 2 1 1 1 4 1 2 ...
# $ Parch : Factor w/ 7 levels "0","1","2","3",..: 1 1 1 1 1 1 1 2 3 1 ...
# $ Fare : num 7.25 71.28 7.92 53.1 8.05 ...
# $ Embarked: Factor w/ 4 levels "","C","Q","S": 4 2 4 4 4 3 4 4 4 2 ...
# Crear predicciones sobre la columna Survived por lo tanto vamos a dividir los datos en training y test:
library(caTools)
set.seed(90)
# Crea una variable de tal manera que de todos los datos de la columna Survived selecciona aleatoriamente el 70% y la marca como "TRUE" y el otro 30% lo marca como "FALSE"
sample.02 <- sample.split(datos.20$Survived, SplitRatio = 0.7)
# train.02 => Creamos los datos que tienen TRUE para entrenamiento
train.02 <- subset(datos.20, sample.02 == TRUE)
# test.02 => Creamos los datos que tienen FALSE para test
test.02 <- subset(datos.20, sample.02 == FALSE)
str(train.02)
str(test.02)
# Entrenar el modelo para predecir la variable de sobrevivencia Survived
glm.01 <- glm(Survived~., family=binomial(link='logit') , data=train.02)
summary (glm.01)
# Call:
# glm(formula = Survived ~ ., family = binomial(link = "logit"),
# data = train.02)
# Deviance Residuals:
# Min 1Q Median 3Q Max
# -2.8567 -0.6151 -0.3837 0.5783 2.5328
# Coefficients:
# Estimate Std. Error z value Pr(>|z|)
# (Intercept) 1.974e+01 3.956e+03 0.005 0.996018
# Pclass2 -1.024e+00 3.624e-01 -2.827 0.004701 **
# Pclass3 -2.398e+00 3.780e-01 -6.345 2.22e-10 ***
# Sexmale -2.603e+00 2.437e-01 -10.683 < 2e-16 ***
# Age -3.715e-02 1.052e-02 -3.533 0.000411 ***
# SibSp1 2.351e-01 2.695e-01 0.873 0.382855
# SibSp2 2.448e-02 6.458e-01 0.038 0.969764
# SibSp3 -2.158e+00 9.896e-01 -2.181 0.029188 *
# SibSp4 -1.139e+00 8.294e-01 -1.374 0.169542
# SibSp5 -1.690e+01 1.740e+03 -0.010 0.992247
# SibSp8 -1.718e+01 1.261e+03 -0.014 0.989133
# Parch1 2.061e-01 3.418e-01 0.603 0.546530
# Parch2 2.059e-01 4.671e-01 0.441 0.659372
# Parch3 -3.952e-02 1.223e+00 -0.032 0.974231
# Parch4 -1.719e+01 2.234e+03 -0.008 0.993860
# Parch5 -1.670e+01 2.001e+03 -0.008 0.993343
# Parch6 -1.734e+01 3.956e+03 -0.004 0.996503
# Fare 1.573e-03 2.527e-03 0.622 0.533635
# EmbarkedC -1.603e+01 3.956e+03 -0.004 0.996767
# EmbarkedQ -1.565e+01 3.956e+03 -0.004 0.996843
# EmbarkedS -1.629e+01 3.956e+03 -0.004 0.996716
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
# (Dispersion parameter for binomial family taken to be 1)
# Null deviance: 829.60 on 622 degrees of freedom
# Residual deviance: 525.04 on 602 degrees of freedom
# AIC: 567.04
# Number of Fisher Scoring iterations: 16
predicciones.02 <- predict(glm.01, test.02, type='response')
predicciones.02
predicciones02.bin <- ifelse (predicciones.02 > 0.5, 1, 0) # Reemplaza los valores de las predicciones para convertirlas en binarias (1,0)
predicciones02.bin
head(predicciones02.bin)
str(predicciones02.bin)
# calcular la precisión del modelo
error.02 <- mean( (predicciones02.bin != test.02$Survived))
precision.02 <- 1 - error.02
precision.02 # el modelo acierta el 82% de las veces si un pasajero sobrevive o no al hundimiento del titanic.csv
# 1. -> K vecinos más cercanos - KNN => clasificación, estima la probabilidad de que un elemento "x" pertenezca a una clase "C", a partir de la información proporcionada.
install.packages('ISLR')
library(ISLR)
# https://www.rdocumentation.org/packages/ISLR/versions/1.2/topics/Caravan
datos.21 <- Caravan # DB de seguros
head(datos.21)
str(datos.21)
summary(datos.21)
summary(datos.21$Purchase)
# Estimar mediante K vecinos si el cliente compra ó no compra! -
any(is.na(datos.21))
# [1] FALSE # Revisar nulls, en este caso no hay nulls en los datos
# Estandarizar las columnas => separar la variable de analisis Purchase
datos.21.compra <- datos.21[,86]
# esto es lo mismo
datos.21.compra <- datos.21$Purchase
# Estandarizar los datos => todas las columnas menos la 86
datos.21.estandarizados <- scale(datos.21[,-86])
# Crear los datos de prueba (test.03) y de entrenamiento (train.03),
# seleccionar las mil primeras filas para datos de pruebas. (test.03)
filas <- 1:1000
filas
test.03.datos <- datos.21.estandarizados[filas,] # incluye las 1000 filas y SUS las columnas
str(test.03.datos)
test.03.compra <- datos.21.compra[filas] # incluye las 1000 filas de la columna Purchase
str(test.03.compra)
length(test.03.compra) # [1] 1000 filas
length(test.03.compra[[1]]) # [1] 1 columna
train.03.datos <- datos.21.estandarizados[-filas,] # Quita las 1000 filas y sus columnas
str(train.03.datos)
train.03.compra <- datos.21.compra[-filas] # Quita las 1000 filas de la culumna Purchase
str(train.03.compra)
length(train.03.compra) # [1] 4822 filas
length(train.03.compra[[1]]) # [1] 1 columna
# ejecutar el modelo y obtener las predicciones
# install.packages('class')
library(class)
set.seed(90)
predicciones.03.compra <- knn(train.03.datos, test.03.datos, train.03.compra, k=1) # revisar los parámetros k=1 vecino
str(predicciones.03.compra)
length(predicciones.03.compra) # [1] 1000 filas
length(predicciones.03.compra[[1]]) # [1] 1 columna
head(predicciones.03.compra)
# calcular la precisión del modelo
error.03 <- mean(test.03.compra != predicciones.03.compra) # sacar la media de los valores de las pruebas de la columna objetivo es distinto de la predicción
error.03 # [1] 0.116, el error es el del 11%
precision.03 <- 1 - error.03
precision.03 # el modelo acierta el 88% de las veces si el cliente compra ó no
predicciones.03.compra <- knn(train.03.datos, test.03.datos, train.03.compra, k=5) # 5 vecinos
# calcular la precisión del modelo
error.03 <- mean(test.03.compra != predicciones.03.compra) # sacar la media de los valores de las pruebas de la columna objetivo es distinto de la predicción
error.03 # [1] 0.066 disminuye el error con el aumento de k vecinos
precision.03 <- 1 - error.03
precision.03 # el modelo acierta el 93.% de las veces si el cliente compra ó no
# cual es el mejor k para el modelo?
predicciones.03.compra <- NULL
errores <- NULL
for (i in 1:20){
set.seed(90)
predicciones.03.compra <- knn(train.03.datos, test.03.datos, train.03.compra, k=i) # i vecinos
errores[i] <- mean(test.03.compra != predicciones.03.compra)
# print(errores[i])
}
print(errores)
# [1] 0.116 0.117 0.074 0.073 0.066 0.065 0.062 0.061 0.058 0.059 0.059 0.059
# [13] 0.059 0.059 0.059 0.059 0.059 0.059 0.059 0.059
valores.k <- 1:20
tabla.errores <- data.frame(errores, valores.k)
print(tabla.errores)
# errores valores.k
# 1 0.116 1
# 2 0.117 2
# 3 0.074 3
# 4 0.073 4
# 5 0.066 5
# 6 0.065 6
# 7 0.062 7
# 8 0.061 8
# 9 0.058 9 # el k optimo es 9 porque más bajo para el error 0.058%
# 10 0.059 10
# 11 0.059 11
# 12 0.059 12
# 13 0.059 13
# 14 0.059 14
# 15 0.059 15
# 16 0.059 16
# 17 0.059 17
# 18 0.059 18
# 19 0.059 19
# 20 0.059 20
# 2. -> Decision trees: https://web.fdi.ucm.es/posgrado/conferencias/JorgeMartin-slides.pdf
# They represent and categorize a series of conditions that occur in
# succession in order to solve a classification problem.
# FIX: the original line `install.packages = ('rpart')` assigned the
# string 'rpart' to a variable named install.packages instead of
# installing the package.
install.packages('rpart')
library(rpart)
# https://www.rdocumentation.org/packages/rpart/versions/4.1-15/topics/kyphosis
# Data on children who have had corrective spinal surgery.
datos.22 <- kyphosis
str(datos.22)
# Goal: predict whether kyphosis (a spinal deformity) is present or
# absent after surgery, from Age, Number (of vertebrae involved) and
# Start (the number of the first/topmost vertebra operated on).
# FIX: rpart() takes a model formula as its first argument; the original
# passed the data set itself (`rpart(kyphosis, data=datos.22)`). The
# response column is `Kyphosis` (capital K), as used at L6780 below.
arbol.01 <- rpart(Kyphosis ~ ., data = datos.22)
printcp(arbol.01)
# Draw the decision tree - One Way (base graphics).
plot(arbol.01, uniform = TRUE, main='Árbol de decision')
text(arbol.01, use.n = TRUE, all = TRUE)
# Draw the decision tree - Two Way (rpart.plot).
install.packages("rpart.plot")
library(rpart.plot)
prp(arbol.01)
# 3. -> Random Forest -> Bosques Aleatorios - Algoritmo de clasificación
# combinación entre arboles de decisión en la que cada arbol selecciona una clase y luego se combinan las decisiones de cada uno para seleccionar la mejor opción.
# Maneja cientos de variables de entrada, eficiente en DB grandes
install.packages("randomForest")
library(randomForest, help, pos = 2, lib.loc = NULL)
help(randomForest)
rf.01 <- randomForest(Kyphosis ~ ., data= datos.22)
print(rf.01)
rf.01$predicted # muestra las 81 predicciones
# 3. -> Support Vector Machines (SVM) -> a family of supervised learning
# algorithms for classification and regression problems.
# Given a set of labelled training examples, an SVM builds a model that
# predicts the class of a new sample, separating the classes by the
# widest possible margin.
library (ISLR)
# FIX: `datos.23` was used below but never created — the assignment to
# the iris data set was missing (cf. `datos.24 <- iris` in the next
# section). Also removed the stray `help(rpart)`, a leftover from the
# decision-tree section.
datos.23 <- iris
# https://www.rdocumentation.org/packages/datasets/versions/3.6.2/topics/iris
factor ( datos.23 $ Species)
install.packages("e1071")
library(e1071)
help(svm)
svm.01 <- svm(Species ~ . , data=datos.23)
svm.01
predicc.svm <- predict(svm.01, datos.23[1:4]) # the columns holding the features
predicc.svm
tabla <- data.frame(datos.23,predicc.svm)
tabla
# 4. -> k-medias -> no supervisado, usado para clusterización
# Partición de un conjunto de "n" observaciones en "k" grupos, en el que cada observación pertenece al grupo cuyo valor medio es más cercano. - mineria de datos
library(ISLR)
datos.24 <- iris
str(datos.24)
help(kmeans)
library(ggplot2)
kmeans.01 <- ggplot(datos.24, aes(Petal.Length, Petal.Width, color = Species)) + geom_point(size=5)
print(kmeans.01)
set.seed(90)
clusters <- kmeans(datos.24[,1:4], center = 3) # todas las filas y las cuatro columnas, y 3 Species(setosa versicolor virginica); cantidad de sets que deben ser seleccionados,
clusters
# para ver mejor como ha hecho la clasificación podemos crear una tabla:
table(clusters$cluster, datos.24$Species)
# setosa versicolor virginica
# 1 0 2 36
# 2 50 0 0
# 3 0 48 14
library(cluster)
clusplot(datos.24, clusters$cluster, color=T, shade=T, lablels=0, lines=0)
help(clusplot)
# Redes neuronales => recibir, procesar y transmitir información
# Perceptrón => neurona artificial, la unión de varios crean una red neuronal artificial.
# Se compone de:
# - Canales/Señales de entrada - Dentritas
# - Función de activación - Soma o núcleo - (unión sumadora)
# - Canal de salida y Axón.
# n
# Σ WiXi + b
# i=0
install.packages('MASS')
library(MASS)
# https://www.kaggle.com/andyxie/regression-with-r-boston-housing-price
datos.25 <- Boston
str(datos.25)
head(datos.25)
pairs(datos.25, main="Boston Data") # para ver el plot de todas las variables del data Set
# revisar nulls
any(is.na(datos.25))
# normalizar datos, calculamos el máximo y el minimo de cada una de las columnas
maximo <- apply(datos.25,2,max) # aplica el máximo a las columnas
print(maximo)
minimo <- apply(datos.25,2,min) # aplica el mínimo a las columnas
print(minimo)
max(datos.25$crim)
min(datos.25$crim)
# normalizar los datos - para ver las similitudes entre unas caracteristicas y otras
datos.25.normalizados <- scale(datos.25, center=minimo, scale=maximo-minimo)
datos.25.normalizados <- as.data.frame(datos.25.normalizados)
library(caTools)
# Crea una variable de tal manera que de todos los datos de la columna Objetivo "medv" selecciona aleatoriamente el 70% y la marca como "TRUE" y el otro 30% lo marca como "FALSE"
sample.25 <- sample.split(datos.25.normalizados$medv, SplitRatio = 0.7)
# train.25 => Creamos los datos que tienen TRUE para entrenamiento
train.25 <- subset(datos.25.normalizados, sample.25 == TRUE)
# test.25 => Creamos los datos que tienen FALSE para test
test.25 <- subset(datos.25.normalizados, sample.25 == FALSE)
install.packages("neuralnet")
library(neuralnet)
# Crear la formula para el modelo, nuestro objetivo "medv" lo calcularemos utilizando las suma de las demás caracteristicas
formula.25 <- medv ~ crim + zn + indus + chas + nox + rm + age + dis + rad + tax + ptratio + black + lstat
# modelo
help(neuralnet)
red_neuronal.01 <- neuralnet(formula.25, train.25, hidden = c(5,3), linear.output = TRUE) # Dos capas ocultas de 5 y de 3 neuronas respectivamente,
# para ajustar más se puede aumentar las capas ocultas y las neuronas
plot(red_neuronal.01)
# Explicación del grafico:
# Entrada => todas las variables exepto nuestro objetivo "medv"
# capa de 5 neuronas
# capa de 3 neuronas
# resultado final => estimación del valor medio medv
# cada uno de los enlaces tendrá un peso
# predicciones
predicc.25 <- compute(red_neuronal.01, test.25[1:13])
str(predicc.25) # lista de elementos de las predicciones del dataSet
predicc.25.Desnormalizados <- predicc.25$net.result * (max(datos.25$medv) - min(datos.25$medv)) + min(datos.25$medv)
test.25.Desnormalizados <- (test.25$medv) * (max(datos.25$medv) - min(datos.25$medv)) + min(datos.25$medv)
error.25 <- sum((test.25.Desnormalizados - predicc.25.Desnormalizados)^2)/ nrow(test.25)
error.25
errores.25 <- data.frame(test.25.Desnormalizados, predicc.25.Desnormalizados)
errores.25
library(ggplot2)
# Relación real vs predicciones
neuronal.01 <- ggplot(errores.25, aes(x=test.25.Desnormalizados, y=predicc.25.Desnormalizados)) + geom_point()
print(neuronal.01)
|
875f5eecb96e7d491b757fbfbf3facced7ea1610
|
7f204da91304186ead43ac64f281b9531ed36d07
|
/man/dini.surface.Rd
|
5d9f8f6067b4a6cd89cd558dbdd34537a09db4cb
|
[] |
no_license
|
schloerke/geozoo
|
3b61808be45f056e0e138e49ed50bc185bf6fd7c
|
f633781f098779f8ad2b95c19811b11738b39068
|
refs/heads/master
| 2021-01-17T13:54:24.858588
| 2016-05-06T03:06:47
| 2016-05-06T03:06:47
| 16,112,665
| 5
| 0
| null | 2016-01-13T18:04:53
| 2014-01-21T18:05:20
|
R
|
UTF-8
|
R
| false
| true
| 645
|
rd
|
dini.surface.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parametric.R
\name{dini.surface}
\alias{dini.surface}
\title{Dini Surface}
\usage{
dini.surface(n = 10000, a = 1, b = 1)
}
\arguments{
\item{n}{number of points}
\item{a}{outer radius of object}
\item{b}{space between loops}
}
\value{
\item{points }{location of points}
\item{edges }{edges of the object (null)}
}
\description{
A function to generate a dini surface.
}
\examples{
## Generates a Dini Surface
dini.surface(n = 1000, a = 1, b = 1)
}
\author{
Barret Schloerke
}
\references{
\url{http://schloerke.github.io/geozoo/mobius/other/}
}
\keyword{dynamic}
|
4af1fd1eb535c6a38f3ce3e71182f07361526bd9
|
eec11787a7884f1ad0658b8955d183e5b67cbc77
|
/package/mcmc/man/logit.Rd
|
aa9144ef73bf1b4875df843dcc76e1f8f9b5138f
|
[
"ICU"
] |
permissive
|
cjgeyer/mcmc
|
5e917b5c3b7dc696c5173d06432421bbf647df7a
|
225fa82eefcdc6d75fd7ad21acfd9e5373ef9fa2
|
refs/heads/master
| 2021-01-13T02:38:48.445095
| 2020-03-24T04:41:02
| 2020-03-24T04:41:02
| 2,998,392
| 18
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 524
|
rd
|
logit.Rd
|
\name{logit}
\docType{data}
\alias{logit}
\title{Simulated logistic regression data.}
\description{
Simulated data for logistic regression: four quantitative predictors (x1--x4) and a Bernoulli response (y).
}
\usage{data(logit)}
\format{
A data frame with variables
\describe{
\item{x1}{quantitative predictor.}
\item{x2}{quantitative predictor.}
\item{x3}{quantitative predictor.}
\item{x4}{quantitative predictor.}
\item{y}{Bernoulli response.}
}
}
\examples{
library(mcmc)
data(logit)
out <- glm(y ~ x1 + x2 + x3 + x4, family = binomial, data = logit)
summary(out)
}
\keyword{datasets}
|
427e4905aef32984da59179d13bfb7a05dc296d7
|
a9550fbcc10bdda5cc6ab8a985a0e8c15c98c100
|
/man/treePlot.Rd
|
6e66a030f1dfe7d87f41d30d85b4c6c389ae6944
|
[] |
no_license
|
markrobinsonuzh/treeAGG
|
fa6a7705b1a0d74dfedbb18a43d309a818f237ef
|
e26b9401369ddc3682677d644232cc0205e5fdeb
|
refs/heads/master
| 2021-03-16T07:44:55.688468
| 2019-03-23T14:07:19
| 2019-03-23T14:07:19
| 98,645,726
| 2
| 1
| null | 2019-03-23T14:07:20
| 2017-07-28T12:04:06
|
R
|
UTF-8
|
R
| false
| true
| 5,448
|
rd
|
treePlot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/treePlot.R
\name{treePlot}
\alias{treePlot}
\title{Visualize the phylogenetic tree}
\usage{
treePlot(tree, branch = NULL, col.branch = "blue",
col.other = "grey", point = NULL, col.point = "orange",
size.point = 2, zoomNode = NULL, zoomLevel = NULL, zoomScale = 8,
legend = FALSE, legend.theme = list(NULL), legend.title = c(point =
"Title_point", branch = "Title_branch"), legend.label = NULL,
size.line.legend = 2, size.point.legend = 3, size = 1, ...)
}
\arguments{
\item{tree}{A phylo object}
\item{branch}{A vector of node numbers labels to specify the branches to be
colored. Each branch is represented by its branch node. A leaf node
reprents the edge connecting the leaf and its parent.}
\item{col.branch}{A vector of colors. Its length should be one or equals to
the length of \strong{branch}. If \strong{col.branch} has the same length
as \strong{branch}, the branches are colored correspondingly with the
\strong{col.branch}. The default is blue.}
\item{col.other}{A color for the branches other than those specified in
\strong{branch}}
\item{point}{A vector of node numbers or node labels to specify the
locations to add points in the tree}
\item{col.point}{A color for the \strong{point}. It has length equal to one.}
\item{size.point}{The size for the \strong{point}. It has length equal to
one.}
\item{zoomNode}{A vector of nodes to be zoomed in. If default (NULL), the
tree is not zoomed in.}
\item{zoomLevel}{A numeric vector. Its length is equal to 1 or equal to the
length of \strong{zoomNode}. If default (NULL), a leaf is zoomed in its
direct parent level and an internal node is zoomed in its own level.}
\item{zoomScale}{A numeric vector. Its length is equal to one or equal to the
length of \strong{zoomNode}. If \strong{zoomScale} has the same length as
\strong{zoomNode}, the branches are zoomed in with different scales
corresponding to the value of \strong{zoomScale}. If default (NULL), tree
is not zoomed in.}
\item{legend}{TRUE or FALSE. Default is FALSE. If TRUE, the legend is
created.}
\item{legend.theme}{A list of arguments used for the theme in ggplot2 package
(see \code{\link[ggplot2]{theme}} ) and starting with "legend."}
\item{legend.title}{A vector to specify the title of the legend. It must be
named with "branch" and "point" to match with the argument \strong{branch}
and \strong{point}.}
\item{legend.label}{A list with three members: "col.branch", "col.other", and
"col.point". The elements order in each member matches with the
corresponding argument \strong{col.branch}, \strong{col.other} and
\strong{col.point}, and will display in the legend.}
\item{size.line.legend}{The line size shown in the legend for \strong{branch}}
\item{size.point.legend}{The point size shown in the legend for
\strong{point}.}
\item{...}{see also \code{\link[ggtree]{ggtree}}}
}
\value{
A tree plot
}
\description{
\code{treePlot} visualizes a phylogenetic tree.
}
\details{
treePlot is created based on the \pkg{ggtree} and \pkg{ggplot2}. We
could combine geoms from these two packages with \code{treePlot} to add
geoms.
}
\examples{
data(bigTree)
# If we want to color two branches with branch node 1000 and 1400
treePlot(tree = bigTree, branch = c(1000, 1400),
zoomNode = 1000, zoomScale = 10)
# use col.branch and col.other to specify colors
treePlot(tree = bigTree, branch = c(1000, 1400),
col.branch = c("salmon", "blue"), col.other = "grey40")
# add legend to the colored branches
treePlot(tree = bigTree, branch = c(1000, 1400),
col.branch = c("salmon", "blue"), col.other = "grey40",
legend = TRUE, legend.label = list(col.branch = c("up", "down")))
# change legend title
p <- treePlot(tree = bigTree, branch = c(1000, 1400),
col.branch = c("salmon", "blue"), col.other = "grey40",
legend = TRUE,
legend.label = list(col.branch = c("Go up", "Go down")),
legend.title = c("branch" = "Abundance"))
# change legend position (combine with ggplot2 package)
library(ggplot2)
p + ggplot2::theme(legend.position = "bottom")
# change legend position use legend.theme
treePlot(tree = bigTree, branch = c(1000, 1400),
col.branch = c("salmon", "blue"), col.other = "grey40",
legend = TRUE,
legend.label = list(col.branch = c("Go up", "Go down")),
legend.title = c("branch" = "Truth"),
legend.theme = list(legend.position = "bottom"))
# add points
treePlot(tree = bigTree, branch = c(1000, 1400),
col.branch = c("salmon", "blue"), col.other = "grey40",
legend = TRUE,
legend.label = list(col.branch = c("Go up", "Go down")),
legend.title = c("branch" = "Truth"),
legend.theme = list(legend.position = "bottom"),
point = c(500, 5, 10))
# add points label in legend
treePlot(tree = bigTree, branch = c(1000, 1400),
col.branch = c("salmon", "blue"), col.other = "grey40",
legend = TRUE,
legend.label = list(col.branch = c("Go up", "Go down"),
col.point = "Found"),
legend.title = c("branch" = "Truth", "point"= "Estimate"),
legend.theme = list(legend.position = "bottom"),
point = c(500, 5, 10))
# add points label in legend
treePlot(tree = bigTree, branch = c(1000, 1400),
col.branch = c("salmon", "blue"), col.other = "grey40",
legend = TRUE,
legend.label = list(col.branch = c("Go up", "Go down"),
col.point = "Found", col.other = "Same"),
legend.title = c("branch" = "Truth", "point"= "Estimate"),
legend.theme = list(legend.position = "bottom"),
point = c(500, 5, 10))
}
\author{
Ruizhu Huang
}
|
823ae9c090af667c7c5e637456bf5e62fee24d6a
|
b66de58525899583058979bc7da771f36fe7e0db
|
/man/importWAQN.Rd
|
49736c82b26383887f4565cf8a0f14f773cd4554
|
[] |
no_license
|
keyonghu/openair
|
4e90d78e23f170e3299870fd88bcd616cdee14cf
|
a432ec912180678b7f3034eaec98c9b0e36485b9
|
refs/heads/master
| 2021-05-26T03:10:21.854502
| 2020-04-06T08:04:48
| 2020-04-06T08:04:48
| 254,028,347
| 1
| 0
| null | 2020-04-08T08:24:50
| 2020-04-08T08:24:50
| null |
UTF-8
|
R
| false
| true
| 2,933
|
rd
|
importWAQN.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/importWAQN.R
\name{importWAQN}
\alias{importWAQN}
\title{Welsh Air Quality Network data import for openair}
\usage{
importWAQN(
site = "card",
year = 2018,
pollutant = "all",
meta = FALSE,
to_narrow = FALSE
)
}
\arguments{
\item{site}{Site code of the WAQN site to import e.g. "card" is Cardiff Centre.
Several sites can be imported with \code{site = c("card", "cae6")}
--- to import Cardiff Centre and Hafod-yr-ynys Roadside for example.}
\item{year}{Year or years to import. To import a sequence of years from 1990
to 2000 use \code{year = 1990:2000}. To import several specfic years use
\code{year = c(1990, 1995, 2000)} for example.}
\item{pollutant}{Pollutants to import. If omitted will import all pollutants
from a site. To import only NOx and NO2 for example use \code{pollutant =
c("nox", "no2")}.}
\item{meta}{Should meta data be returned? If \code{TRUE} the site type,
latitude and longitude are returned.}
\item{to_narrow}{By default the returned data has a column for each
pollutant/variable. When \code{to_narrow = TRUE} the data are stacked into
a narrow format with a column identifying the pollutant name.}
}
\value{
Returns a data frame of hourly mean values with date in POSIXct
class and time zone GMT.
}
\description{
Function for importing hourly mean Welsh Air Quality Network (WAQN)
archive data files for use with the \code{openair} package. Files are
imported from a remote server operated by Ricardo that provides air quality data
files as R data objects.
}
\details{
The \code{importWAQN} function has been written to make it easy to import
data from the Welsh Air Quality Network (WAQN) ---
\url{https://airquality.gov.wales/}. Ricardo have provided
.RData files (R workspaces) of all individual sites and years for the WAQN.
These files are updated on a daily basis. This approach requires a link to
the Internet to work.
For a list of up to date site codes and site information, see
\code{\link{importMeta}} and in particular \code{importMeta(source =
"waqn")}.
The site codes and pollutant names can be upper or lower case. The function
will issue a warning when data less than six months old is downloaded, which
may not be ratified.
The function also returns wind speed (ws) and direction (wd) for more recent
years derived from WRF (regional meteorological model).
}
\examples{
## see what sites are available
\dontrun{meta <- importMeta("waqn")}
## import all pollutants from Cardiff Centre
\dontrun{cardiff <- importWAQN(site = "card", year = 2010:2018)}
## import all pollutants from two sites for 2018
\dontrun{all <- importWAQN(site = c("card", "cae6"), year = 2018)}
}
\seealso{
See \code{\link{importAURN}} for data elsewhere in the UK and
\code{\link{importKCL}} for importing comprehensive data in and around
London.
}
\author{
David Carslaw and Trevor Davies
}
\keyword{methods}
|
d3f1846bdd31f39618f3274d07f43cd2e1ac42d7
|
15654deeaf224a14bd5e5db656179e991e498caa
|
/man/get_alberta_case_data.Rd
|
c0d817ed40f41286bf52a02f1e4a9bad8da8c924
|
[
"MIT"
] |
permissive
|
mountainMath/CanCovidData
|
8451475655fa84ce6d4b4e59db58cd17a1ec4680
|
b7782d9d96e0f1b9083fd38337bc1ae593b818ca
|
refs/heads/master
| 2022-08-03T14:03:41.422542
| 2022-07-24T01:02:08
| 2022-07-24T01:02:08
| 251,169,972
| 9
| 3
|
NOASSERTION
| 2020-08-05T04:48:04
| 2020-03-30T01:16:04
|
R
|
UTF-8
|
R
| false
| true
| 504
|
rd
|
get_alberta_case_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/canada_covid_data_import.R
\name{get_alberta_case_data}
\alias{get_alberta_case_data}
\title{import and recode Alberta case data from Alberta Open Data. Tends to have a day lag}
\usage{
get_alberta_case_data()
}
\value{
a wide format data frame with one row per case with Health Region, gender, age group,
status, case type
}
\description{
import and recode Alberta case data from Alberta Open Data. Tends to have a day lag
}
|
2b2f8f0c9081626318c01e54235448fee6594a65
|
9a006d47b7d13beea5005c543e99d6c123736e96
|
/plotNetwork.R
|
ba7135db40f2c541705eff19e2794665e9f0cbef
|
[] |
no_license
|
yenlow/emailNetwork
|
e7924a1d952337cc814e9ec7dca960179c853496
|
9c14977c9abb53ab598988e9ca55c7002490c704
|
refs/heads/master
| 2020-12-24T13:18:17.554466
| 2015-01-24T00:40:31
| 2015-01-24T00:40:31
| 29,760,187
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,248
|
r
|
plotNetwork.R
|
# Plot network of sender -> recipient activity from Enron email logs.
# Input:  enron.csv (output from parseEmail.py); columns V1 = sender,
#         V2 = recipient, V3 = date, V4 = time (no header row).
# Output: emailNetwork.png -- directed graph restricted to high-traffic users.
# 23-Jan-15 Yen Low
#source("~/scripts/R/utils.R")
#source("http://bioconductor.org/biocLite.R")
#installnewpackage(c("graph", "Rgraphviz", "chron"))
#biocLite("graph")
#biocLite("Rgraphviz")
require("igraph")
#require("Rgraphviz") #doesn't allow edge weights
require("chron")

# Read sender, recipient, date, time.
edges <- read.csv("enron.csv", header = FALSE, as.is = TRUE)

# All user ids appearing as either sender or recipient.
userids <- unique(c(edges$V1, edges$V2))

# Pre-allocate the (sender x recipient) adjacency matrix of message counts,
# indexed by user id in both dimensions.
adjMat <- matrix(0, nrow = length(userids), ncol = length(userids))
rownames(adjMat) <- userids
colnames(adjMat) <- userids

# Parse dates/times (kept on the table for reference; not used by the plot).
edges$date <- chron(dates = edges$V3, times = edges$V4,
                    format = c(dates = "Y/m/d", times = "h:m:s"))

# Keep only the sender-recipient pair.
edges <- edges[, -c(3, 4)]

# Count messages per (sender, recipient) pair.
# seq_len() is safe for an empty edge list (1:nrow(edges) would yield c(1, 0)).
for (i in seq_len(nrow(edges))) {
  from <- edges$V1[i]
  to <- edges$V2[i]
  adjMat[from, to] <- adjMat[from, to] + 1
}

####### Using igraph
gwt <- graph.adjacency(adjMat, diag = FALSE, mode = "directed", weighted = TRUE)

# De-clutter the plot: drop low-degree vertices, then low-weight edges.
threshold <- 50
deletedVertices <- names(degree(gwt))[degree(gwt) < threshold]
gwt2 <- gwt - deletedVertices
deletededges <- E(gwt2)[abs(E(gwt2)$weight) < threshold]
gwt2 <- gwt2 - deletededges
#plot(gwt2)

# Force-directed layout, weighted by email volume.
layout <- layout.fruchterman.reingold(gwt2, weights = E(gwt2)$weight,
                                      area = 1000 * vcount(gwt2)^2)

png("emailNetwork.png", width = 7, height = 7, units = "in", res = 150)
plot(gwt2, layout = layout, vertex.shape = "circle", vertex.size = 2,
     vertex.color = "white", vertex.label = V(gwt2)$name,
     vertex.label.cex = 1, vertex.label.family = "Helvetica",
     vertex.label.color = "black", vertex.label.font = 2,
     edge.arrow.width = 0.5, edge.width = E(gwt2)$weight / 100)
dev.off()

####### Using RGraphViz (kept for reference; Rgraphviz lacks edge weights)
#create graph object
#g2 <- new("graphAM", adjMat = adjMat, edgemode="directed",
#          values=list(weight="weight"))
#deg = degree(g2)
#biggies = names(deg$outDegree[which(deg$outDegree > 10)])
#g2a = as(g2, "graphNEL")
#gsa = subGraph(biggies, g2a)
#png("emailNetwork.png",width=7,height=7,units="in",res=150)
#plot(gsa, attrs=list(node=list(color="black", fillcolor="black", shape="plaintext", fontcolor="black"),
#                     edge=list(color="gray")))
#dev.off()
|
62866544b46fc38759a6b8a9a2951128dc5bcd83
|
aa12f960ef508476e6ea7aa3da09aee0d917d35b
|
/VG_Sexism.R
|
4010d0987c6bce822fc899e85df6a9d5416c2d1f
|
[] |
no_license
|
DalJeanis/VG_Sexism
|
1ea04e156ed846d1c8acc1c97a6583702b243b7e
|
c89e3c4b4527425c86c90b7aa816148223a037e3
|
refs/heads/master
| 2021-01-15T20:52:21.670903
| 2015-09-03T04:05:10
| 2015-09-03T04:05:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,381
|
r
|
VG_Sexism.R
|
#####Import data and Check
##Read csv data table (File = "Data")
##NOTE(review): file.choose() is interactive, so this script cannot run
##unattended; consider a fixed file path for reproducibility.
data = read.table(file.choose(), na.strings =".", header=T, sep=",")
##Attach and check the headers of each column
##NOTE(review): attach() places *copies* of the columns on the search path;
##assigning to those names later does NOT modify `data` itself.
attach(data)
names(data)
############################
####General housekeeping####
##Load the necessary libraries
library(plyr)       # ddply() group summaries
library(nlme)
library(MASS)
library(lattice)
library(ggplot2)
library(lme4)
library(effects)    # effect() for plotting model predictions
library(QuantPsyc)
##Ensure that outcome and treatment are treated as categorical.
##BUG FIX: the previous `GameOutcome <- factor(GameOutcome)` (after attach())
##only created global copies; the columns inside `data` -- which every model
##below uses via `data=data` -- were left unconverted. Convert in place.
data$GameOutcome <- factor(data$GameOutcome)
data$Treatment <- factor(data$Treatment)
##Add new columns
##Add Difference in Max skill
##Positive values mean the focal player outperformed the treatment player.
data$SkillDiff <- data$MaxSkillLevel - data$TreatmentMaxSkill
##Add Difference in Kills
data$KillDiff <- data$Kills-data$TreatmentKills
##Add Difference in Deaths
data$DeathDiff <- data$Deaths-data$TreatmentDeaths
##Subset data by sex
fem = subset(data,Treatment=="Female")
mal = subset(data,Treatment=="Male")
##Subset data from 'data' that has less than 20 negative comments to test for significance without outliers.
dataNoOutliers = subset(data, data$Negative<20)
##Standardize positive statements (data) and negative statements (dataNoOutliers)
##scale(center = TRUE, scale = TRUE) yields z-scores (mean 0, sd 1), used
##below so the model coefficients can be read as standardized betas.
data$stdPos = scale(data$Positive, center = TRUE, scale = TRUE)
data$stdKills = scale(data$Kills, center = TRUE, scale = TRUE)
data$stdDeaths = scale(data$Deaths, center = TRUE, scale = TRUE)
data$stdSkill = scale(data$MaxSkillLevel, center = TRUE, scale = TRUE)
data$stdSkillDiff = scale(data$SkillDiff, center = TRUE, scale = TRUE)
data$stdKillDiff = scale(data$KillDiff, center = TRUE, scale = TRUE)
data$stdDeathDiff = scale(data$DeathDiff, center = TRUE, scale = TRUE)
dataNoOutliers$stdNeg = scale(dataNoOutliers$Negative, center = TRUE, scale = TRUE)
dataNoOutliers$stdKills = scale(dataNoOutliers$Kills, center = TRUE, scale = TRUE)
dataNoOutliers$stdDeaths = scale(dataNoOutliers$Deaths, center = TRUE, scale = TRUE)
dataNoOutliers$stdSkill = scale(dataNoOutliers$MaxSkillLevel, center = TRUE, scale = TRUE)
dataNoOutliers$stdSkillDiff = scale(dataNoOutliers$SkillDiff, center = TRUE, scale = TRUE)
dataNoOutliers$stdKillDiff = scale(dataNoOutliers$KillDiff, center = TRUE, scale = TRUE)
dataNoOutliers$stdDeathDiff = scale(dataNoOutliers$DeathDiff, center = TRUE, scale = TRUE)
#######################
####Checking counts####
##Checking counts of any variable (in this example, the number of male and female trials)
table(data$Treatment)
table(data$Game)
table(data$Sexism)
####################
####ModelTesting####
##All comment-count models below are Poisson GLMs with a log link; anova()
##with test="Chisq" gives sequential likelihood-ratio tests.
#########################
####Positive Comments####
##Test full model to examine the correlation between the number of Positive Comments and game outcome, treatment (condition), Kills, Skill level, and interactions of condition with Kills and Skill Level.
modelAGGPos<-glm(Positive ~ GameOutcome + Treatment + Kills + Deaths + MaxSkillLevel + Treatment*Kills + Treatment*Deaths + Treatment*MaxSkillLevel, (family = poisson(link="log")), data=data)
anova(modelAGGPos, test="Chisq")
##Use standardized model to get standardized Betas
modelAGGPos.std<-glm(Positive ~ GameOutcome + Treatment + stdKills + stdDeaths + stdSkill+ Treatment*stdKills + Treatment*stdDeaths + Treatment*stdSkill, (family = poisson(link="log")), data=data)
summary(modelAGGPos.std)
#####
##Test full model to examine how relative performance to The Player affected positive comments with treatment as an interaction
modelAGGPosPerf<-glm(Positive ~ Treatment + SkillDiff + KillDiff + DeathDiff + Treatment*SkillDiff + Treatment*KillDiff + Treatment*DeathDiff, (family = poisson(link="log")), data=data)
anova(modelAGGPosPerf, test="Chisq")
##Use standardized model to get standardized Betas
modelAGGPosPerf.std<-glm(Positive ~ Treatment + stdSkillDiff + stdKillDiff + stdDeathDiff + Treatment*stdSkillDiff + Treatment*stdKillDiff + Treatment*stdDeathDiff, (family = poisson(link="log")), data=data)
summary(modelAGGPosPerf.std)
##Plot the predicted lines for the model
##Figure 1
modelAGGPos.eff <-effect("Treatment*MaxSkillLevel",modelAGGPos, se=TRUE)
plot(modelAGGPos.eff, rescale.axis=F, rug=FALSE, ylab="Number of Positive Comments",
     xlab="Maximum Skill Level Acheived", multiline= T)
##Figure 2
modelAGGPosPerf.eff <-effect("Treatment*SkillDiff",modelAGGPosPerf, se=TRUE)
plot(modelAGGPosPerf.eff, rescale.axis=F, rug=FALSE, ylab="Number of Positive Comments",
     xlab="Difference in Maximum Skill Level Acheived", multiline= T)
#########################
####Negative comments####
##Test full model to examine the correlation between the number of Negative Comments and game outcome, Treatment (i.e. condition), Kills, Skill level, and interactions of condition with Kills and Skill Level.
##Removing the two outliers
modelAGGNeg<-glm(Negative ~ GameOutcome + Treatment + Kills + Deaths + MaxSkillLevel+ Treatment*Kills + Treatment*Deaths + Treatment*MaxSkillLevel, (family = poisson(link="log")), data=dataNoOutliers)
anova(modelAGGNeg, test="Chisq")
##Use standardized model to get standardized Betas
modelAGGNeg.std<-glm(Negative ~ GameOutcome + Treatment + stdKills + stdDeaths + stdSkill+ Treatment*stdKills + Treatment*stdDeaths + Treatment*stdSkill, (family = poisson(link="log")), data=dataNoOutliers)
summary(modelAGGNeg.std)
##Get the mean number of negative comments in the different treatments
ddply(dataNoOutliers, c("Treatment"), summarise,
      N    = length(Negative),
      mean = mean(Negative),
      sd   = sd(Negative),
      se   = sd / sqrt(N)
)
##Figure 3
modelAGGNeg.effDeath <-effect("Treatment*Deaths",modelAGGNeg, se=TRUE)
plot(modelAGGNeg.effDeath, rescale.axis=F, rug=FALSE, ylab="Number of Negative Comments",
     xlab="Maximum Skill Level Achieved", multiline= T)
modelAGGNeg.effKills <-effect("Treatment:Kills",modelAGGNeg, se=TRUE)
plot(modelAGGNeg.effKills, rescale.axis=F, rug=FALSE, ylab="Number of Negative Comments",
     xlab="Maximum Skill Level Achieved", multiline= T)
#####
##Test full model to examine how relative performance to The Player affected negative comments with treatment as an interaction
modelAGGNegPerf<-glm(Negative ~ Treatment + SkillDiff + KillDiff + DeathDiff + Treatment*SkillDiff + Treatment*KillDiff + Treatment*DeathDiff, (family = poisson(link="log")), data=dataNoOutliers)
anova(modelAGGNegPerf, test="Chisq")
##Use standardized model to get standardized Betas
modelAGGNegPerf.std<-glm(Negative ~ Treatment + stdSkillDiff + stdKillDiff + stdDeathDiff + Treatment*stdSkillDiff + Treatment*stdKillDiff + Treatment*stdDeathDiff, (family = poisson(link="log")), data=dataNoOutliers)
summary(modelAGGNegPerf.std)
########################
####Neutral comments####
##Test full model to examine the correlation between the number of Neutral Comments and game outcome, Treatment (i.e. condition), Kills, Skill level, and interactions of condition with Kills and Skill Level.
##Removing the two outliers
modelAGGNeutral<-glm(Neutral ~ GameOutcome + Treatment + Kills + Deaths + MaxSkillLevel+ Treatment*Kills + Treatment*Deaths + Treatment*MaxSkillLevel, (family = poisson(link="log")), data=data)
anova(modelAGGNeutral, test="Chisq")
##Use standardized model to get standardized Betas
modelAGGNeutral.std<-glm(Neutral ~ GameOutcome + Treatment + stdKills + stdDeaths + stdSkill+ Treatment*stdKills + Treatment*stdDeaths + Treatment*stdSkill, (family = poisson(link="log")), data=data)
summary(modelAGGNeutral.std)
#####
##Test full model to examine how relative performance to The Player affected negative comments with treatment as an interaction
modelAGGNeutralPerf<-glm(Neutral ~ Treatment + SkillDiff + KillDiff + DeathDiff + Treatment*SkillDiff + Treatment*KillDiff + Treatment*DeathDiff, (family = poisson(link="log")), data=data)
anova(modelAGGNeutralPerf, test="Chisq")
##Use standardized model to get standardized Betas
modelAGGNeutralPerf.std<-glm(Neutral ~ Treatment + stdSkillDiff + stdKillDiff + stdDeathDiff + Treatment*stdSkillDiff + Treatment*stdKillDiff + Treatment*stdDeathDiff, (family = poisson(link="log")), data=data)
summary(modelAGGNeutralPerf.std)
#####################################################################
####Testing for sexist responses -- Although only 11 sexist cases####
##Logistic regression (binomial, logit link) on the female-condition subset.
table(fem$Sexism)
Sexism=subset(data, Treatment=="Female")
modelSexism<-glm(Sexism ~ SkillDiff + KillDiff + DeathDiff, (family = binomial(link="logit")), data=Sexism)
anova(modelSexism, test="Chisq")
|
8fdf61aa572435357cd201b5944eb415ef506c2d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/gap.datasets/examples/hr1420.Rd.R
|
e7de974dabb6c5155731c88df0d6996cac7da69d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 191
|
r
|
hr1420.Rd.R
|
library(gap.datasets)
### Name: hr1420
### Title: An example data for Manhattan plot with annotation (mhtplot2)
### Aliases: hr1420
### Keywords: datasets
### ** Examples
head(hr1420)
|
b897811e67773cd1c829b18748f2517effb68698
|
8e0f351a163a775e07a6f9839a32f8a093d630bd
|
/barplots.R
|
46d1d860a5996d47067119ae4b2aa196be738497
|
[] |
no_license
|
GreggLab/R_Code
|
226e7d3ae29ad79aea5ee8925228c43fc462d404
|
aac59baa9851cfdd2b54b245ee36c8ec54be2ae2
|
refs/heads/master
| 2020-03-21T21:13:59.222508
| 2018-07-23T13:43:22
| 2018-07-23T13:43:22
| 139,053,555
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,399
|
r
|
barplots.R
|
#Stacked Community Plots
#6-19-18
#Adapted from Christine Bassis, Ph. D. code
#Adapted by Zach Carlson, email: zcarlson@umich.edu
library(RColorBrewer) #gives color schemes
library(ggplot2) #data plotter
library(gplots) #data plotter
library(vegan) #ecology package, diversity analysis, etc.
library(plyr) #tool for splitting, applying, and combining data
library(stringr) #loaded to use str_replace_all() which removes all special characters in a string
library(tm) #loaded to use removeNumbers() which removes any number in a string
library(shiny)
######ENTER IN VALUES######
TIMEPOINT <- as.character("8WK")
TITLE <- "8WK Relative Abundances"
TAXONOMY.FILE <- "combined.final.0.03.cons.taxonomy"
TIMEPOINT.FILE <- "combined.final.8wk.shared"
DESIGN.FILE <- 'combined_8WK.design.txt'#Cols: Row, SampleID, Group
PHYLA <- c("Actinobacteria", "Bacteria", "Bacteroidetes", "CandidatusSaccharibacteria","Chloroflexi", "Deferribacteres", "Firmicutes", "Proteobacteria", "Tenericutes", "Verrucomicrobia")
REL.FILE <- "otu.rel.t."
TXT <- ".txt"
TAXONOMY.COLOR.FILE <- "taxonomy.color."
OTU.COMPLETE <- "otu.complete."
###########################
#get into correct directory
# setwd("~/Downloads/Work/MothurFiles/C7C9_Combined/Stacked_Community_Plots") #mac users
setwd("H:/My Documents/MothurFiles/C7C9_Combined/Stacked_Community_Plots") #for windows users
#read in raw files
tax <- read.table(file=TAXONOMY.FILE,
row.names = 1,
header=TRUE,
check.names=FALSE,
comment.char="") #will become otu.class
otu.info <- read.table(file=TIMEPOINT.FILE, header=TRUE, row.names = 2) #will become rm_g
meta <- read.table(file=DESIGN.FILE, row.names = 1, header =TRUE)
#get length values to use later on
SAMPLE.LENGTH <- as.numeric(nrow(meta))
TAX.SAMPLE.LENGTH <- SAMPLE.LENGTH + 4 #adds OTU, phyla, genus, color
TAXONOMY.LENGTH <- as.numeric(nrow(tax))
otu.info <- subset(otu.info, select = -c(label, numOtus))
PHYLA.LENGTH <- length(PHYLA)
###CREATE OTU.CLASS FILE###
# - pull phlyum
# - pull genus
##following code changes label "OTU0001" to corresponding genus/phyla.##
#pull just taxonomy string
taxonomy <- as.vector(tax[,"Taxonomy"])
#create table to store phyla and genus values
otu.class <- data.frame(matrix(NA, nrow = TAXONOMY.LENGTH, ncol = 5)) #5 for OTU, Size, Phylum, Genus, Full
colnames(otu.class) <- c("OTU", "Size", "Phylum", "Genus", "Full")
otu.class$OTU <- rownames(tax) #add otu numbers
otu.class$Full <- tax$Taxonomy #add taxonomy
otu.class$Size <- tax$Size #add size
#changes lengthy taxonomic info to just a string of the phlya and genus
#sapply(): applys a function across a string
#strsplit(): splits string based on the character ";" and selects the 3rd value or 4th value (phlya and genus respectively)
for(i in 1:TAXONOMY.LENGTH){
string <- as.character(taxonomy[i]) #maintains character status
phyla <- sapply(strsplit(string,";"), `[`, 2) #should be 2
genus <- sapply(strsplit(string,";"), `[`, 6) #should be 6
phyla <- removeNumbers(phyla) #remove numbers
genus <- removeNumbers(genus) #remove numbers
phyla <- str_replace_all(phyla, "[[:punct:]]", "") #removes "(" ")" and all special characters
genus <- str_replace_all(genus, "[[:punct:]]", "") #removes "(" ")" and all special characters
phyla <- str_replace_all(phyla, "unclassified", "") #removes "unclassified"
genus <- str_replace_all(genus, "unclassified", "") #removes "unclassified"
otu.class[i,"Phylum"] <- phyla
otu.class[i,"Genus"] <- genus
}
###OTU.CLASS IS COMPLETED###
###CREATE RM_G FILE###
###PART 1. CREATE META COLOR FILE###
# One row per OTU with its phylum, genus and a plotting colour.
taxonomy.color <- data.frame(matrix(NA, nrow = TAXONOMY.LENGTH, ncol = 4)) #4 for OTU, Phylum, Genus, Color
colnames(taxonomy.color) <- c("OTU", "Phylum", "Genus", "Color")
taxonomy.color$OTU <- rownames(tax) #add otu numbers
taxonomy.color$Phylum <- otu.class$Phylum
taxonomy.color$Genus <- otu.class$Genus
# Pastel1 only offers 9 colours, so append a 10th to cover every phylum in PHYLA.
colors <- as.vector(colorRampPalette(brewer.pal(9,"Pastel1"))(9))
colors[10] <- '#ef8f8d' #add a tenth color becaue pastel1 only offers 9
# Map each phylum to its colour with a named lookup vector instead of a
# 10-branch if/else chain. colors[10:1] reproduces the original assignment:
# Actinobacteria -> colors[10], Bacteria -> colors[9], ...,
# Verrucomicrobia -> colors[1] (PHYLA is declared in that order).
phylum.colors <- setNames(colors[10:1], PHYLA)
taxonomy.color$Color <- unname(phylum.colors[taxonomy.color$Phylum])
# Any phylum not listed in PHYLA (e.g. unclassified) is drawn in black.
taxonomy.color$Color[is.na(taxonomy.color$Color)] <- '#000000'
###PART 1. META COLOR FILE IS COMPLETE###
###PART 2. CREATE RELATIVE ABUNDANCE FILE###
# Convert raw OTU counts to per-sample relative abundances.
otu.matrix <- as.matrix(otu.info)
otu.rel <- otu.matrix/rowSums(otu.matrix) #check with 'rowSums(otu.rel)' all rows = 1
#otu.rel <- subset(otu.rel, select = -c(label, numOtus))
otu.rel.t <- t(otu.rel) #transpose #check with 'colSums(otu.rel.t)' all columns = 1
otu.rel.t <- as.data.frame(otu.rel.t) #make it so we can add back in OTU names without changing to list
otu.rel.t$OTU <- rownames(otu.rel.t) #add OTUs column so we can merge
# Join the colour/phylum metadata onto the abundance table by OTU id.
rm_g <- merge(taxonomy.color, otu.rel.t, by.x = "OTU", by.y = "OTU")
###PART 2. COMPLETE###
### rm_g FILE COMPLETED ###
otubar <- as.matrix(subset(rm_g, select =-c(Genus, Color, OTU))) #delete all columns
rownames(otubar) <- otubar[,"Phylum"] #keep phylum names by saving them as rownames
otubar <- subset(otubar, select = -c(Phylum)) #delete phylum column so all cells are numeric
barg <- as.data.frame(t(otubar))
# NOTE(review): the two assignments below assume the rows of `barg` are in
# the same order as the rows of `meta` -- verify, since merge()/dcast() can
# reorder rows.
barg$SampleID <- meta$SampleID #add IDs
barg$Group <- meta$Group #add groups
col.gen <- as.character(rm_g$Color)
bar_ordered<- barg[order(barg$Group, barg$SampleID),] #order table for splitting
#splits mets and controls
# NOTE(review): `all` shadows base::all() from here on.
all <- split(bar_ordered, bar_ordered$Group)
met <- all$'MetPN'
ctrl <- all$'CtrlPN'
MET.LENGTH <- as.numeric(nrow(met))
CTRL.LENGTH <- as.numeric(nrow(ctrl))
FINAL.LENGTH <- as.numeric(ncol(met))
###MAKE MET FILE###
# Per-sample relative abundances for the Met group (rows = samples,
# columns = OTUs, column names = phylum of each OTU).
barmet <- subset(met, select = -c(SampleID, Group))
barmet <- as.matrix(barmet)
class(barmet) <- "numeric" #change matrix to numeric form rather than character
colnames(barmet) <- rm_g[,"Phylum"] #removes numbers from colnames. e.g. Bacteroidetes.1 -> Bacteroidetes
###phyla only distribution###
# Collapse the OTU columns into one total per phylum.
# BUG FIX: the previous version converted barmet to a data.frame before
# summing; as.data.frame() makes duplicated column names unique again
# (Firmicutes, Firmicutes.1, ...), so the if/else name tests matched only
# the FIRST OTU column of each phylum. Summing on the matrix itself avoids
# that: rowsum() on the transpose adds up all rows (= OTU columns) that
# share the same phylum label, in one vectorized pass.
phylum.totals <- t(rowsum(t(barmet), group = colnames(barmet)))
# Assemble the summary table in the fixed PHYLA order; a phylum with no
# OTUs in this group contributes a column of zeros.
sums <- data.frame(matrix(0, nrow = MET.LENGTH, ncol = PHYLA.LENGTH))
colnames(sums) <- PHYLA
rownames(sums) <- rownames(barmet)
for (p in PHYLA) {
  if (p %in% colnames(phylum.totals)) sums[[p]] <- phylum.totals[, p]
}
#transpose for graphing
sums.t <- t(sums)
class(sums.t) <- "numeric"
###FINISH MAKING MET FILE###
###MAKE CTRL FILE###
# Per-sample relative abundances for the Ctrl group (rows = samples,
# columns = OTUs, column names = phylum of each OTU).
barctrl <- subset(ctrl, select = -c(SampleID, Group))
barctrl <- as.matrix(barctrl)
class(barctrl) <- "numeric" #change matrix to numeric form rather than character
colnames(barctrl) <- rm_g[,"Phylum"] #removes numbers from colnames. e.g. Bacteroidetes.1 -> Bacteroidetes
###phyla only distribution###
# Collapse OTU columns into one total per phylum directly on the numeric
# matrix. BUG FIX: summing after as.data.frame() (as before) re-mangles
# duplicated column names (Firmicutes.1, ...), so only the first OTU of each
# phylum matched the name tests; rowsum() on the transpose sums every OTU
# column sharing a phylum label.
phylum.totals.ctrl <- t(rowsum(t(barctrl), group = colnames(barctrl)))
# Summary table in the fixed PHYLA order; absent phyla stay at zero.
sums.ctrl <- data.frame(matrix(0, nrow = CTRL.LENGTH, ncol = PHYLA.LENGTH))
colnames(sums.ctrl) <- PHYLA
rownames(sums.ctrl) <- rownames(barctrl)
for (p in PHYLA) {
  if (p %in% colnames(phylum.totals.ctrl)) sums.ctrl[[p]] <- phylum.totals.ctrl[, p]
}
#transpose for graphing
sums.ctrl.t <- t(sums.ctrl)
class(sums.ctrl.t) <- "numeric"
# Combine both groups, samples as columns, phyla as rows (stacked bars).
sums.total <- cbind(sums.t, sums.ctrl.t)
colnames(sums.total) <- c(rep("Met PN", MET.LENGTH), rep("Ctrl PN", CTRL.LENGTH))
# graphing both sets:
par(mfrow=c(1,1)) #single plotting panel
par(mar=c(3.3,3,2,1))
par(xpd=T) #allow the legend to be drawn outside the plot region
# Stacked bars, one per sample; the `space` vector inserts a wider gap
# between the last Met sample and the first Ctrl sample.
barplot(sums.total,
        las=2,
        main=TITLE,
        ylab="Relative Abundance",
        cex.names=.8,
        ylim=c(0,1),
        col=colors,
        xlim=c(0,40),
        space=c(rep(0.2, MET.LENGTH), 1.5, rep(0.2, CTRL.LENGTH-1)))
# Place the legend to the right of the bars.
legend.x <- MET.LENGTH + CTRL.LENGTH + 5
legend(legend.x, 1,
       legend=rownames(sums.t),
       col=colors,
       fill=colors,
       cex=1,
       bty="n",
       ncol=1)
#
# not.preg_g<-as.matrix(t(barnot.preg))
# par(mar=c(5,4,2,5))
# par(xpd=T)
# barplot(not.preg_g, las=2, main="Not Pregnant", ylab="Relative abundance", cex.names=0.8, ylim=c(0,1), col=col.gen, xlim=c(0,100))
# ###Note: Had to mess with legend position (check position with locator before plotting 2nd bar plot), when exported all colors/species didn't show up, ended up combining from a couple tries
|
8e92382db331d706e4543915f2d18e4cf7334676
|
a61b76d7649cea8e9d70608852592ac9ee6bd8dd
|
/codes/MedSchool.R
|
89a64d1a2fdcbbe9f75e7fd50173610edfa35cd7
|
[] |
no_license
|
benjaminsw/Generalised_Linear_Models
|
fdb7ad08e4f5394a4d792b8a159def1330b8cf42
|
2ff85b8193b84ddc1d0c5e26fe1e8dd8e24e0387
|
refs/heads/main
| 2023-03-04T04:20:17.613245
| 2021-01-27T11:10:47
| 2021-01-27T11:10:47
| 330,643,819
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,210
|
r
|
MedSchool.R
|
# Admissions to medical school
# Compare a linear probability model with logistic regression for the binary
# outcome Acceptance (0/1) as a function of GPA.
# Load the package containing the data
library(Stat2Data)
data(MedGPA)
# Plot the data allowing for some jitter to better see overlapping points:
library(ggplot2)
medgpa.plot <- ggplot(data = MedGPA, aes(y = Acceptance, x = GPA)) +
  geom_jitter(width =0, height =0.01, alpha =0.5, colour ="#984ea3")
medgpa.plot <- medgpa.plot +
  theme(panel.background = element_rect( fill = "transparent", colour = NA),
        plot.background = element_rect( fill = "transparent", colour = NA),
        panel.border = element_rect(fill = NA, colour = "black", size = 1))
# Overlay the least-squares line (can predict outside [0, 1]):
medgpa.plot + geom_smooth(method = "lm", se = FALSE,
                          fullrange = TRUE, colour = "#984ea3")
# Linear model:
med.lm <- lm(Acceptance ~ GPA, data=MedGPA)
summary(med.lm)
# Logistic regression (logit) model:
med.glm <- glm(Acceptance ~ GPA, data = MedGPA, family = binomial)
summary(med.glm)
# Overlay the fitted logistic regression curve onto the previous plot:
medgpa.plot +
  geom_smooth(method = "lm",se = FALSE, fullrange = TRUE, colour = "#984ea3") +
  geom_smooth(method = "glm", color = "#ff7f00",se = FALSE, fullrange = TRUE,
              method.args = list(family = "binomial"))
|
139bff6e09fc35109efdfe850836b7abd2dbf0d0
|
42fdc59279057501501f134e2187ec4dc6194b2b
|
/rscripts/permutation_bootstraps_server.R
|
bb97455c18390be02aebca4d98b7f16c11da62f7
|
[] |
no_license
|
majpark21/Master_project
|
4904ec182b512aae1ddc74d06963c9fe30413b07
|
467a42f594099850ec0eb4b5e0edbb2cedb8cb49
|
refs/heads/master
| 2021-03-16T09:17:09.600691
| 2018-02-11T23:27:43
| 2018-02-11T23:27:43
| 98,453,350
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,018
|
r
|
permutation_bootstraps_server.R
|
library(stringr)
library(data.table)
library(plyr)
CastCluster <- function(data, time.col, condition.col, label.col, measure.col, k.clust, na.fill, plot = TRUE, return.quality = TRUE, ...){
  # Cast a long trajectory table to a wide (series x time) matrix, run
  # partitional DTW clustering for each k in `k.clust`, and collect internal
  # cluster validity indices (CVIs).
  #
  # Args:
  #   data:           long-format table of trajectories
  #   time.col:       name of the time-point column
  #   condition.col:  name of the experimental-condition column
  #   label.col:      name of the trajectory-identifier column
  #   measure.col:    name of the measurement column
  #   k.clust:        vector of cluster counts to try (assumes tsclust()
  #                   returns a list, i.e. length(k.clust) > 1 -- TODO confirm
  #                   behaviour for a single k)
  #   na.fill:        value substituted for missing time points
  #   plot:           if TRUE, bar-plot the CVIs per k
  #   return.quality: if TRUE, also return the CVI matrix
  #   ...:            forwarded to dtwclust::tsclust()
  #
  # Returns: list(cluster=, table=, quality=) when return.quality is TRUE,
  # otherwise list(out=, table=). `table` is the casted table with one
  # cluster-assignment column ("k_<k>") prepended per tested k.
  require(dtwclust)
  temp <- myCast(data, time.col, condition.col, label.col, measure.col, na.fill)
  # Partitional clustering, DTW distance, PAM centroids; fixed seed for
  # reproducibility.
  clust <- tsclust(temp$casted.matrix, type = "partitional", k = k.clust, distance = "dtw_basic", centroid = "pam", seed = 42, ...)
  names(clust) <- paste0("k_", k.clust)
  quality <- sapply(clust, cvi, type = "internal")
  # Prepend one cluster-assignment column per k to the casted table.
  cluster.table <- temp$casted
  for(k in seq_along(clust)){
    cluster.table <- cbind(clust[[k]]@cluster, cluster.table)
    colnames(cluster.table)[1] <- names(clust)[k]
  }
  # Plot the CVIs: one facet per index, one bar per number of clusters.
  if(plot){
    require(ggplot2)
    mquality <- melt(quality)
    names(mquality) <- c("Stat", "Nb.Clust", "value")
    plot(ggplot(mquality, aes(x=Nb.Clust, y = value)) + geom_col(aes(group = Nb.Clust, fill = Nb.Clust)) + facet_grid(Stat ~ ., scales = "free_y"))
  }
  # Output
  if(return.quality) return(list(cluster = clust, table = cluster.table, quality = quality))
  else return(list(out = clust, table = cluster.table))
}
myCast <- function(data, time.col, condition.col, label.col, measure.col, na.fill){
  # Reshape a long (condition, label, time, measure) table into a wide
  # trajectory table, and also return it as a plain numeric matrix with NAs
  # replaced by na.fill. dcast may reorder rows depending on key order, so
  # the casted table is returned alongside the matrix to map matrix rows
  # back to their (condition, label) identifiers.
  dt <- copy(data)
  wide <- dcast(dt, get(condition.col) + get(label.col) ~ get(time.col),
                value.var = measure.col)
  traj <- as.matrix(wide[, c(-1, -2)])  # drop the two identifier columns
  traj[which(is.na(traj))] <- na.fill
  list(casted = wide, casted.matrix = traj)
}
# Plot clusters directly from output$table of CastCluster.
# id.vars.col: given in indices (include ALL clustering columns)
# cluster.col: name (string) of the column with the clustering to plot
# type: "trajectory" (per-cluster time courses with a mean overlay) or
#       "composition" (bar chart of condition counts per cluster)
# NOTE(review): aes(group = label.col) and aes(fill = condition.col) refer to
# columns literally named "label.col"/"condition.col"; dcast with get() in
# myCast produces exactly those column names, so this works by construction --
# re-verify if myCast's formula ever changes.
plot_cluster <- function(data, id.vars.col, cluster.col, type){
# Plot clusters directly from output$table of CastCluster
# id.vars.col: given in indices (include ALL clustering columns)
# cluster.col: name of the column with clustering to plot
library(ggplot2)
ids <- colnames(data)[id.vars.col]
melted <- melt(data, id.vars = ids)
if(type=="trajectory"){
# variable holds the (wide) time columns; coerce to numeric for the x axis.
ggplot(melted, aes(x = as.numeric(variable), y = value)) + geom_line(aes(group = label.col)) +
facet_wrap(as.formula(paste("~",cluster.col))) + stat_summary(fun.y=mean, geom="line", colour = "blue", size = 1.5) + xlab("Time")
} else if(type=="composition"){
melted[, c(cluster.col):=as.factor(get(cluster.col))]
ggplot(melted, aes_string(cluster.col)) + geom_bar(aes(fill=condition.col))
}
}
sep.meas.along.time <- function(data1, data2, time.col, measure.col){
  # Compute separability measures (separability.measures(), defined
  # elsewhere) between the two data tables at every time point; returns one
  # row per time point with the time value prepended as the first column.
  timev <- unique(data1[, get(time.col)])
  if(!(identical(unique(data2[, get(time.col)]), timev))) stop("Time vectors must be identical between the two data")
  # BUG FIX: the original loop hard-coded `RealTime` instead of using
  # time.col, so the function silently worked only when time.col was
  # exactly "RealTime". Building all rows with lapply also avoids growing
  # the result with rbind inside a loop.
  rows <- lapply(timev, function(tp){
    separability.measures(data1[get(time.col) == tp, get(measure.col)],
                          data2[get(time.col) == tp, get(measure.col)])
  })
  out <- do.call(rbind, rows)
  out <- cbind(timev, out)
  return(out)
}
one.permutation.auc <- function(x, y, metric){
  # One permutation draw: pool the trajectories of x and y (rows), randomly
  # reassign them to two groups of the original sizes, and sum the chosen
  # separability measure over every time point (column).
  pooled <- rbind(x, y)
  picked <- sample(1:nrow(pooled), size = nrow(x), replace = FALSE)
  grp1 <- pooled[picked, ]
  grp2 <- pooled[setdiff(1:nrow(pooled), picked), ]
  seps <- sapply(1:ncol(x), function(j) separability.measures(grp1[, j], grp2[, j]))
  sum(unlist(seps[metric, ]))
}
permutation.auc <- function(x, y, n, metric = "jm"){
  # Null distribution of the separability AUC via label permutations.
  # x, y: time-series matrices (rows = trajectories, cols = time points)
  # n: number of permutations to draw
  # metric: one of "jm", "bh", "div", "tdiv", "ks"
  # Returns a length-n vector of permuted AUC values.
  if(ncol(x) != ncol(y)) stop("x and y must have same number of columns")
  replicate(n, one.permutation.auc(x, y, metric))
}
wrap_perm <- function(x, y, meas, n){
  # Cast both condition tables to wide trajectory matrices (NA -> 1100,
  # the same cap used when loading the data) and run the permutation test.
  mx <- myCast(x, "RealTime", "Condition", "Label", meas, 1100)$casted.matrix
  my <- myCast(y, "RealTime", "Condition", "Label", meas, 1100)$casted.matrix
  permutation.auc(mx, my, n)
}
one.bootstrap.auc.percol <- function(x, y, metric){
  # One bootstrap draw over time points: resample columns with replacement,
  # using the SAME column sample for both groups so the comparison stays
  # paired in time, then sum the separability measure over the resample.
  cols <- sample(1:ncol(x), size = ncol(x), replace = TRUE)
  xb <- x[, cols]
  yb <- y[, cols]
  seps <- sapply(1:ncol(x), function(j) separability.measures(xb[, j], yb[, j]))
  sum(unlist(seps[metric, ]))
}
bootstrap.auc.percol <- function(x, y, B, metric = "jm"){
  # Bootstrap distribution of the separability AUC over time points.
  # x, y: time-series matrices (rows = trajectories, cols = time points)
  # B: number of bootstrap draws
  # metric: one of "jm", "bh", "div", "tdiv", "ks"
  # Returns a length-B vector of bootstrapped AUC values.
  if(ncol(x) != ncol(y)) stop("x and y must have same number of columns")
  replicate(B, one.bootstrap.auc.percol(x, y, metric))
}
wrap_bootcol <- function(x, y, meas, n){
  # Cast both condition tables to wide trajectory matrices (NA -> 1100) and
  # bootstrap the AUC over time-point columns (default metric "jm").
  mx <- myCast(x, "RealTime", "Condition", "Label", meas, 1100)$casted.matrix
  my <- myCast(y, "RealTime", "Condition", "Label", meas, 1100)$casted.matrix
  bootstrap.auc.percol(mx, my, n)
}
# Load data
Yanni <- fread("../input/sust_E_F_N.csv")
# Parse growth factor and concentration out of the stimulus label.
# NOTE(review): the class "[E,F,N]" also matches a literal comma -- harmless
# if Stim_All_Ch never contains one before E/F/N, but confirm.
gf <- str_extract(Yanni$Stim_All_Ch, "[E,F,N]")
conc <- str_extract(Yanni$Stim_All_Ch, "(([0-9]+\\.[0-9]*)|([0-9]+))")
Yanni$Condition <- paste(gf, conc, sep = "-")
Yanni[, c("TrackObjects_Label_uni","Condition") := list(as.factor(TrackObjects_Label_uni), as.factor(Condition))]
setkey(Yanni, Condition, TrackObjects_Label_uni)
setnames(Yanni, c("Intensity_MeanIntensity_Ratio", "TrackObjects_Label_uni"), c("Ratio","Label"))
rm(gf, conc)
setcolorder(Yanni, c("Condition", "Label", "Ratio", "RealTime", "Metadata_Series", "Metadata_T", "TrackObjects_Label","Stim_All_Ch", "Stim_All_S"))
# Drop the metadata columns (positions 5-9 after the reorder above).
del.cols <- names(Yanni)[5:9]
Yanni[, (del.cols) := NULL]
# Cap outlier ratios: anything above 1800 is clamped to 1100.
set(Yanni, i = which(Yanni[, Ratio] > 1800), j=3L, value = 1100) # Ratio is the 3rd column
# Compute AUC
# Get all pairs of conditions
conditions <- combn(as.character(unique(Yanni[,Condition])), m = 2)
# NOTE(review): hand-picked pair indices -- valid only for this exact data
# set / condition ordering; recheck if the input file changes.
conditions <- conditions[,c(1:3,12,13,22, 39:41,46,47,52, 61:66)]
# Normalisation constant for the AUC (presumably sqrt(2), the maximum of the
# JM distance, times the 101 time points -- TODO confirm).
max.val <- sqrt(2) * 101
# Compute separabilities of conditions at each time point
sep.meas.raw <- apply(conditions, 2, function(x) sep.meas.along.time(Yanni[Condition==x[1]], Yanni[Condition==x[2]], "RealTime", "Ratio" ))
names(sep.meas.raw) <- apply(conditions, 2, function(x) paste(x[1], x[2], sep = ","))
# Go to data table: tag every per-pair table with its two condition names.
for(i in 1:length(sep.meas.raw)){
temp <- unlist(strsplit(names(sep.meas.raw)[i], ","))
sep.meas.raw[[i]]$Cond1 <- temp[1]
sep.meas.raw[[i]]$Cond2 <- temp[2]
}
sep.meas.raw <- as.data.table(rbind.fill(sep.meas.raw))
#sep.meas.raw[, c("Cond1", "Cond2") := list(as.factor(Cond1), as.factor(Cond2))]
# AUC per condition pair = normalised sum of the JM distances over time.
auc.raw <- sep.meas.raw[, .(auc = sum(jm, na.rm = T)/max.val), by = c("Cond1", "Cond2")] # a few NAs, slight bias in the values
auc.raw[, comb.cond := as.factor(paste(Cond1, Cond2, sep = ";"))]
# First letter of the condition identifies the growth factor (E/F/N -> xGF).
auc.raw[, GF := as.factor(paste0(str_sub(Cond1,1,1), "GF"))]
# Build a symmetric condition-by-condition distance matrix from the pairwise
# AUC table (expects columns Cond1, Cond2, auc in positions 1:3).
get.dist.matrix <- function(dat){
data <- copy(dat)
# Put both condition columns on a common level set so dcast yields a
# square table (drop = F keeps empty level combinations).
data[, Cond1 := factor(data$Cond1, levels = union(data$Cond1, data$Cond2))]
# NOTE(review): data$Cond1 is already a factor here, so this second union()
# mixes a factor with a character vector; the resulting level order depends
# on how c()/union() coerce factors in the running R version -- confirm.
data[, Cond2 := factor(data$Cond2, levels = union(data$Cond1, data$Cond2))]
dist.mat <- dcast(data=data[,1:3], formula = Cond1 ~ Cond2, value.var = "auc", drop = F)
dist.mat <- as.matrix(dist.mat[,-1])
rownames(dist.mat) <- colnames(dist.mat)
# Missing pairs become 0; adding the transpose symmetrises the half-filled
# matrix (the diagonal stays 0 because a condition is never paired with itself).
dist.mat[which(is.na(dist.mat))] <- 0
dist.mat <- dist.mat + t(dist.mat)
return(dist.mat)
}
# One distance matrix per growth factor.
dist.raw.EGF <- get.dist.matrix(auc.raw[GF=="EGF"])
dist.raw.FGF <- get.dist.matrix(auc.raw[GF=="FGF"])
dist.raw.NGF <- get.dist.matrix(auc.raw[GF=="NGF"])
# Permutation tests and bootstraps
# NOTE(review): 25 draws is very low for a permutation/bootstrap null --
# presumably a server test run; increase for real inference.
nperm <- 25
set.seed(7)
auc.perm <- apply(conditions, 2, function(x) wrap_perm(Yanni[Condition==x[1]], Yanni[Condition==x[2]], "Ratio", nperm))
bootcol <- apply(conditions, 2, function(x) wrap_bootcol(Yanni[Condition==x[1]], Yanni[Condition==x[2]], "Ratio", nperm))
save(auc.perm, file = "../output_perm_boot/auc.perm.Robj")
save(bootcol, file = "../output_perm_boot/bootcol.Robj")
|
66e5e463c308f2bdd8d804eebbc2198233de5477
|
2df42b13fef6978ad09b407c6791031a959f449f
|
/man/plot_pred_box.Rd
|
9dc659a433a92858389954088465ad21edbe567d
|
[] |
no_license
|
Sea2Data/Rstox
|
4284021138ea244eaaccded3f7728f9cc06cb03d
|
71367f11deec42791e809c28cdf7752c5c6ca1f3
|
refs/heads/master
| 2023-03-07T00:03:22.039374
| 2019-02-08T22:40:17
| 2019-02-08T22:40:17
| 90,259,495
| 1
| 3
| null | 2022-01-05T12:36:08
| 2017-05-04T12:15:33
|
R
|
UTF-8
|
R
| false
| true
| 669
|
rd
|
plot_pred_box.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RECA_plots.R
\name{plot_pred_box}
\alias{plot_pred_box}
\title{Plot catch by age prediction as boxplots}
\usage{
plot_pred_box(pred, var, unit, xlab = "age",
ylab = paste("posterior catch", unit), ...)
}
\arguments{
\item{pred}{RECA prediction object as returned by eca::eca.predict}
\item{var}{A key string indicating the variable to plot. 'Abundance' and 'Weight' are implemented.}
\item{unit}{A unit key string indicating the unit (see getPlottingUnit()$definitions$unlist.units for available key strings)}
\item{xlab}{Label for the x axis. Defaults to "age".}
\item{ylab}{Label for the y axis. Defaults to the posterior catch in the given unit.}
\item{...}{Additional arguments passed on to the underlying plotting routine.}
}
\description{
Plot catch by age prediction as boxplots
}
\keyword{internal}
|
92340dab67ee69142f0e4c2f62d3ae83a31a69dd
|
0c89428a775d9095ebce45fc434bfd000a2e9147
|
/plot3.R
|
57cfca982632f9661fcf37fe45236cddce509ff4
|
[] |
no_license
|
ericsturman/ExData_Plotting1
|
4d07fc273ed2c18f820a9e24c3fe182ed5b2967d
|
7223ff385f42cec5063000b88bcd8e3a299908d9
|
refs/heads/master
| 2020-04-03T18:18:38.434063
| 2018-11-03T15:29:00
| 2018-11-03T15:29:00
| 155,478,394
| 0
| 0
| null | 2018-10-31T01:12:54
| 2018-10-31T01:12:53
| null |
UTF-8
|
R
| false
| false
| 1,039
|
r
|
plot3.R
|
library(lubridate)
# read in data and add datetime field
# NOTE(review): household_power_consumption.txt must sit in the working
# directory; missing values in this data set are commonly encoded as "?" --
# confirm; as.numeric() below would turn them into NA with a warning.
householdPower<-read.csv("household_power_consumption.txt", sep=";", stringsAsFactors=FALSE)
# Keep only the two days of interest: 2007-02-01 and 2007-02-02.
householdChartData<-householdPower[as.Date(householdPower$Date, "%d/%m/%Y") >= as.Date(ymd("20070201")) & as.Date(householdPower$Date, "%d/%m/%Y") <= as.Date(ymd("20070202")),]
# Create datetime column for plots 2-4
householdChartData$datetime=strptime(paste(householdChartData[,1], householdChartData[,2]), format="%d/%m/%Y %H:%M:%S")
png(filename = "plot3.png",height=480, width=480)
# Draw an empty frame first (type="n"), then add one line per sub-meter.
plot(householdChartData$datetime, as.numeric(householdChartData$Sub_metering_1), xlab="",ylab="Energy sub metering", type="n")
lines(householdChartData$datetime, as.numeric(householdChartData$Sub_metering_1))
lines(householdChartData$datetime, as.numeric(householdChartData$Sub_metering_2), col="red")
lines(householdChartData$datetime, as.numeric(householdChartData$Sub_metering_3), col="blue")
# Legend labels come from columns 7-9 (the three sub-metering columns).
legend("topright", legend=names(householdChartData[7:9]),col=c("black","red", "blue"), lty=1)
dev.off()
|
5beced314a2ee83226727990bd718b6a66cfa97b
|
a6284a5c546f4a0bc827e20289bdbc6fcaf28172
|
/man/Levi_Tripathy.Rd
|
0528df8e363da19cacfe46e0568dfaa51f7ad905
|
[] |
no_license
|
cran/modelfree
|
6f428c443f94f2181425aeb621fe3eca88b9a688
|
453b3c46c5171ef6765cc519a359214fd0bc906d
|
refs/heads/master
| 2023-05-26T11:52:10.754440
| 2023-05-20T06:40:02
| 2023-05-20T06:40:02
| 17,697,617
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,197
|
rd
|
Levi_Tripathy.Rd
|
\name{Levi_Tripathy}
\alias{Levi_Tripathy}
\docType{data}
\title{
Visual detection of path deviation
}
\description{
The subject was presented with the image of a dot moving rightwards on a linear path until it reached the midline of the display, when it changed direction either upwards or downwards. The subject had to indicate the direction. The symbols in the figure below show the proportion of correct responses in 30 trials as the deviation varied from –3 to 3 units. See \url{https://personalpages.manchester.ac.uk/staff/david.foster/software-modelfree/latest/examples/example02.html}
}
\usage{data("Levi_Tripathy")}
\format{
A data frame with 7 rows and 3 columns.
\describe{
\item{\code{x}}{stimulus level}
\item{\code{r}}{number of successes}
\item{\code{m}}{number of trials}
}
}
\references{
Levi, D. M. & Tripathy, S. P. “Is the ability to identify deviations in multiple trajectories compromised by amblyopia?”, Journal of Vision, 6(12), 1367-1379, 2006.
}
\examples{
data("Levi_Tripathy")
x = Levi_Tripathy$x
r = Levi_Tripathy$r
m = Levi_Tripathy$m
plot( x, r / m, xlim = c( -2.87, 2.87 ), ylim = c( 0.03, 0.97 ), type = "p", pch="*" )
}
\keyword{datasets}
|
5194b52624199a17590bb262546751c67d954cc0
|
42a0668a13259bb4301a86ebf592dbadc83f82b2
|
/Fase 2/ANOVA/anova.r
|
0ff2bc1e59289fd70b4d79f9b248e1f139f1eb87
|
[] |
no_license
|
2kodevs/Student-Performance-Analysis
|
5c56c7c82a97216a17dd0f9c141ef8775aae230d
|
1aa8c91e2db72fa177911be5ccdd4c0e85cd9228
|
refs/heads/master
| 2021-02-06T02:42:18.715583
| 2020-10-15T16:27:11
| 2020-10-15T16:27:11
| 243,866,573
| 0
| 0
| null | 2020-10-15T16:27:12
| 2020-02-28T22:35:30
|
TeX
|
UTF-8
|
R
| false
| false
| 2,916
|
r
|
anova.r
|
require(lmtest)
# Draw a box plot of the mean results per zone and save it to
# images/box-<label>.png. Labels and title are in Spanish (runtime output,
# kept as-is). The images/ directory must already exist.
# NOTE(review): plot() dispatches to a box plot only when df$addr is a
# factor; with read.csv defaults since R 4.0 it may be character -- confirm
# upstream coercion.
box_plot <- function(df, label) {
title <- 'Box-plot de las medias por zona'
png(paste('images/box-', label, '.png', sep = ''))
plot(
df$measure ~ df$addr,
data=df,
xlab="Zona",
ylab="Resultado medio",
main=title
)
dev.off()
}
qq_plot <- function(residuals, label) {
  # Save a normal Q-Q plot of the residuals, with the reference line,
  # to images/qq-<label>.png (the images/ directory must exist).
  png(paste0('images/qq-', label, '.png'))
  qqnorm(residuals)
  qqline(residuals)
  dev.off()
}
hist_plot <- function(residuals, label) {
  # Save a histogram of the residuals to images/hist-<label>.png.
  # The parameter keeps its original name so hist()'s auto-generated title
  # ("Histogram of residuals") is unchanged.
  png(paste0('images/hist-', label, '.png'))
  hist(residuals)
  dev.off()
}
std_plot <- function(anova, label) {
  # Save a studentized-residuals-vs-fitted diagnostic plot to
  # images/std-<label>.png, with a dashed horizontal line at zero.
  png(paste0('images/std-', label, '.png'))
  fitted.vals <- anova$fitted.values
  student.res <- rstudent(anova)
  plot(fitted.vals, student.res,
       ylab = 'Residuals',
       xlab = 'Predictions',
       main = 'Anova Residuals')
  abline(h = 0, lty = 2)
  dev.off()
}
plot_assumptions <- function(anova, label) {
  # Save a 2x2 panel to images/all-<label>.png: residuals vs fitted,
  # residual histogram and a normal Q-Q plot, for eyeballing the ANOVA
  # assumptions in one figure.
  png(paste0('images/all-', label, '.png'))
  layout(matrix(1:4, nrow = 2, ncol = 2, byrow = TRUE))
  plot(anova$fitted.values, rstudent(anova),
       ylab = 'Residuals',
       xlab = 'Predictions',
       main = 'Anova Residuals')
  abline(h = 0, lty = 2)
  # Keep the name `residuals` so hist()'s auto-title reads
  # "Histogram of residuals", as before.
  residuals <- anova$residuals
  hist(residuals)
  qqnorm(residuals)
  qqline(residuals)
  dev.off()
}
test_assumptions <- function(anova, df) {
  # Print formal checks of the ANOVA assumptions:
  # normality of residuals (Shapiro-Wilk), homogeneity of variance across
  # zones (Bartlett) and independence of residuals (Durbin-Watson, lmtest).
  print(" ++ Shapiro ++ ")
  print(shapiro.test(anova$residuals))
  print(" ++ Bartlett ++ ")
  print(bartlett.test(anova$residuals, df$addr))
  print(" ++ Durbin-Watson ++ ")
  print(dwtest(anova))
}
# Run the full analysis for one label: box plot, one-way ANOVA of measure by
# zone, assumption tests and the diagnostic plots (combined panel plus three
# separate images). Results are printed / written to files; returns NULL.
make_model <- function(df, label) {
# Box Plot
box_plot(df, label)
# Anova analysis
anova <- aov(df$measure ~ df$addr, data=df)
res <- anova$residuals
print(paste('---------', label, '---------'))
print(' ++ Anova ++ ')
print(summary(anova))
# Check anova residual assumptions
# Test
test_assumptions(anova, df)
# Plots
plot_assumptions(anova, label)
# Plot in separated images
std_plot(anova, label)
hist_plot(res, label)
qq_plot(res, label)
return()
}
scores <- function(data, i) {
  # Average of the three grade columns (G1.x, G2.x, G3.x) for row i.
  grades <- c(data$G1.x[i], data$G2.x[i], data$G3.x[i])
  mean(grades)
}
load_data <- function(root) {
  # Read the student-performance CSV at `root` and build a balanced
  # two-group data frame: `addr` (the two address levels) and `measure`
  # (mean of G1.x, G2.x, G3.x per student, via scores()). Both groups are
  # truncated to the size of the smaller one so the downstream ANOVA sees
  # equal group sizes.
  data <- read.csv(root)
  # BUG FIX: levels() returns NULL on a character column (the read.csv
  # default since R 4.0), which broke the original group split. factor()
  # works for both character and factor columns and keeps the same sorted
  # level order.
  lvls <- levels(factor(data$address))
  # Mean score per student, in row order (the order the original loop used).
  all.scores <- vapply(seq_len(nrow(data)), function(i) scores(data, i), numeric(1))
  in.first <- data$address == lvls[1]
  lvl1 <- all.scores[in.first]
  lvl2 <- all.scores[!in.first]
  amount <- min(length(lvl1), length(lvl2))
  measure <- c(lvl1[1:amount], lvl2[1:amount])
  addr <- c(rep(lvls[1], amount), rep(lvls[2], amount))
  return(data.frame(addr, measure))
}
# Entry point: read the CSV path from standard input, load/reshape the data
# and run the ANOVA pipeline with the label 'zona'.
main <- function() {
cat("Enter the csv dir:\n")
df <- load_data(readLines("stdin", n=1))
make_model(df, 'zona')
}
main()
|
0152e568c8b5bebfa5ef4587430d052fd8e33671
|
5b4722beafbbf6f88b177b8f1455f22d74411a0a
|
/Aditya/Project Script-Aditya Ratna.R
|
2b6bc54cd3788c180de1e548bfaf5bcd37844e4b
|
[] |
no_license
|
umangmystery/craigslist-r
|
75462a22c3e5eab847e677cee89ff63b3d503bb8
|
8064b3076f0c2555cb5e0f7bbff286336ddee568
|
refs/heads/main
| 2023-04-07T07:33:04.981592
| 2021-04-18T22:58:11
| 2021-04-18T22:58:11
| 349,236,324
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,240
|
r
|
Project Script-Aditya Ratna.R
|
# Odometer is empty / is null
# Skewness check
# Price category cut bins changed
# ODometer and price scatter plot
#
# installing required packages
# install.packages("readxl")
# install.packages("lubridate")
# install.packages("writexl")
# install.packages("funModeling")
# install.packages("tidyverse")
# install.packages("ggplot2")
# install.packages("e1071")
# install.packages("treemap")
# install.packages("hrbrthemes")
# install.packages("treemapify")
# install.packages("forcats")
# install.packages("reshape2")
# install.packages("mapproj")
# install.packages("maps")
# install.packages("fitdistrplus")
install.packages("mlbench")
install.packages("caret")
#loading libraries
library(readxl)
library(reshape2)
library(lubridate)
library(tidyverse)
library(funModeling)
library(writexl)
library(ggplot2)
library(e1071)
library(forcats)
library(hrbrthemes)
library(treemap)
library(treemapify)
library(maps)
library(mapproj)
library(fitdistrplus)
library(mlbench)
library(caret)
# NOTE(review): absolute, machine-specific working directory -- adjust (or
# replace with a project-relative path) before running elsewhere.
setwd("/Users/adityaratna/Desktop/Study/Probability and Statistics/IE Project")
getwd()
#Reading the data file
Vehicle_Data<-read_xlsx("vehicles_changed.xlsx")
# State short-form -> full-name lookup, joined onto the data further below.
StateName <- read_xlsx("StateNames.xlsx")
head(Vehicle_Data)
#-------------------------------------------------------------
#Data Processing
#Removing all null values
#Vehicle_Data <- na.omit(Vehicle_Data)
#Converting few column values to factors
Vehicle_Data$manufacturer <- as.factor(Vehicle_Data$manufacturer)
Vehicle_Data$condition <- as.factor(Vehicle_Data$condition)
Vehicle_Data$cylinders <- as.factor(Vehicle_Data$cylinders)
Vehicle_Data$fuel <- as.factor(Vehicle_Data$fuel)
Vehicle_Data$title_status <- as.factor(Vehicle_Data$title_status)
Vehicle_Data$transmission <- as.factor(Vehicle_Data$transmission)
Vehicle_Data$drive <- as.factor(Vehicle_Data$drive)
Vehicle_Data$size <- as.factor(Vehicle_Data$size)
Vehicle_Data$type <- as.factor(Vehicle_Data$type)
Vehicle_Data$state <- as.factor(Vehicle_Data$state)
#Converting to date format (truncated = 2L lets a bare year parse as Jan 1)
Vehicle_Data$year <- ymd(Vehicle_Data$year, truncated = 2L)
str(Vehicle_Data)
#Validating data in each column
summary(Vehicle_Data)
#glimpse(Vehicle_Data)
head(Vehicle_Data)
#trimming values of price: keep listings between $500 and $200,000
Vehicle_Data <- Vehicle_Data[(Vehicle_Data$price <= 200000) & (Vehicle_Data$price >= 500),]
Vehicle_Data <- Vehicle_Data[(Vehicle_Data$odometer <= 700000),]
#converting null odometer values to 0
Vehicle_Data$odometer[is.na(Vehicle_Data$odometer)] <- 0
# NOTE(review): is_empty() tests whether the WHOLE column has length zero,
# so on non-empty data this line is effectively a no-op -- confirm intent.
Vehicle_Data$odometer[is_empty(Vehicle_Data$odometer)] <- 0
#Removing null values from the columns which we will need for hypothesis
# NOTE(review): this NA summary is printed, not stored -- the "#doubt" tag
# is the original author's; the line has no effect on the data.
sort(sapply(Vehicle_Data, function(x) sum(is.na(x))))#doubt
Vehicle_Data <- Vehicle_Data[!is.na(Vehicle_Data$odometer),]
Vehicle_Data <- Vehicle_Data[!is.na(Vehicle_Data$year),]
Vehicle_Data <- Vehicle_Data[!is.na(Vehicle_Data$title_status),]
Vehicle_Data <- Vehicle_Data[!is.na(Vehicle_Data$transmission),]
Vehicle_Data <- Vehicle_Data[!is.na(Vehicle_Data$fuel),]
Vehicle_Data <- Vehicle_Data[!is.na(Vehicle_Data$type),]
Vehicle_Data <- Vehicle_Data[!is.na(Vehicle_Data$drive),]
Vehicle_Data <- Vehicle_Data[!is.na(Vehicle_Data$condition),]
Vehicle_Data <- Vehicle_Data[!is.na(Vehicle_Data$size),]
Vehicle_Data <- Vehicle_Data[!is.na(Vehicle_Data$cylinders),]
# range of Price
range(Vehicle_Data$price)
#Creating 6 bins/categories for Price
Vehicle_Data$Price_Cat <- cut(Vehicle_Data$price, breaks = c(0,5000,10000,15000,20000,40000,max(Vehicle_Data$price)), labels = c("0-5k", "5-10k", "10-15k","15-20k","20-40k","VeryHigh"))
#Giving each row a sequential number by converting ID
Vehicle_Data$id <- seq(from = 1, to = nrow(Vehicle_Data))
#Getting full State Names
Vehicle_Data <- left_join(Vehicle_Data, StateName, by = c("state"="ShortForm"))
# Writing data to xlsx
write_xlsx(Vehicle_Data,"Final_Vehicle2.xlsx")
#--------------------------------------------
#Statistical Analysis
# Descriptive statistics for price and odometer; each value is assigned and
# then echoed so it prints when the script is sourced with echo.
#mean
mean_price <- mean(Vehicle_Data$price)
mean_price
mean_odometer <- mean(Vehicle_Data$odometer)
mean_odometer
#median
median_price <- median(Vehicle_Data$price)
median_price
median_odometer <- median(Vehicle_Data$odometer)
median_odometer
#Min and Max
min_price <- min(Vehicle_Data$price)
min_price
max_price <- max(Vehicle_Data$price)
max_price
min_odometer <- min(Vehicle_Data$odometer)
max_odometer <- max(Vehicle_Data$odometer)
min_odometer
max_odometer
#Range
range_price <- range(Vehicle_Data$price)
range_price
range_odometer <- range(Vehicle_Data$odometer)
range_odometer
#Variance
variance_price <- var(Vehicle_Data$price)
variance_price
variance_odometer <- var(Vehicle_Data$odometer)
variance_odometer
#Standard Variance
sd_price <- sd(Vehicle_Data$price)
sd_price
sd_odometer <- sd(Vehicle_Data$odometer)
sd_odometer
#Coefficient of Variation
# NOTE: despite the cov_ prefix these are coefficients of variation (in
# percent), not covariances.
cov_price <- (sd_price/mean_price)*100
cov_price
cov_odometer <- (sd_odometer/mean_odometer)*100
cov_odometer
#Skewness
skewness_price <- skewness(Vehicle_Data$price)
skewness_price
skewness_odometer <- skewness(Vehicle_Data$odometer) #highly skewed
skewness_odometer
#Kurtosis
kurtosis_price <- kurtosis(Vehicle_Data$price)
kurtosis_price
kurtosis_odometer <- kurtosis(Vehicle_Data$odometer)
kurtosis_odometer
#--------------------------------------------
# Frequency of used vehicles based on price
price_freq <- Vehicle_Data %>%
#select(id, Price_Cat) %>%
group_by(Price_Cat) %>%
summarise(count = n()) %>%
arrange(desc(count)) %>%
mutate(p_pmf = round(count/sum(count),6)) %>% #pmf
mutate(p_cdf = round(cumsum(p_pmf),6)) #cmf
price_freq
#expected value
price_freq_val <- weighted.mean(price_freq$count, price_freq$p_pmf)
price_freq_val
# Frequency of used vehicles listed based on condition
cond_freq <- Vehicle_Data %>%
#select(id, condition) %>%
group_by(condition) %>%
summarise(count = n()) %>%
arrange(desc(count)) %>%
mutate(c_pmf = round(count/sum(count),6)) %>% #pmf
mutate(c_cdf = round(cumsum(c_pmf),6)) #cmf
cond_freq
#expected value
cond_freq_val <- weighted.mean(cond_freq$count, cond_freq$c_pmf)
cond_freq_val
# joint probability for price and condition
# frequency table
# NOTE(review): outer(..., FUN = "+") sums the two MARGINAL counts; a true
# joint frequency would cross-tabulate the two variables per vehicle
# (e.g. table(Price_Cat, condition)) -- confirm this is intended.
joint_freq <- outer(price_freq$count, cond_freq$count, FUN = "+")
rownames(joint_freq) <- price_freq$Price_Cat
colnames(joint_freq) <- cond_freq$condition
joint_freq
# probabilities
joint_prob <- round(joint_freq/sum(joint_freq),6)
joint_prob
# restructuring the data
joint_df <- melt(joint_prob)
colnames(joint_df) <- c('Price', 'Condition', 'frequency')
joint_df
#heat map for Joint freq of price and condition
ggplot(joint_df,aes(x=Price, y=Condition, fill=frequency))+
geom_tile()+scale_fill_distiller(palette = "YlGn", direction = 1) +geom_text(aes(label = frequency),hjust=1, vjust = 2.0, colour = "black")+
theme_light()+ggtitle("Joint freq Heat Map of price and condition")
# calculating the coefficient
# NOTE(review): this correlates the two sorted marginal count vectors; it is
# only computable because both happen to have the same number of categories.
cor(price_freq$count, cond_freq$count)
#--------------------------------------------
#Data Visualization
# NOTE(review): several calls use the `size` argument for line widths, which
# ggplot2 >= 3.4 deprecates in favour of `linewidth` -- still works, warns.
#histogram for price
# price_filt <- filter(Vehicle_Data$price < 45000) %>%
# price_filt <- Vehicle_Data[(Vehicle_Data$price <= 45000) ]
ggplot(Vehicle_Data, aes(x=price)) + geom_histogram(fill="red",color="white",alpha=0.7,bins = 20)+ggtitle("Price Distribution")
# odometer vs price scatterplot
ggplot(Vehicle_Data, aes(x=odometer, y=price)) + geom_point(fill="blue",color="blue",alpha=0.7,size=.25)+ggtitle("Miles driven")
#boxplot for price
#ggplot(data = Vehicle_Data, aes(x = "", y = price)) + geom_boxplot
#bar graph for price based on condition of the car
Vehicle_Data %>%
#select(id, price, condition) %>%
group_by(condition) %>%
drop_na() %>%
summarise (Average = mean(price)) %>%
arrange(desc(Average)) %>%
ggplot(aes(x=Average, y=fct_inorder(condition))) + geom_bar(stat="identity", fill="grey")+ xlab("Average Price") + ylab("Condition of the car")+ geom_text(aes(label = round(Average,3)),hjust=1, vjust = 2.0, colour = "black")+ggtitle("Average Price for each Condition")
#Area graph for Average price for last 30 years
Vehicle_Data %>%
#select(id, price, year) %>%
filter(year>="1990-01-01") %>%
group_by(year) %>%
drop_na() %>%
summarise(Average = mean(price)) %>%
ggplot(aes(x=year, y=Average)) +
geom_area( fill="#39a4a5", alpha=0.4) +
geom_line(color="#69c4a2", size=2) +
geom_point(size=2, color="#69c4c2") +
theme_light() +
ggtitle("Average price for last 30 years")
#tree map for Average Price of each Manufacture
Vehicle_Data %>%
#select(id, price, manufacturer) %>%
group_by(manufacturer) %>%
drop_na() %>%
summarise(Average = mean(price)) %>%
arrange(desc(Average)) %>%
ggplot(aes(area = Average,
fill = Average,
label=manufacturer)) +geom_treemap()+
geom_treemap_text(colour = "black",
place = "top")+scale_fill_distiller(palette = "RdPu", direction = 1)+
ggtitle("Average Price of each Manufacture")
#Line plot for number of cars listed each year
Vehicle_Data %>%
#select(id,year) %>%
group_by(year) %>%
drop_na() %>%
summarise(Count = n()) %>%
ggplot(aes(x=year, y=Count)) +
geom_line(color="black", size=1) +
theme_light() +
ggtitle("Number of vehicles listed each year")
#Pie chart showing number of vehicle of each type listed
Vehicle_Data %>%
#select(id,price,type) %>%
group_by(type) %>%
drop_na() %>%
summarise(Count = n()) %>%
ggplot(aes(x="", y=Count, fill=type)) +
geom_bar(stat="identity", width=1) +
coord_polar("y", start=0) + theme_void() +
ggtitle("Number of vehicle of each type listed")+
scale_fill_manual(values=c("#551A8B", "#E69F00", "#56B4E9","#00008B", "#00611C", "#00EE00","#2F2F4F", "#E35152", "#999999","#F08080", "#EEEE00", "#8B5A00","#55141C"))
#Map to show price for each state
# Average price per state, with names lowercased to match map_data("state").
mapdata <- Vehicle_Data %>%
#select(id, price, `US STATE`) %>%
group_by(`US STATE`) %>%
drop_na() %>%
summarise(Average = mean(price)) %>%
arrange(desc(Average))
mapdata$`US STATE` <- tolower(mapdata$`US STATE`)
us_states <- map_data("state")
#for labels: one centroid-ish (mean long/lat) point per state
us_states_labels<-us_states %>%
left_join(mapdata, by=c("region"="US STATE")) %>%
#select(long, lat, group, Average, region ) %>%
group_by(`region`) %>%
summarise(across(long:Average, ~ mean(.x, na.rm = TRUE)))
us_states %>%
left_join(mapdata, by=c("region"="US STATE")) %>%
ggplot(aes(x=long,y=lat,group=group, fill=Average))+ggtitle("State-wise Average price of vehicles")+
geom_polygon(color = "gray90", size = 0.1)+geom_text(data=us_states_labels,aes(long, lat, label = region), size=2, vjust = 0, nudge_y = -0.05,hjust = 0, nudge_x = -0.7)+scale_fill_distiller(palette = "YlGnBu", direction = 1)+theme(axis.title.x=element_blank(),axis.text.x=element_blank(),axis.ticks.x=element_blank(),axis.title.y=element_blank(),axis.text.y=element_blank(),axis.ticks.y=element_blank())
#scatter plot for price and odometer
#Vehicle_Data %>%
# select(id,year,Price_Cat,price,odometer) %>%
# group_by(year,Price_Cat) %>%
# summarise(across(price:odometer, ~ mean(.x, na.rm = TRUE))) %>%
#ggplot(aes(x=price, y=odometer, color=Price_Cat)) +
# geom_point(size=1) +
# theme_ipsum()
#--------------------------------------------
#Chisquare
#Null hypothesis (H0): There is no significant difference between the observed and the expected value.
#Alternative hypothesis (Ha): There is a significant difference between the observed and the expected value.
#Chisquare for state
#H0 -> Used vehicles listed for each state are equally distributed
#Ha -> Used vehicles listed for each state are not equally distributed
Chi_state <- Vehicle_Data %>%
#select(state) %>%
group_by(state) %>%
drop_na() %>%
summarise(Count = n())
# Goodness-of-fit test against a uniform distribution over the states.
chisq.test(Chi_state$Count,p = rep(1/nrow(Chi_state), nrow(Chi_state)))
#goodness of fit
plot(Vehicle_Data$price, pch=1)
hist(Vehicle_Data$price)
plotdist(Vehicle_Data$price, histo = TRUE, demp = TRUE)
descdist(Vehicle_Data$price)
#Price is continuous , so evaluating gamma, normal, lognormal, exponential and uniform distributions
# Prices are divided by 100 -- presumably to keep the gamma/lognormal
# optimizers numerically stable; TODO confirm.
fit_g <- fitdist(Vehicle_Data$price/100, "gamma")
fit_n <- fitdist(Vehicle_Data$price/100, "norm")
fit_ln <- fitdist(Vehicle_Data$price/100, "lnorm")
fit_e <- fitdist(Vehicle_Data$price/100, "exp")
fit_u <- fitdist(Vehicle_Data$price/100, "unif")
# BUG FIX: fitnames must follow the order of the fits in the list
# (gamma, normal, lognormal, UNIFORM, EXPONENTIAL); the original swapped the
# last two labels, mislabelling both fits in the output.
gofstat(list(fit_g,fit_n,fit_ln,fit_u,fit_e), fitnames = c("gamma", "normal", "lognormal","uniform","exponential"))
descdist(Vehicle_Data$price)
summary(fit_ln)
summary(fit_g)
#plotting to find the best fit
par(mfrow=c(2,2))
# BUG FIX: the legend must name all five fitted distributions, in list
# order; the original listed only three names for five fits.
plot.legend <- c("gamma","normal","lognormal","uniform","exponential")
denscomp(list(fit_g,fit_n,fit_ln,fit_u,fit_e), legendtext = plot.legend) #gamma
cdfcomp (list(fit_g,fit_n,fit_ln,fit_u,fit_e), legendtext = plot.legend) #gamma
qqcomp  (list(fit_g,fit_n,fit_ln,fit_u,fit_e), legendtext = plot.legend) #uniform
ppcomp  (list(fit_g,fit_n,fit_ln,fit_u,fit_e), legendtext = plot.legend) #gamma
#--------------------------------------------
#Statistical Test
#--------------------------------------------
# Linear Regression
str(Vehicle_Data)
# Work on a numeric copy: lm/caret need numeric predictors, so the factor
# columns are converted to their integer codes.
# NOTE(review): treating ordinal-ish factors (condition, cylinders, size) as
# integer codes imposes an arbitrary ordering -- acknowledged simplification.
Vehicle_Data2 <- Vehicle_Data
Vehicle_Data2$price <- as.numeric(Vehicle_Data2$price)
Vehicle_Data2$odometer <- as.numeric(Vehicle_Data2$odometer)
Vehicle_Data2$condition <- as.numeric(Vehicle_Data2$condition)
Vehicle_Data2$cylinders <- as.numeric(Vehicle_Data2$cylinders)
Vehicle_Data2$year <- as.numeric(Vehicle_Data2$year)
Vehicle_Data2$size <- as.factor(Vehicle_Data2$size)
Vehicle_Data2$size <- as.numeric(Vehicle_Data2$size)
Vehicle_Data2$fuel <- as.factor(Vehicle_Data2$fuel)
Vehicle_Data2$fuel <- as.numeric(Vehicle_Data2$fuel)
str(Vehicle_Data2)
# Check to see if there are missing data?
sum(is.na(Vehicle_Data2))
# To achieve reproducible model; set the random seed number
set.seed(100)
# Performs stratified random split of the data set (70% training)
TrainingIndex <- createDataPartition(Vehicle_Data2$id, p=0.7, list = FALSE)
TrainingSet <- Vehicle_Data2[TrainingIndex,] # Training Set
TestingSet <- Vehicle_Data2[-TrainingIndex,] # Test Set
###############################
# Build Training model: linear regression on the numeric predictors, with
# centering/scaling, via caret (trControl "none" = single fit, no resampling).
# BUG FIX: the original formula included the response `price` among the
# predictors ("price ~ odometer + price + ..."), which makes the fit
# trivially (near-)perfect; `price` is removed from the right-hand side.
Model <- train(price ~ odometer + condition + cylinders + year + size + fuel, data = TrainingSet,
method = "lm",
na.action = na.omit,
preProcess=c("scale","center"),
trControl= trainControl(method="none")
)
# Apply model for prediction
Model.training <-predict(Model, TrainingSet) # Apply model to make prediction on Training set
Model.testing <-predict(Model, TestingSet) # Apply model to make prediction on Testing set
# Model performance (Displays scatter plot and performance metrics)
# Scatter plot of Training set
plot(TrainingSet$price,Model.training, col = "blue" )
plot(TestingSet$price,Model.testing, col = "blue" )
# Model performance summary
summary(Model)
cor(TrainingSet$price,Model.training)
cor(TestingSet$price,Model.testing)
|
086b780dfbb5d37372cbc09af92635209fc516a8
|
ee0689132c92cf0ea3e82c65b20f85a2d6127bb8
|
/78-TM/SM-rtexttools1.R
|
34ef07e98368489ca1a72ca1eb753c465b043380
|
[] |
no_license
|
DUanalytics/rAnalytics
|
f98d34d324e1611c8c0924fbd499a5fdac0e0911
|
07242250a702631c0d6a31d3ad8568daf9256099
|
refs/heads/master
| 2023-08-08T14:48:13.210501
| 2023-07-30T12:27:26
| 2023-07-30T12:27:26
| 201,704,509
| 203
| 29
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,196
|
r
|
SM-rtexttools1.R
|
# Text Analysis
# Sentiment classification demo on tiny hand-labelled tweet sets: first
# e1071's naive Bayes on a document-term matrix, then RTextTools' model zoo.
library(RTextTools)
library(e1071)
pos_tweets =  rbind(
c('I love this car', 'positive'),
c('This view is amazing', 'positive'),
c('I feel great this morning', 'positive'),
c('I am so excited about the concert', 'positive'),
c('He is my best friend', 'positive')
)
neg_tweets = rbind(
c('I do not like this car', 'negative'),
c('This view is horrible', 'negative'),
c('I feel tired this morning', 'negative'),
c('I am not looking forward to the concert', 'negative'),
c('He is my enemy', 'negative')
)
test_tweets = rbind(
c('feel happy this morning', 'positive'),
c('larry friend', 'positive'),
c('not like that man', 'negative'),
c('house not great', 'negative'),
c('your song annoying', 'negative')
)
# Rows 1-10 are training examples, rows 11-15 are the test set.
tweets = rbind(pos_tweets, neg_tweets, test_tweets)
# build dtm
# NOTE(review): the variable name `matrix` shadows base::matrix -- works,
# but renaming it would be safer.
matrix= create_matrix(tweets[,1], language="english",
removeStopwords=FALSE, removeNumbers=TRUE,
stemWords=FALSE)
# train the model
mat = as.matrix(matrix)
classifier = naiveBayes(mat[1:10,], as.factor(tweets[1:10,2]) )
# test the validity
predicted = predict(classifier, mat[11:15,]); predicted
table(tweets[11:15, 2], predicted)
recall_accuracy(tweets[11:15, 2], predicted)
# build the data to specify response variable, training set, testing set.
container = create_container(matrix, as.numeric(as.factor(tweets[,2])), trainSize=1:10, testSize=11:15,virgin=FALSE)
models = train_models(container, algorithms=c("MAXENT" , "SVM", "RF", "BAGGING", "TREE"))
#Now, we can classify the testing set using the trained models.
results = classify_models(container, models)
#How about the accuracy?
# accuracy table
table(as.numeric(as.factor(tweets[11:15, 2])), results[,"FORESTS_LABEL"])
table(as.numeric(as.factor(tweets[11:15, 2])), results[,"MAXENTROPY_LABEL"])
# recall accuracy
recall_accuracy(as.numeric(as.factor(tweets[11:15, 2])), results[,"FORESTS_LABEL"])
recall_accuracy(as.numeric(as.factor(tweets[11:15, 2])), results[,"MAXENTROPY_LABEL"])
recall_accuracy(as.numeric(as.factor(tweets[11:15, 2])), results[,"TREE_LABEL"])
recall_accuracy(as.numeric(as.factor(tweets[11:15, 2])), results[,"BAGGING_LABEL"])
recall_accuracy(as.numeric(as.factor(tweets[11:15, 2])), results[,"SVM_LABEL"])
#To summarize the results (especially the validity) in a formal way:
# model summary
analytics = create_analytics(container, results)
summary(analytics)
head(analytics@document_summary)
analytics@ensemble_summary
#To cross validate the results:
N=4
set.seed(2014)
cross_validate(container,N,"MAXENT")
cross_validate(container,N,"TREE")
cross_validate(container,N,"SVM")
cross_validate(container,N,"RF")
#The results can be found on my Rpub page. It seems that maxent reached the same recall accuracy as naive Bayes. The other methods even did a worse job. This is understandable, since we have only a very small data set. To enlarge the training set, we can get a much better results for sentiment analysis of tweets using more sophisticated methods. I will show the results with anther example.
# Sentiment analysis for tweets
# The data comes from victorneo, who shows how to do sentiment analysis for
# tweets using Python; here the same exercise is done in R.
###################
# "load data" : set it to your file
###################
# NOTE(review): hard-coded Windows setwd() makes this script machine-specific.
setwd("D:/Twitter-Sentimental-Analysis-master/")
happy = readLines("./happy.txt")
sad = readLines("./sad.txt")
happy_test = readLines("./happy_test.txt")
sad_test = readLines("./sad_test.txt")
# Training texts first, then the held-out test texts.
tweet = c(happy, sad)
tweet_test= c(happy_test, sad_test)
tweet_all = c(tweet, tweet_test)
sentiment = c(rep("happy", length(happy) ),
              rep("sad", length(sad)))
sentiment_test = c(rep("happy", length(happy_test) ),
                   rep("sad", length(sad_test)))
sentiment_all = as.factor(c(sentiment, sentiment_test))
library(RTextTools)
# First, try naive Bayes.
# (BUG FIX: the line above was bare prose in the original — a syntax error in R.)
# naive bayes: tf-idf weighted document-term matrix over all tweets
mat= create_matrix(tweet_all, language="english",
                   removeStopwords=FALSE, removeNumbers=TRUE,
                   stemWords=FALSE, tm::weightTfIdf)
mat = as.matrix(mat)
# Train on the first 160 rows, evaluate on rows 161:180.
classifier = naiveBayes(mat[1:160,], as.factor(sentiment_all[1:160]))
predicted = predict(classifier, mat[161:180,]); predicted
table(sentiment_test, predicted)
recall_accuracy(sentiment_test, predicted)
# Then, try the other methods.
# (BUG FIX: the line above was bare prose in the original — a syntax error in R.)
# the other methods
mat= create_matrix(tweet_all, language="english",
                   removeStopwords=FALSE, removeNumbers=TRUE,
                   stemWords=FALSE, tm::weightTfIdf)
container = create_container(mat, as.numeric(sentiment_all),
                             trainSize=1:160, testSize=161:180,virgin=FALSE) # removeSparseTerms could also be set here
models = train_models(container, algorithms=c("MAXENT",
                                              "SVM",
                                              #"GLMNET", "BOOSTING",
                                              "SLDA","BAGGING",
                                              "RF", # "NNET",
                                              "TREE"
                                              ))
# test the model on the held-out rows 161:180
results = classify_models(container, models)
table(as.numeric(as.numeric(sentiment_all[161:180])), results[,"FORESTS_LABEL"])
recall_accuracy(as.numeric(as.numeric(sentiment_all[161:180])), results[,"FORESTS_LABEL"])
# Here we also want to get the formal test results, including:
#   analytics@algorithm_summary: precision, recall, f-scores, accuracy per algorithm
#   analytics@label_summary:     label (e.g. topic) accuracy
#   analytics@document_summary:  raw summary of all data and scoring
#   analytics@ensemble_summary:  ensemble precision/coverage (uses n from create_analytics())
# (BUG FIX: the six lines above were bare prose in the original — syntax errors in R.)
# formal tests
analytics = create_analytics(container, results)
summary(analytics)
head(analytics@algorithm_summary)
head(analytics@label_summary)
head(analytics@document_summary)
analytics@ensemble_summary # Ensemble Agreement
# Cross Validation (3 folds)
N=3
cross_SVM = cross_validate(container,N,"SVM")
cross_GLMNET = cross_validate(container,N,"GLMNET")
cross_MAXENT = cross_validate(container,N,"MAXENT")
|
dc96cce17dae17a3d644b0f2ac0b3f896d54d902
|
6682e0eabb4f934d177397904b55bea31d49cc0d
|
/config/2014_PR_config.r
|
bac48cce1dd5bf772066da18f12a0356ef241183
|
[
"MIT"
] |
permissive
|
methiess/PSC-FRAM-Admin
|
9159aa6fc4644fd0a321fec2d63ca1400fab7572
|
14309790ecb30118757dc890fcb5d6e5c85db3f8
|
refs/heads/master
| 2020-04-16T08:18:21.410928
| 2019-01-18T03:45:20
| 2019-01-18T03:45:20
| 165,419,485
| 0
| 0
|
MIT
| 2019-01-18T03:45:22
| 2019-01-12T18:06:22
|
HTML
|
UTF-8
|
R
| false
| false
| 780
|
r
|
2014_PR_config.r
|
# Configuration for the 2014 Periodic Report run.
run.year <- 2014
# Post-season ("backwards") coho FRAM database, model-run name, and TAMM workbook.
post.season.fram.db <- "./fram db/PeriodicReportdb/FramVS2-PSC-Coho-Backwards-redo 2010-2016 January 2019 products.mdb"
post.season.run.name <- "bc-bkCoho2014 step 3"
post.season.tamm <- "./fram db/PeriodicReportdb/updated2010-2016TAMMfiles/BK 2014 January 2019 redo step 3.xlsm"
# Post-season TAMM fishery / escapement reference tables (Queets variants).
post.season.tamm.fishery.ref <- "./data/TammFisheryQueetsRef.csv"
post.season.tamm.esc.ref <- "./data/TammEscQueetsRef.csv"
# Pre-season coho FRAM database, model-run name, and TAMM workbook.
pre.season.fram.db <- "./fram db/PeriodicReportdb/US_PFMC_NOF_FinalCohoFRAM_MultiYr.mdb"
pre.season.run.name <- "bc-1416 Final"
pre.season.tamm <- "./fram db/PeriodicReportdb/old2010-2016TAMMfiles/coho BK 2014 Final no tami_AR021716.xlsm"
# Pre-season TAMM fishery / escapement reference tables (full variants).
pre.season.tamm.fishery.ref <- "./data/TammFisheryFullRef.csv"
pre.season.tamm.esc.ref <- "./data/TammEscFullRef.csv"
|
1f4290cd149e6364380e5f27ea6da9cb2ef0b26e
|
a246c544d53ca52369cfc89879937d70f26eb1bb
|
/test.R
|
cc667c6a348e6eb9938197372ea16400b0e6598f
|
[] |
no_license
|
innuo/CausalSimR
|
a06a2b7d108dc391eb46f6dc0cda213c9acc1840
|
2f0cdbf1f3a23a32f51b81e8c49b5cbe224791d4
|
refs/heads/master
| 2020-05-05T09:04:35.422413
| 2020-02-29T21:21:47
| 2020-02-29T21:21:47
| 179,890,124
| 0
| 0
| null | 2020-01-20T15:17:02
| 2019-04-06T21:46:34
|
R
|
UTF-8
|
R
| false
| false
| 3,252
|
r
|
test.R
|
# Smoke test: can structure learning cope with two single-row data sets?
# Depends on the ../CausalSimPy test fixtures on disk and on the DataSet /
# CausalSimModel classes defined elsewhere in this package.
single_row_test <- function(){
  # Both "NA" and the empty string are read as missing values.
  d1 <- read.csv("../CausalSimPy/data/SinglePearlTest1.csv", na.strings=c("NA", ""))
  d2 <- read.csv("../CausalSimPy/data/SinglePearlTest2.csv", na.strings=c("NA", ""))
  dataset <- DataSet$new(d1)
  dataset$attach_data(d2)
  sim <- CausalSimModel$new(dataset)
  sim$learn_structure()
  sim$plot()
}
# End-to-end exercise of the simulator on one CSV: learn structure and
# samplers, report fit scores and serialized model size, draw 10k samples,
# and (for the Price/VolumeBought data set) overlay simulated points on the
# real ones and show one counterfactual. Returns the fitted sim object.
basic_test <- function(data.file = "../CausalSimPy/data/5d.csv"){
  full.data <- read.csv(data.file)
  dataset <- DataSet$new(full.data)
  dataset$fill_missing()
  #dataset$drop_missing()
  sim <- CausalSimModel$new(dataset)
  sim$learn_structure()
  sim$plot()
  #sim$learn_samplers(estimate.fit.score=FALSE)
  #browser()
  fit.scores <- sim$learn_samplers(estimate.fit.score=TRUE)
  print(fit.scores)
  # Rough in-memory footprint of the fitted model.
  print(paste("Size =", length(serialize(sim, NULL))))
  df <- sim$sample(10000)
  # Only the 5d pricing fixture has these columns; skip the plot otherwise.
  if("Price" %in% names(df)){
    # blue = real data, red = simulated draws
    plot(full.data$Price, full.data$VolumeBought, col="blue")
    points(df$Price, df$VolumeBought, col="red")
    d <- head(full.data, 1)
    print(d)
    print(sim$counterfactual(d, list(Price=100)))
  }
  sim
}
# Same as basic_test, but the data arrives in two halves that are attached to
# the DataSet incrementally before learning. Called for its side effects
# (plots, printed fit scores).
basic_attach_test <- function(){
  df1 <- read.csv("../CausalSimPy/data/5d_1.csv")
  dataset <- DataSet$new(df1)
  df2 <- read.csv("../CausalSimPy/data/5d_2.csv")
  dataset$attach_data(df2)
  sim <- CausalSimModel$new(dataset)
  sim$learn_structure()
  sim$plot()
  fit.scores <- sim$learn_samplers(estimate.fit.score=TRUE)
  print(fit.scores)
  df <- sim$sample(10000)
  # BUG FIX: `full.data` was never defined in this function (it only existed
  # inside basic_test), so the comparison plot always errored. Reconstruct the
  # full data set from the two halves that were read above.
  full.data <- rbind(df1, df2)
  # blue = real data, red = simulated draws
  plot(full.data$Price, full.data$VolumeBought, col="blue")
  points(df$Price, df$VolumeBought, col="red")
}
# Imputation exercise: subsample the 5d fixture, knock out 30% of each column,
# then fill the holes two ways (Gibbs via the fitted sim, MICE via the
# DataSet). The diagnostics() calls comparing the fills are currently disabled.
missing_test <- function(train.sample.size = 1000){
  full.data <- read.csv("../CausalSimPy/data/5d.csv")
  data <- full.data[sample(1:nrow(full.data), train.sample.size),]
  missing.data <- drop.data.cells(data, 0.3) #add missings
  dataset <- DataSet$new(missing.data)
  dataset$fill_missing()
  #dataset$drop_missing()
  sim <- CausalSimModel$new(dataset)
  sim$learn_structure()
  sim$learn_samplers()
  sim$plot()
  filled.gibbs <- sim$fill_gibbs(missing.data)
  filled.mice <- sim$dataset$filled.data
  #diagnostics(data, filled.gibbs, missing.data, "Gibbs")
  #diagnostics(data, filled.mice, missing.data, "MICE")
}
# Compare original vs. imputed values at the cells that were masked out.
# Numeric columns: scatter plot of original vs. filled with a y = x reference
# line and the RMSE in the title. Other columns: print a confusion table.
# Called for its side effects (plots / printing).
#
# data         - complete data frame (ground truth)
# filled       - same shape, with imputed values
# missing.data - same shape, with NAs marking which cells were imputed
# label        - method name used in plot titles / table headers
diagnostics <- function(data, filled, missing.data, label){
  for(i in seq_len(ncol(data))){
    missing.inds <- is.na(missing.data[,i])
    # is.numeric() also covers integer columns, which the original test
    # `class(x) == 'numeric'` wrongly routed to the confusion-table branch.
    # (An unused `rho` correlation was also dropped here.)
    if(is.numeric(data[,i])){
      # rmse() is defined later in this file; `err` avoids shadowing it.
      err <- rmse(data[missing.inds, i], filled[missing.inds, i])
      plot(data[missing.inds, i], filled[missing.inds, i], pch=20,
           col="blue", main=paste(label, ":", names(data)[i], ", rmse =", round(err, 2)),
           xlab="Original", ylab="Filled")
      abline(0, 1, col="red")
      grid()
    }
    else{
      cat("\n\n")
      cat("==============\n")
      print(paste(label, ":", names(data)[i]))
      cat("-------------\n")
      tmp <- cbind.data.frame(data[missing.inds, i], filled[missing.inds, i])
      names(tmp) <- c("Original", "Filled")
      print(table(tmp))
    }
  }
}
# Punch random holes in a data frame: in every column, set `fraction` of the
# rows (rounded) to NA. Returns the perforated copy; `df` itself is untouched.
drop.data.cells <- function(df, fraction){
  n.holes <- round(nrow(df) * fraction)
  for (col in seq_len(ncol(df))) {
    holes <- sample(seq_len(nrow(df)), n.holes)
    df[holes, col] <- NA
  }
  df
}
# Root-mean-square error between two equal-length numeric vectors.
rmse <- function(x, y){
  squared.err <- (x - y)^2
  sqrt(mean(squared.err))
}
|
dfdac2e64dca7abb48948ed3be78c05ad563cb3e
|
79d422c4d5614cb08bc91963efabab72a2de1b1a
|
/Stringr().R
|
61713f14f569a02b514a392f55e86875b7ddc8e1
|
[] |
no_license
|
jenny2202/Bank-optimization
|
1edb61a0605efef519dd00203c6b49032e4d2ad9
|
292d8ecd676b2a0c12c5bb46669450423091ee14
|
refs/heads/master
| 2021-10-25T23:46:58.939592
| 2019-04-08T12:08:45
| 2019-04-08T12:08:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 447
|
r
|
Stringr().R
|
library(stringr)
# Build a human-readable bin table from `my_card` (created elsewhere —
# presumably a list of scorecard/woebin bin tables; TODO confirm in caller).
# Drops the first (bookkeeping) row and the count/WOE columns, rounds the
# numeric columns, and rewrites interval notation,
# e.g. "[-Inf,6000)" -> "From -Inf to 6000".
do.call("bind_rows", my_card) %>%
  slice(-1) %>%
  select(-breaks, -is_special_values, -count, -count_distr, -good, -bad, -badprob) %>%
  mutate_if(is.numeric, function(x) {round(x, 3)}) %>%
  mutate(bin = bin %>%
           str_replace_all("\\[", "From ") %>% # Replace [-Inf,6000) -> from... to...
           str_replace_all("\\,", " to ") %>%
           str_replace_all("\\)", "")) -> iv_for_predictors_point
|
8c8f1c22085fccb318c09fae25e90a12af69510c
|
2721df14d282cc9e1c00169b35b4ae27990b80be
|
/Final/pollutantmean.R
|
ef0ede950f9094b579a9df2d5ec03f3915c74428
|
[] |
no_license
|
diegonogare/datasciencecoursera
|
bab2606ae7ca50760171cbf633819652e899b1b8
|
782910ee772ca4d259906d2090c8ec376db7bf28
|
refs/heads/master
| 2020-06-06T13:00:20.139936
| 2014-07-25T14:26:27
| 2014-07-25T14:26:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 248
|
r
|
pollutantmean.R
|
# Mean of a pollutant across the monitors in `id`, ignoring NAs.
# loadFiles.R must define a function(directory, id, pollutant) returning a
# list of pollutant readings for the requested monitor ids.
pollutantmean <- function(directory, pollutant, id = 1:332) {
  loadFiles <- dget("loadFiles.R")
  lst <- loadFiles(directory, id, pollutant)
  values <- unlist(lst)
  # BUG FIX: the original computed `values[which(c(1, diff(values)) != 0)]`
  # and discarded the result — dead code with no effect on the mean; removed.
  mean(values, na.rm = TRUE)
}
|
a99430509ca437ec0a3545749589dcf5af87007f
|
dade7b41292cdbc491cbfff3660b21f7a40ac45e
|
/tests/testthat/test_best_models.R
|
102bd0770fb17cc56a65d43fb6c0bcfc317dc6a6
|
[] |
no_license
|
statdivlab/CatchMore
|
8eb735a84df2b8788f4bd8d127b30f969625d623
|
8c1ec51f6a6ac2939b9561130e2ce74ada1b91cd
|
refs/heads/master
| 2020-04-09T15:13:41.012918
| 2019-03-15T17:42:55
| 2019-03-15T17:42:55
| 160,420,021
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 490
|
r
|
test_best_models.R
|
# Regression test: CatchAll's analysis of the classic `apples` data set
# (shipped with breakaway) should keep selecting the same best model.
library(CatchAll)
library(breakaway)
library(testthat)
context("Best models are the same")
data(apples)
test_that("Analysis of the apples dataset gives the same best model", {
  apple_best_model <- best_model(apples)
  # Model identity and cutoff are pinned exactly...
  expect_equal(apple_best_model$name,
               "Three-component Geometric mixture Model")
  expect_equal(apple_best_model$other$cutoff,
               163)
  ## ...but the richness estimate only needs to stay within 5% of 1477.1.
  expect_lte(abs(apple_best_model$est - 1477.1), 0.05 * 1477.1)
})
|
91990995b04d82086e8c89af7720b0fc2aa02ba5
|
54bf9bc76aaa7e1fec5961efb12bfb636fa90a2e
|
/Archive/NIMBioS.code/shiny/server.R
|
f056c7e682db3181a6a8045a188f2db5e440961e
|
[] |
no_license
|
christianparobek/skeleSim
|
25d63dc3eeee6d8218d19e0f011229cfb843d053
|
0d61409497283ac1db129379b479639261695f83
|
refs/heads/master
| 2020-03-28T05:36:45.448623
| 2020-02-26T21:55:51
| 2020-02-26T21:55:51
| 32,469,895
| 3
| 9
| null | 2017-11-22T16:30:16
| 2015-03-18T16:16:29
|
HTML
|
UTF-8
|
R
| false
| false
| 2,135
|
r
|
server.R
|
library(shiny)
library(datasets)
# Server logic: renders the questionnaire ("megalist") responses as a table,
# echoes the simulation name, and keeps commented-out scaffolding for a
# coalescent-vs-forward simulator score.
shinyServer(function(input, output) {
  # Reactive data frame pairing each questionnaire item with its current value.
  megalistValues <- reactive({
    # Compose data frame
    data.frame(
      Name = c("SNPs",
               "Non-Diploid",
               "Many Markers",
               "Large Pop Size",
               "Complex History",
               "Deep Timeframes",
               "Model Demography",
               "Management Question",
               "Fast Completion",
               "Large Computer"),
      Value = as.character(c(input$snps,
                             input$non.diploid,
                             input$marker.num,
                             input$pop.size,
                             input$complex.hist,
                             input$deep.time,
                             input$demography,
                             input$management,
                             input$completion.time,
                             input$computer)),
      stringsAsFactors=FALSE)
  })
  # Server-side support for rendering megalist values
  output$values <- renderTable({
    megalistValues()
  })
  # Server-side support for rendering the simulation name
  output$simname <- renderText({
    input$simname
  })
  # Server-side support for calculating the coalescent / forward score
  # (scaffolding kept for a future feature; not wired up yet)
  # output$simscore <-
  # default responses
  # responses <- c(input$snps,
  #               input$non.diploid,
  #               input$marker.num,
  #               input$pop.size,
  #               input$complex.hist,
  #               input$deep.time,
  #               input$demography,
  #               input$management,
  #               input$completion.time,
  #               input$computer)
  # response weights
  # forward.wts <- c(0, 0, 0.3, 0.2, 0, 0.2, 1, 1, 0.2, 0.3)
  # get relative 'score' for each model
  # fwd.score <- sum(forward.wts * responses) / length(responses)
  # cat("Coalescent score: ", 1 - fwd.score, "\n")
  # cat("Forward-time score: ", fwd.score, "\n")
})
|
cb8e9698f387244e0d93059efa4d50d1d49ec097
|
2d8ed97505da8675d277fc3b2ada8efcefc30185
|
/cachematrix.R
|
c4e82236f3c92dc42a50630f6992ae79216951f4
|
[] |
no_license
|
rbidanta/ProgrammingAssignment2
|
1528f90d6a8f8dbf4e20a4b414f8c4cef9dc025d
|
216d45b83671425f0ff119d681bed2837cc3ffc0
|
refs/heads/master
| 2020-04-03T07:22:20.321401
| 2016-01-24T00:59:26
| 2016-01-24T00:59:26
| 50,223,605
| 0
| 0
| null | 2016-01-23T04:37:45
| 2016-01-23T04:37:45
| null |
UTF-8
|
R
| false
| false
| 1,182
|
r
|
cachematrix.R
|
## In this assignment I am creating two R functions.
## makeCacheMatrix() and cacheSolve()
## This funtion takes a matrix as an argument and returns
## a special list does the following
## Sets the value of Matrix
## Gets the Value of Matrix
## Sets the value for Matrix Inverse
## Gets the value of Matrix Inverse
makeCacheMatrix <- function(x = matrix()) {
inv <-NULL
setMatrix <- function(){
x <<- y
inv <<- NULL
}
getMatrix <- function() x
setInverse <- function(solve) inv <<- solve
getInverse <- function() inv
list(setMatrix = setMatrix , getMatrix = getMatrix, setInverse = setInverse, getInverse = getInverse)
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then this method calculates the inverse from the cache.
cacheSolve <- function(x, ...) {
inv <- x$getInverse()
if(!is.null(inv)){
message("Getting Inverse of Matrix")
return(inv)
}
thismatrix <- x$getMatrix()
inv <- solve(thismatrix)
x$setInverse(inv)
inv
}
|
29aa61aacc6a6ec86e8ee463a54b42a1ac2e335c
|
935282511da356aea1d1376112eb73ab947237e4
|
/doc/olink.annotation.R
|
e3b488fb7bad6b59fbbf5c3c736b9c08b4d46dc0
|
[] |
no_license
|
jinghuazhao/INF
|
c129b3871dd38db03392085e99b851edc7e036d2
|
18af1c5b8124b068ad9fdc2b24caa06183492e08
|
refs/heads/master
| 2023-08-14T19:57:48.911731
| 2023-08-14T16:44:57
| 2023-08-14T16:44:57
| 145,544,979
| 7
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,585
|
r
|
olink.annotation.R
|
# Goal: create master mapping file for Olink proteins
# NOTE(review): rm(list=ls()) wipes the global environment — acceptable for a
# standalone script, but avoid sourcing this from an interactive session.
rm(list=ls())
#--------------------- Import ---------------------#
library(readxl)
library(stringr)
# Number of panel tabs in the Olink validation-data workbook.
n=12
mylist <- vector('list', n)
names(mylist) <- c('cardiometabolic', 'cell.regul', 'cvd2', 'cvd3', 'devel', 'imm.resp',
                   'imm.onc', 'inf', 'metab', 'neu', 'onc2', 'organ.damage')
# Read the target/UniProt columns (range A1:B93) of each of the 12 panel sheets.
for (i in 1:n){
  mylist[[i]] <- as.data.frame(
    read_xlsx(path= "~/post-doc/o-link/all-panels/Olink-validation-data-all-panels.xlsx",
              sheet = i, range='A1:B93', col_names = TRUE)
  )
}
# Give a panel sheet the canonical column names used throughout this script.
col.rename <- function(x) {
  canonical <- c("target", "uniprot")
  colnames(x) <- canonical
  x
}
mylist <- lapply(mylist, col.rename)
for (i in 1:n){
mylist[[i]] <- data.frame(
mylist[[i]], panel=rep(names(mylist)[i], nrow(mylist[[i]]))
)
}
df <- do.call('rbind', mylist)
#--------------------- Clean-up ---------------------#
# data input error by Olink 'o' instead of 'O'
df$target <- gsub("IL-2oRA", "IL-20RA", df$target)
# turns out TWEAK labelled with an out of date or inferior UP id
df$uniprot[grep('TWEAK', df$target)] <- "O43508"
# Clean up 1. identify bad entries: Olink have made some errors and Excel import causes some problems
# clean whitespace
df$uniprot <- gsub('\\\r\\\n', ";", df$uniprot, ignore.case = F)
df$uniprot <- gsub(', |,', ";", df$uniprot, ignore.case = F)
df$uniprot <- gsub("[[:space:]]", "", df$uniprot)
# Clean up 2. '-' represents isoform notation eg O43521-2
df$uniprot.isoform <- NA
df$uniprot.isoform[grep('-', df$uniprot)] <- grep('-', df$uniprot, v=T)
df$uniprot <- gsub("-[0-9]$", "", df$uniprot)
# Special circumstances 1: uniprot is 'NA'
na.ind <- grep('^NA', df$uniprot)
# turns out this is NTproBNP: give it the uniprot of BNP
for (i in na.ind){
if (grepl('brain\ natriuretic\ peptide', df$target[i])){
df$uniprot[i] <- 'P16860'
}
}
# Special circumstances 2:two ids for protein complex eg IL12A-IL12B
# uniprot ids sep by ';'
# df[grep(";", df$uniprot), ]
df$multiple.proteins <- FALSE
df$multiple.proteins[grep(";", df$uniprot)] <- TRUE
df$protein.1 <- df$uniprot
df$protein.2 <- NA
df$protein.2[which(df$multiple.proteins==T)] <- str_extract(string=df$uniprot, pattern = ";[A-Z0-9]+")[which(df$multiple.proteins==T)]
df$protein.2 <- gsub("^;", "", df$protein.2)
df$protein.1[which(df$multiple.proteins==T)] <- str_extract(string=df$uniprot, pattern = "^[A-Z0-9]+;")[which(df$multiple.proteins==T)]
df$protein.1 <- gsub(";$", "", df$protein.1)
# where there are 2 uniprot ids (eg protein complex) the uniprot ids are not always in consistent order
# lets make them in consistent alphabetical order
df$uniprot.ordered <- NA
df$uniprot.ordered[!df$multiple.proteins] <- df$uniprot[!df$multiple.proteins]
# Join the protein.1 / protein.2 UniProt ids of a one-row data.frame into a
# single ';'-separated string in alphabetical order (so complexes get a
# canonical id regardless of which protein was listed first).
alphabetize.up <- function(x) {
  if (!inherits(x, what='data.frame')) {
    stop('argument must be a data.frame')
  }
  ids <- sort(c(x$protein.1, x$protein.2))
  paste(ids, collapse=';')
}
inds <- which(df$multiple.proteins)
for (i in inds){
df$uniprot.ordered[i] <- alphabetize.up(df[i,])
}
#annoying that p1 and p2 are arbitrary: now we've ordered things let's start over on this front
df$protein.1 <- df$protein.2 <-NULL
# now repeat the exercise for p1 and p2 using the alphabetized concatenation
df$protein.1 <- df$uniprot.ordered
df$protein.2 <- NA
df$protein.2[which(df$multiple.proteins==T)] <- str_extract(string=df$uniprot.ordered, pattern = ";[A-Z0-9]+")[which(df$multiple.proteins==T)]
df$protein.2 <- gsub("^;", "", df$protein.2)
df$protein.1[which(df$multiple.proteins==T)] <- str_extract(string=df$uniprot.ordered, pattern = "^[A-Z0-9]+;")[which(df$multiple.proteins==T)]
df$protein.1 <- gsub(";$", "", df$protein.1)
# col to identify dup proteins and which panels
dup.prots <- union( which( duplicated(df$uniprot.ordered)), which( duplicated(df$uniprot.ordered, fromLast = T)) )
df$prot.on.multiple.panel <- FALSE
df$prot.on.multiple.panel[dup.prots] <- TRUE
df$panels.with.prot <- NA
tmp.list <- split( df[dup.prots,], f=df$uniprot.ordered[dup.prots] )
mylist <- lapply(tmp.list, FUN = function(x) paste( as.character(x$panel), collapse=";" ) )
for (i in dup.prots){
uprot <- df$uniprot.ordered[i]
df[i, "panels.with.prot"] <- mylist[[uprot]]
}
#--------------------- Gene symbol annotation ---------------------#
# matching to gene symbols: do for p1 and p2
library(biomaRt)
#ensembl <- useMart(biomart="ensembl",
# dataset="hsapiens_gene_ensembl",
# host='http://jul2018.archive.ensembl.org')
#filters <- listFilters(ensembl)
#x <- getBM(attributes = c('uniprotswissprot', 'hgnc_symbol', 'entrezgene', 'chromosome_name'),
# filters = 'uniprotswissprot',
# values = df$protein.1,
# mart = ensembl)
# some UP ids not found by BioMart: turns out we have outdated IDs
#df[which(!df$protein.1 %in% x$uniprotswissprot),]
#--------------------- Try an archived version of Ensembl ---------------------#
# find urls for old ensembl versions
listEnsemblArchives()
# hg19/GRCh37
ensembl.hg19 <- useMart(biomart= "ENSEMBL_MART_ENSEMBL",
dataset="hsapiens_gene_ensembl",
host = 'http://grch37.ensembl.org')
# note attribute names differ in the older release
gene.pos <- getBM(attributes = c('uniprotswissprot', 'hgnc_symbol', # 'entrezgene',
'chromosome_name', 'start_position', 'end_position'),
filters = 'uniprotswissprot',
values = unique(df$protein.1),
mart = ensembl.hg19)
# there are some duplicated genes
dup.ind <- union( which(duplicated(gene.pos$hgnc_symbol)),
which(duplicated(gene.pos$hgnc_symbol, fromLast = T))
)
# strange chr names
strange.ind <- which(!gene.pos$chromosome_name %in% c(1:22, 'X', 'Y'))
to.cut <- intersect(dup.ind, strange.ind)
gene.pos2 <- gene.pos[-to.cut,]
#-------------------------------------------------------------------------------#
df2 <- merge(x = df, y = gene.pos2, by.x = "protein.1", by.y = "uniprotswissprot", all = TRUE)
#-------------------------------------------------------------------------------#
inf <- df2[df2$panel=="inf",]
inf <- data.frame(target.short= gsub("^.+\\(|)$", "", inf$target), inf)
# load the olink inf eset
library(Biobase)
eset <- readRDS("~/post-doc/o-link/esets/round2/post-qc/eset.inf1.flag.out.outlier.out.rds")
features <- fData(eset)
features <- features[, c("olink.id", "uniprot.id")]
features$common.name <- gsub("^[0-9]+_", "", features$olink.id)
features$uniprot.id <- gsub("[[:space:]]", "", features$uniprot.id)
if (all(features$uniprot.id %in% inf$uniprot)==FALSE){
warning("not all the uniprot ids in the data olink suppliedare in the annotation file")
tmp <- features[!features$uniprot.id %in% inf$uniprot, ]
}
inf.2 <- merge(x = inf, y = features[,c("olink.id", "uniprot.id")], by.x = "protein.1", by.y = "uniprot.id", all = TRUE)
# which proteins have not been mapped to a gene
non.mapped <- which(is.na(inf.2$hgnc_symbol))
inf.2[non.mapped,]
# turns out there are alternative up ids for these, which explains why biomart couldn't map them to gene symbols
inf.2$alternate.uniprot <- NA
inf.2$alternate.uniprot[which(inf.2$target.short=="FGF-5")] <- "P12034"
inf.2$alternate.uniprot[which(inf.2$target.short=="CD6")] <- "P30203"
posn <- getBM(attributes = c('uniprotswissprot', 'hgnc_symbol', # 'entrezgene',
'chromosome_name', 'start_position', 'end_position'),
filters = 'uniprotswissprot',
values = inf.2$alternate.uniprot,
mart = ensembl.hg19)
for (i in non.mapped){
up <- inf.2$alternate.uniprot[i]
inf.2$chromosome_name[i] <- posn$chromosome_name[which(posn$uniprotswissprot==up)]
inf.2$start_position[i] <- posn$start_position[which(posn$uniprotswissprot==up)]
inf.2$end_position[i] <- posn$end_position[which(posn$uniprotswissprot==up)]
}
# check it worked
inf.2[non.mapped,]
inf.final <- inf.2[ ,c("target", "target.short", "uniprot", "panel", "prot.on.multiple.panel",
"panels.with.prot", "hgnc_symbol", "chromosome_name", "start_position", "end_position", "olink.id", "alternate.uniprot")]
write.table(inf.final, file="~/post-doc/o-link/scallop/olink.inf.panel.annot.txt",
row.names=F, col.names =T, sep="\t")
|
2d4b82fcce755648db5d561820b37e94ed0dbc4e
|
a0ceb8a810553581850def0d17638c3fd7003895
|
/scripts/rstudioserver_analysis/spikeins/mouse/H3K27me3_merged/project_new_K27me3_onto_old.R
|
40a3305913968c5ccf894b923527504004c4031e
|
[] |
no_license
|
jakeyeung/sortchicAllScripts
|
9e624762ca07c40d23e16dbd793ef9569c962473
|
ecf27415e4e92680488b6f228c813467617e7ee5
|
refs/heads/master
| 2023-04-15T22:48:52.272410
| 2022-10-24T10:45:24
| 2022-10-24T10:45:24
| 556,698,796
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,767
|
r
|
project_new_K27me3_onto_old.R
|
# Jake Yeung
# Date of Creation: 2020-10-04
# File: ~/projects/scchic/scripts/rstudioserver_analysis/spikeins/mouse/H3K27me3_merged/project_new_K27me3_onto_old.R
#
rm(list=ls())
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(Matrix)
library(topicmodels)
library(scchicFuncs)
library(hash)
library(igraph)
library(umap)
# Load data ---------------------------------------------------------------
hubprefix <- "/home/jyeung/hub_oudenaarden"
inf <- file.path(hubprefix, "jyeung/data/scChiC/raw_demultiplexed/LDA_outputs_all_spikeins/ldaAnalysisBins_mouse_spikein_projection_onto_old.VAN5046_VAN5230_BM/ldaOut.BM_H3K27me3_varfilt_countmat.2020-02-11.AllMerged.K-30.x.count_mat_H3K27me3_l2r_filt.2020-10-03.minl2r_0.varfilt_1.RData")
load(inf, v=T)
# # Plot old --------------------------------------------------------------
tm.result.old <- posterior(out.objs$out.lda)
tm.result.old <- AddTopicToTmResult(tm.result.old)
topics.mat.old <- tm.result.old$topics
jsettings <- umap.defaults
jsettings$n_neighbors <- 30
jsettings$min_dist <- 0.1
jsettings$random_state <- 123
umap.out <- umap(topics.mat.old, config = jsettings)
dat.umap.long <- data.frame(cell = rownames(umap.out[["layout"]]), umap1 = umap.out[["layout"]][, 1], umap2 = umap.out[["layout"]][, 2], stringsAsFactors = FALSE)
dat.umap.long <- DoLouvain(topics.mat.old, jsettings, dat.umap.long)
cbPalette <- c("#696969", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#FFB6C1", "#32CD32", "#0b1b7f", "#ff9f7d", "#eb9d01", "#7fbedf")
ggplot(dat.umap.long, aes(x = umap1, y = umap2, color = louvain)) + geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + scale_color_manual(values = cbPalette)
dat.umap.long$batch <- "old"
dat.umap.long$stype <- sapply(dat.umap.long$cell, function(x) GetCondFromSamp(x, mark = "H3K27me3"))
dat.umap.long <- dat.umap.long %>%
mutate(stype = gsub("Linneg", "LinNeg", stype),
stype = gsub("StemCell", "LSK", stype))
# Plot new ----------------------------------------------------------------
topics.mat.proj <- out.lda.predict$topics
umap.pred <- predict(umap.out, data = topics.mat.proj)
dat.umap.long.pred <- data.frame(cell = rownames(umap.pred), umap1 = umap.pred[, 1], umap2 = umap.pred[, 2], stringsAsFactors = FALSE)
dat.umap.long.pred$batch <- "new"
dat.umap.long.pred <- scchicFuncs::AnnotateSortFromLayout.dat(dat.umap.long.pred)
ggplot(dat.umap.long.pred, aes(x = umap1, y = umap2)) +
geom_point() +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
dat.umap.merge <- bind_rows(dat.umap.long %>% dplyr::select(c(cell, umap1, umap2, batch, stype)), dat.umap.long.pred %>% dplyr::select(c(cell, umap1, umap2, batch, stype)))
ggplot(dat.umap.merge, aes(x = umap1, y = umap2, color = batch)) +
geom_point() +
theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
# Check var ---------------------------------------------------------------
# Load annots -------------------------------------------------------------
inf.annot <- "/home/jyeung/hub_oudenaarden/jyeung/data/scChiC/from_rstudioserver/pdfs_all/GLMPCA_peaks_primetime/H3K4me1_H3K4me3_H3K27me3_glmpca_peaks_primetime.2020-09-29.H3K27me3.txt"
dat.annot <- fread(inf.annot)
dat.umap.merge.annot <- left_join(dat.umap.merge, subset(dat.annot, select = c(cell, cluster.renamed)))
cbPalette <- c("#696969", "#32CD32", "#56B4E9", "#FFB6C1", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#FFB6C1", "#32CD32", "#0b1b7f", "#ff9f7d", "#eb9d01", "#7fbedf")
ggplot(dat.umap.merge.annot, aes(x = umap1, y = umap2, color = cluster.renamed)) +
geom_point() +
theme_bw() +
scale_color_manual(values = cbPalette, na.value = "grey85") +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
facet_wrap(~batch)
# Get sorted celltype -----------------------------------------------------
dat.umap.long.pred.annot <- dat.umap.long.pred
dat.umap.merge.annot2 <- left_join(dat.umap.merge.annot, subset(dat.umap.long.pred.annot, select = -c(umap1, umap2, batch)))
ggplot(dat.umap.merge.annot2 %>% filter(batch == "old"), aes(x = umap1, y = umap2, color = stype)) +
geom_point() +
theme_bw() +
scale_color_manual(values = cbPalette, na.value = "grey85") +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom") +
facet_wrap(~batch)
ggplot(dat.umap.merge.annot2 %>% filter(batch == "old"), aes(x = umap1, y = umap2, color = cluster)) +
geom_point() +
theme_bw() +
scale_color_manual(values = cbPalette, na.value = "grey85") +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom") +
facet_wrap(~batch)
ggplot(dat.umap.merge.annot2, aes(x = umap1, y = umap2, color = stype)) +
geom_point() +
theme_bw() +
scale_color_manual(values = cbPalette, na.value = "grey85") +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom") +
facet_wrap(~batch)
# Label new cells by nearest neighbors of old ----------------------------
# Impute missing cluster labels by majority vote among UMAP nearest neighbours.
#
# jsub - data frame with at least `cell` and `cluster` columns; rows with
#        cluster == NA are the cells to label.
#        (e.g. jsub <- subset(umap.out.merge.final.annots, mark == "K27m3"))
# jknn - integer matrix of neighbour indices (umap's knn$indexes): row names
#        are cell ids, each row lists the row indices of that cell's neighbours.
#
# Returns jsub with NA clusters replaced by the most common non-NA neighbour
# cluster (ties broken randomly), or an empty data.frame when nothing is NA.
# Relies on dplyr, hash, and the AssignHash helper from scchicFuncs.
UpdateNAs <- function(jsub, jknn){
  cells.nas <- subset(jsub, is.na(cluster))$cell
  names(cells.nas) <- cells.nas
  # Nothing to impute: signal completion and return an empty frame.
  if (length(cells.nas) == 0){
    print("Done")
    return(data.frame(NULL))
  }
  # For each unlabelled cell, vote among its neighbours' non-NA clusters.
  clst.new <- lapply(cells.nas, function(jcell){
    cells.keep.i <- jknn[jcell, ]
    cells.keep <- rownames(jknn)[cells.keep.i]
    jtmp <- subset(jsub, cell %in% cells.keep) %>%
      group_by(cluster) %>%
      summarise(counts = length(cell)) %>%
      ungroup() %>%
      filter(!is.na(cluster)) %>% # guarantees convergence
      filter(counts == max(counts))
    if (nrow(jtmp) == 0){
      print("No non-NA clusters nearby... returning NA")
      return(NA)
    } else {
      # break ties randomly
      return(sample(jtmp$cluster, size = 1))
    }
  })
  # Map cell id -> newly voted cluster.
  clst.new.hash <- hash::hash(names(clst.new), clst.new)
  # update jsub: only cells present in the hash get a new label; the
  # null.fill keeps the existing cluster for everyone else.
  jsub.new <- jsub %>%
    rowwise() %>%
    mutate(cluster = AssignHash(cell, clst.new.hash, null.fill = cluster))
  return(jsub.new)
}
dat.umap.merge.annot2$cluster <- dat.umap.merge.annot2$cluster.renamed
jsettingsForImpute <- umap.defaults
jsettingsForImpute$n_neighbors <- 50
jsettingsForImpute$min_dist <- 0.1
jsettingsForImpute$random_state <- 123
topics.mat.merge <- rbind(topics.mat.old, topics.mat.proj)
umap.all <- umap(topics.mat.merge, config = jsettingsForImpute)
dat.umap.merge.annot2.imputed <- UpdateNAs(dat.umap.merge.annot2, jknn = umap.all$knn$indexes)
# UMAP of merged/annotated cells coloured by (imputed) cluster label,
# split into a batch x sorting-type grid to eyeball batch effects.
# Cells with no cluster assignment (NA) are drawn in light grey.
ggplot(dat.umap.merge.annot2.imputed, aes(x = umap1, y = umap2, color = cluster)) +
geom_point() +
scale_color_manual(values = cbPalette, na.value = "grey85") +
theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom") +
# facet_wrap(~batch)
# facet_grid(stype~batch)
facet_grid(batch~stype)
# Same embedding without facetting, for a single overview panel.
ggplot(dat.umap.merge.annot2.imputed, aes(x = umap1, y = umap2, color = cluster)) +
geom_point() +
scale_color_manual(values = cbPalette, na.value = "grey85") +
theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
# Add spikein ------------------------------------------------------------
# Join per-cell spike-in and chromosome read counts (new batch only,
# matched on cell == samp) and form the log2 chromo/spike-in ratio;
# the 0.1% tails are Winsorized so extreme cells do not dominate the
# colour scale in the plots below.
inf.spikeins <- "/home/jyeung/hub_oudenaarden/jyeung/data/scChiC/from_rstudioserver/quality_control_count_tables_mouse_for_lda.chromo2spikeinfilt.merged_across_runs/spikeins_dat_H3K27me3_merged.txt"
dat.spikeins <- fread(inf.spikeins)
dat.umap.merge.annot2.imputed.spikeins <- left_join(subset(dat.umap.merge.annot2.imputed, batch == "new"), subset(dat.spikeins, select = c(samp, spikeincounts, chromocounts, plate)), by = c("cell" = "samp")) %>%
ungroup() %>%
mutate(l2r = log2(chromocounts / spikeincounts),
l2r.wins = DescTools::Winsorize(l2r, probs = c(0.001, 0.999)))
# UMAP coloured by the raw log2(chromo/spike-in) ratio.
ggplot(dat.umap.merge.annot2.imputed.spikeins, aes(x = umap1, y = umap2, color = log2(chromocounts / spikeincounts))) +
geom_point() +
theme_bw() +
scale_color_viridis_c(direction = 1) +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
# Same UMAP with the Winsorized ratio (larger base font for figures).
ggplot(dat.umap.merge.annot2.imputed.spikeins, aes(x = umap1, y = umap2, color = l2r.wins)) +
geom_point() +
theme_bw(24) +
scale_color_viridis_c(direction = 1) +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
library(forcats)
# Boxplots of the log2 ratio per cluster, clusters ordered by
# decreasing median ratio; NA clusters are dropped.
ggplot(dat.umap.merge.annot2.imputed.spikeins %>% filter(!is.na(cluster)),
aes(x = forcats::fct_reorder(.f = cluster, .x = log2(chromocounts / spikeincounts), .fun = median, .desc = TRUE), y = log2(chromocounts/spikeincounts))) +
geom_boxplot() +
geom_point() +
theme_bw() +
xlab("") +
# ylab("log2(chromo to spikein)") +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))
# Same layout but for raw chromosome counts (no spike-in normalization).
ggplot(dat.umap.merge.annot2.imputed.spikeins %>% filter(!is.na(cluster)),
aes(x = forcats::fct_reorder(.f = cluster, .x = log2(chromocounts), .fun = median, .desc = TRUE), y = log2(chromocounts))) +
geom_boxplot() +
geom_point() +
theme_bw() +
xlab("") +
# ylab("log2(chromo to spikein)") +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))
# Per-cluster l2r boxplots split by experiment (assumes an `experi`
# column is present on the joined table -- TODO confirm upstream).
ggplot(dat.umap.merge.annot2.imputed.spikeins %>% filter(!is.na(cluster)), aes(x = forcats::fct_reorder(.f = cluster, .x = l2r, .fun = median, .desc = TRUE), y = l2r)) +
geom_boxplot() +
geom_point() +
theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1)) +
facet_wrap(~experi)
# Check glmpca ------------------------------------------------------------
# Load a previously fitted GLM-PCA object and embed its cell factors
# with UMAP; annotate with the cluster/spike-in table from above.
# inf.glmpca <- "/home/jyeung/hub_oudenaarden/jyeung/data/scChiC/raw_demultiplexed/LDA_outputs_all_spikeins/glmpcaPois_mouse_spikein_VAN5046_VAN5230_BM_varfilt/count_mat_H3K27me3_l2r_filt.2020-10-03.minl2r_0.varfilt_1.5.glmpcaout.penalty_1.maxiter_1000.stochastic.avagrad.tol_1e-6.devfilt_5000.varfilt_1.5.RData"
inf.glmpca <- "/home/jyeung/hub_oudenaarden/jyeung/data/scChiC/raw_demultiplexed/LDA_outputs_all_spikeins/glmpcaPois_mouse_spikein_VAN5046_VAN5230_BM_varfilt/count_mat_H3K27me3_l2r_filt.2020-10-03.minl2r_0.varfilt_1.glmpcaout.penalty_1.maxiter_1000.stochastic.avagrad.tol_1e-6.devfilt_5000.varfilt_1.RData"
assertthat::assert_that(file.exists(inf.glmpca))
load(inf.glmpca, v=T)
dat.umap.glmpca <- DoUmapAndLouvain(glmpcaout$factors, jsettings)
# NOTE(review): rowwise() is left active on the result (no ungroup()),
# so later grouped operations on this table would run row-by-row.
dat.umap.glmpca.annot <- left_join(dat.umap.glmpca, subset(dat.umap.merge.annot2.imputed.spikeins, select = -c(umap1, umap2))) %>%
rowwise() %>%
mutate(l2r = log2(chromocounts / spikeincounts))
# GLM-PCA UMAP coloured by cluster, split by sorting type.
ggplot(dat.umap.glmpca.annot, aes(x = umap1, y = umap2, color = cluster)) +
geom_point() +
theme_bw() +
scale_color_manual(values = cbPalette, na.value = "grey85") +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
facet_wrap(~stype)
# Winsorize the 1% tails of the ratio for the colour scale.
dat.umap.glmpca.annot$l2r.wins <- DescTools::Winsorize(dat.umap.glmpca.annot$l2r, probs = c(0.01, 0.99))
ggplot(dat.umap.glmpca.annot, aes(x = umap1, y = umap2, color = l2r.wins)) +
geom_point() +
theme_bw() +
scale_color_viridis_c() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom") +
facet_wrap(~stype)
# Same plot restricted to the "rep3" experiments.
ggplot(dat.umap.glmpca.annot %>% filter(grepl("rep3", experi)), aes(x = umap1, y = umap2, color = l2r.wins)) +
geom_point() +
theme_bw() +
scale_color_viridis_c() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom") +
facet_wrap(~stype)
# Plate-layout view: plot wells by row/column coordinate, coloured by
# the Winsorized ratio, to check for positional (plate) artifacts.
# scale_y_reverse() puts plate row A at the top.
ggplot(dat.umap.glmpca.annot, aes(y = rowcoord, x = colcoord, color = l2r.wins, shape = stype)) +
geom_point(size = 3) +
theme_bw() +
theme(aspect.ratio=2/3, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_y_reverse() +
scale_color_viridis_c() +
facet_wrap(~experi)
# Check lda ---------------------------------------------------------------
# Load the fitted LDA model, recover topic/term posteriors, embed the
# per-cell topic weights with UMAP, and compute an intra-chromosomal
# variance statistic from the imputed (smoothed) log profile.
inf.lda <- "/home/jyeung/hub_oudenaarden/jyeung/data/scChiC/raw_demultiplexed/LDA_outputs_all_spikeins/ldaAnalysisBins_mouse_spikein_VAN5046_VAN5230_BM_varfilt/lda_outputs.count_mat_H3K27me3_l2r_filt.2020-10-03.minl2r_0.varfilt_1.K-30.binarize.FALSE/ldaOut.count_mat_H3K27me3_l2r_filt.2020-10-03.minl2r_0.varfilt_1.K-30.Robj"
load(inf.lda, v=T)
tm.result <- posterior(out.lda)
tm.result <- AddTopicToTmResult(tm.result, jsep = "")
dat.umap.lda <- DoUmapAndLouvain(tm.result$topics, jsettings)
# Imputed profile: topics %*% terms reconstructs cell x bin
# probabilities; transpose to bins x cells on a log2 scale.
dat.impute.log <- t(log2(tm.result$topics %*% tm.result$terms))
jchromos <- paste("chr", c(seq(19), "X", "Y"), sep = "")
dat.var <- CalculateVarAll(dat.impute.log, jchromos)
# Annotate the LDA UMAP with cluster/spike-in info and the variance
# statistic; Winsorize the 1% tails of the log2 ratio.
dat.umap.lda.annot <- left_join(dat.umap.lda, subset(dat.umap.merge.annot2.imputed.spikeins, select = -c(umap1, umap2))) %>%
ungroup() %>%
mutate(l2r = log2(chromocounts / spikeincounts),
l2r.wins = DescTools::Winsorize(l2r, probs = c(0.01, 0.99))) %>%
left_join(., dat.var)
# Fix the display order of the sorting types.
dat.umap.lda.annot$stype <- factor(dat.umap.lda.annot$stype, levels = c("LSK", "LinNeg", "Unenriched"))
ggplot(dat.umap.lda.annot, aes(x = umap1, y = umap2, color = cluster)) +
geom_point() +
scale_color_manual(values = cbPalette) +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
# NOTE(review): this plot is an exact duplicate of the one above.
ggplot(dat.umap.lda.annot, aes(x = umap1, y = umap2, color = cluster)) +
geom_point() +
scale_color_manual(values = cbPalette) +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
# UMAP coloured by sorting type, overall and split by experiment.
ggplot(dat.umap.lda.annot, aes(x = umap1, y = umap2, color = stype)) +
geom_point(size = 2.5) +
scale_color_manual(values = cbPalette) +
theme_bw(24) + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
ggplot(dat.umap.lda.annot, aes(x = umap1, y = umap2, color = stype)) +
geom_point() +
facet_wrap(~experi, nrow = 2) +
scale_color_manual(values = cbPalette) +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
# UMAP coloured by the Winsorized log2(chromo/spike-in) ratio.
ggplot(dat.umap.lda.annot, aes(x = umap1, y = umap2, color = l2r.wins)) +
geom_point() +
scale_color_viridis_c() +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
ggplot(dat.umap.lda.annot, aes(x = umap1, y = umap2, color = l2r.wins)) +
geom_point() +
facet_wrap(~stype) +
scale_color_viridis_c() +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
# Louvain community assignments per sorting type.
ggplot(dat.umap.lda.annot, aes(x = umap1, y = umap2, color = louvain)) +
geom_point() +
facet_wrap(~stype) +
scale_color_manual(values = cbPalette) +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
# Distribution of the ratio within each Louvain community.
ggplot(dat.umap.lda.annot, aes(x = louvain, y = l2r.wins)) +
geom_boxplot() +
geom_point() +
facet_wrap(~stype) +
scale_color_viridis_c() +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
# Intra-chromosomal variance on the UMAP (reversed scale: darker =
# higher variance), and its relation to the spike-in ratio.
ggplot(dat.umap.lda.annot, aes(x = umap1, y = umap2, color = cell.var.within.sum.norm)) +
geom_point() +
scale_color_viridis_c(direction = -1) +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
ggplot(dat.umap.lda.annot, aes(x = l2r, y = cell.var.within.sum.norm, color = stype)) +
geom_point() +
scale_color_manual(values = cbPalette) +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
ylab("Intrachrom Var") +
xlab("log2(chromocounts / spikeincounts)")
# Marginal densities by sorting type: raw counts (m0), spike-in
# ratio (m1), and intra-chromosomal variance (m2).
m0 <- ggplot(dat.umap.lda.annot, aes(x = log2(chromocounts), fill = stype)) +
geom_density(alpha = 0.25) +
scale_color_manual(values = cbPalette) +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
print(m0)
m1 <- ggplot(dat.umap.lda.annot, aes(x = l2r, fill = stype)) +
geom_density(alpha = 0.25) +
scale_color_manual(values = cbPalette) +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom") +
xlab("log2(chromocounts / spikeincounts)")
m2 <- ggplot(dat.umap.lda.annot, aes(x = cell.var.within.sum.norm, fill = stype)) +
geom_density(alpha = 0.25) +
scale_color_manual(values = cbPalette) +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom") +
xlab("Intrachrom Var")
JFuncs::multiplot(m1, m2, cols = 2)
JFuncs::multiplot(m1, m0, m2, cols = 3)
# Cross-check the two embeddings: colour the merged UMAP (".x" suffix)
# by the LDA UMAP's first coordinate (".y" suffix from the join).
jmerge <- left_join(dat.umap.merge.annot2.imputed, dat.umap.lda.annot, by = "cell")
ggplot(jmerge, aes(x = umap1.x, y = umap2.x, color = umap1.y)) +
geom_point() +
scale_color_viridis_c() +
theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
|
75d7ebfff81c07b4f2eca1124385f7d40e185f9d
|
615e1677bbe198ed819600ff110129f9456b81f7
|
/Examples/mass-funcitons.R
|
04c9c96e866df72061057b7ec6854088615af90e
|
[
"MIT"
] |
permissive
|
mattcwilde/stat509
|
b5de627628e399a6ffa2b4681eab47dd61651cf1
|
89c80453eb478c5373655bf2499036d8153614da
|
refs/heads/master
| 2021-05-21T23:40:42.374449
| 2020-04-03T23:17:07
| 2020-04-03T23:17:07
| 252,863,009
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,236
|
r
|
mass-funcitons.R
|
### Some examples of probability mass functions
# (1) Bernoulli
# A Bernoulli(p) variable is a Binomial with n = 1, so dbinom(x, 1, p)
# gives the pmf directly.
p <- 0.1
dbinom(1,1,p) # P(X=1) = p
dbinom(0,1,p) # P(X=0) = 1-p
# here we use the fact that Bernoulli(p)
# is the same as Binomial(n=1, p)
# "silly" values: points outside the support get probability 0
dbinom(2,1,p) # P(X=2) = 0
dbinom(-1,1,p) # P(X=-1) = 0
dbinom(0.5,1,p) # P(X=0.5) = 0
# Plot of the mass function
# values (including values other than 0 and 1 is unnecessary,
# since the pmf is zero there)
xseq <- -2:4 # including values other than 0 and 1
# is unnecessary
yseq <- dbinom(xseq,1,p)
plot(xseq,yseq,xlab="Values of x",ylim=c(0,1), ylab="P(X=x)",main=paste("pmf for Bernoulli distribution with p=",p))
# (2) Binomial distribution
p <- 0.4
n <- 8
dbinom(8,n,p) # P(X=8) = p^n
## let's check against the closed form:
p^n
dbinom(0,n,p) # P(X=0) = (1-p)^n
# check:
(1-p)^n
xseq <- 0:n
yseq <- dbinom(xseq,n,p)
sum(yseq) # check: probabilities over the full support sum to 1
# Plot of the mass function
plot(xseq,yseq,xlab="Values of x",ylim=c(0,1), ylab="P(X=x)",main=paste("pmf for Binomial distribution with n=",n," and p=",p))
# (3) Geometric distribution
p <- 0.4
## R defines the Geometric distribution
## slightly differently from Goldberger and lecture notes
## In R it is the number of tosses *before* the first head
## whereas Goldberger and others define it as the number
## of tosses up to *and including* the first head.
## we address this by defining our own function:
dmygeom <- function(x,p){dgeom(x-1,p)}
dmygeom(1,p) # P(X=1) = p
# check against the closed form:
p
dmygeom(2,p) # P(X=2) = p(1-p)
# check:
p*(1-p)
xseq <- 0:10 # 10 is arbitrary here
yseq <- dmygeom(xseq,p)
# The geometric distribution has infinite support, so truncating the
# sum at x = 10 leaves out the remaining tail probability.
sum(yseq) # Why is this < 1?
# Plot of the mass function
plot(xseq,yseq,xlab="Values of x",ylim=c(0,1), ylab="P(X=x)",main=paste("pmf for Geometric distribution with p=",p))
# (4) Discrete Uniform
## R doesn't have a function for this.
## Let's build our own:
ddiscunif <- function(x,n){1/n}
#
ddiscunif(1,5)
ddiscunif(2,5)
# looks ok so far... but
ddiscunif(6,5) # not so good : should be 0
ddiscunif(-1,5) # not so good : should be 0
# second attempt
ddiscunif <- function(x,n){
sum(rep(x,n)==(1:n))/n
}
# How does this work?
#
ddiscunif(1,5)
ddiscunif(2,5)
# looks ok so far
ddiscunif(6,5) # better
ddiscunif(-1,5) # better
# but... what about if we feed in a vector:
ddiscunif(c(0,1,2),5)
# not so good.
# we want 0, 0.2,0.2
# third attempt:
# safe for all inputs and can take vectors
ddiscunif <- function(x,n){
out <- rep(NA,length(x))
for(i in 1:length(x)){
out[i] <- sum(rep(x[i],n)==(1:n))/n
}
return(out)
}
## Can you see how this is working?
ddiscunif(1,5)
ddiscunif(2,5)
# looks ok so far
ddiscunif(6,5) # better
ddiscunif(-1,5) # better
# but... what about if we feed in a vector:
ddiscunif(c(0,1,2),5)
### Best approach using
### built "%in%" function (due to Danping)
# Discrete uniform pmf on {1, ..., n}: fully vectorized in x.
# Each "legitimate" point of the support gets mass 1/n; every other
# value (including non-integers and values outside [1, n]) gets 0.
ddiscunif <- function(x, n) {
  out <- numeric(length(x))
  out[x %in% (1:n)] <- 1 / n
  out
}
# Plot the discrete uniform pmf on {1, ..., 36}; the x range extends
# past the support on both sides to show the zero mass there.
n <- 36
xseq <- -1:38
yseq <- ddiscunif(xseq,n)
plot(xseq,yseq,xlab="Values of x",ylim=c(0,1), ylab="P(X=x)",main=paste("pmf for Discrete Uniform distribution with n=",n))
# (5) Poisson Distribution
lambda <- 1
dpois(0,lambda)
# check against the closed form lambda^x * exp(-lambda) / x!
lambda^0*exp(-lambda)/factorial(0)
dpois(1,lambda)
# check:
lambda^1*exp(-lambda)/factorial(1)
## Why is this the same? (Hint - try a different lambda)
dpois(2,lambda)
# check:
lambda^2*exp(-lambda)/factorial(2)
## Some silly values:
dpois(-1,lambda)
dpois(1.5,lambda) # gives a warning (non-integer x)
xseq <- 0:10
yseq <- dpois(xseq,lambda)
sum(yseq)
## Is it really 1? (Poisson support is infinite, plus floating point)
sum(yseq)-1 # How do we explain this? Rounding error
options(digits=15) #This increases the number of digits printed
sum(yseq)
plot(xseq,yseq,xlab="Values of x",ylim=c(0,1), ylab="P(X=x)",main=paste("pmf for Poisson distribution with lambda=",lambda))
# Try some other lambdas to see the mode shift right as lambda grows:
lambda <- 1
xseq <- 0:10
yseq <- dpois(xseq,lambda)
plot(xseq,yseq,xlab="Values of x",ylim=c(0,1), ylab="P(X=x)",main=paste("pmf for Poisson distribution with lambda=",lambda))
lambda <- 2
xseq <- 0:10
yseq <- dpois(xseq,lambda)
plot(xseq,yseq,xlab="Values of x",ylim=c(0,1), ylab="P(X=x)",main=paste("pmf for Poisson distribution with lambda=",lambda))
|
2662eec832a93d81154943c983457a20ebb1bdf8
|
47aad5007e300f78e3404ed0147f92cd2eb56df2
|
/R/confint.fitpot.R
|
52d901e65505f450eb641ecdb89c0aede865fe2d
|
[] |
no_license
|
cran/ercv
|
16c5c1cc44c7a76d955b35eddfd2c4db1d8cf07a
|
77db65e5be9e8c99d819b2fa1d162692a595f48a
|
refs/heads/master
| 2021-07-03T13:21:33.182423
| 2019-10-15T14:30:02
| 2019-10-15T14:30:02
| 97,489,837
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 601
|
r
|
confint.fitpot.R
|
confint.fitpot <- function(object, parm, level=0.95, ...)
{
  ## Wald-type confidence intervals for a "fitpot" fit, mirroring
  ## stats::confint(): estimate +/- t-quantile * standard error.
  ##
  ## object : object of class "fitpot"; must carry point estimates in
  ##          object$coeff and standard errors / residual df in the
  ##          "evi.sd", "psi.sd" and "df.residual" attributes.
  ## parm   : parameters to report, by name ("evi", "psi") or index;
  ##          defaults to all available parameters.
  ## level  : confidence level (default 0.95).
  ## Returns a matrix with one row per requested parameter and two
  ## columns (lower, upper bound).
  if (!inherits(object, "fitpot"))
    stop("Object should be of class fitpot", call. = FALSE)
  cf <- object$coeff
  ## When the shape parameter was held fixed only "psi.sd" is present,
  ## so the set of reportable parameters depends on length(ses).
  ses <- c(attr(object, "evi.sd"), attr(object, "psi.sd"))
  pnames <- if (length(ses) == 2) c("evi", "psi") else "psi"
  ## BUG FIX: the attributes are unnamed scalars, so ses[parm] below
  ## used to index an unnamed vector by character name and silently
  ## produced all-NA intervals. Name ses so selection by name works.
  names(ses) <- pnames
  if (missing(parm))
    parm <- pnames
  else if (is.numeric(parm))
    parm <- pnames[parm]
  ## Two-sided t quantiles on the fit's residual degrees of freedom.
  a <- (1 - level)/2
  a <- c(a, 1 - a)
  fac <- qt(a, attr(object, "df.residual"))
  ci <- array(NA_real_, dim = c(length(parm), 2L), dimnames = list(parm))
  ci[] <- cf[parm] + ses[parm] %o% fac
  ci
}
|
d8e71ccd402288a1dee5291bb63da93d9930d36f
|
46002cc5ac222968ce2e8637754447673d9639bd
|
/multivariate_ARMA.R
|
65b3b58464e933a038e727c9cb93b8a0c52f3431
|
[] |
no_license
|
bjorn81/Bayesian
|
dc9d01df6e68587cf1210f6898ebf847bf5b6d34
|
b137ba8aea4a66619f7e51db1c14254026be6d1d
|
refs/heads/master
| 2020-04-08T20:50:56.122287
| 2018-11-29T19:37:20
| 2018-11-29T19:37:20
| 159,717,791
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,819
|
r
|
multivariate_ARMA.R
|
library(forecast)
library(data.table)
library(rstan)
library(coda)
# Simulate data: an ARMA(1,1) base series y0 and two AR(1) regressors
# x1, x2, all with the same N(0, 0.3) innovation model and a 200-step
# burn-in so the series start near stationarity.
error.model=function(n){rnorm(n, sd=.3)}
y0 <- arima.sim(model=list(ar=0.6, ma = -0.9), n=10000,
n.start=200, start.innov=rnorm(200, sd=.2),
rand.gen=error.model )
x1 <- arima.sim(model=list(ar=0.95), n=10000,
n.start=200, start.innov=rnorm(200, sd=.2),
rand.gen=error.model)
x2 <- arima.sim(model=list(ar=0.95), n=10000,
n.start=200, start.innov=rnorm(200, sd=.2),
rand.gen=error.model)
# Model for y: true regression coefficients are 0.2 (x1) and 0.5 (x2),
# which the Stan fit below should recover.
y <- y0 + 0.5*x2 + 0.2 * x1
# Create list of data in the shape the Stan program's data block expects.
sdata2 <- list(
y = as.numeric(y),
x1 = as.numeric(x1),
x2 = as.numeric(x2),
T = length(y))
# Stan model code: linear regression of y on x1/x2 with ARMA(1,1)
# errors. (The model is a string literal compiled by rstan below;
# its internal comments document the eta/peta/epsilon recursion.)
mmmodel <- "
data {
int<lower=1> T; // num observation
real y[T]; // observed outputs
real x1[T];
real x2[T];
}
parameters {
real mu; // mean coeff
real phi; // autoregression coeff
real<lower = -1, upper = 1> theta; // moving avg coeff
real bx1; // own TV lag 3
real bx2; // comp TV lag 1
real<lower=0> sigma; // noise scale
}
model {
vector[T] nu; // prediction for time t
vector[T] eta; // ar error for time t
vector[T] peta; // ar error for time t
vector[T] epsilon; // model error for time t
nu[1] = mu + bx1*x1[1] + bx2*x2[1];
epsilon[1] = 0;
eta[1] = y[1] - nu[1];
peta[1] = eta[1];
for (t in 2:T) {
// this is the long run relationship between variables
nu[t] = mu + bx1*x1[t] + bx2*x2[t];
// eta is composite error term (AR and MA) which describes temporary deviations
// from long run relationships
eta[t] = y[t] - nu[t];
// peta is expected value of eta
peta[t] = phi*eta[t-1] + theta*epsilon[t-1];
// epsilon is difference between expected and actual deviation from long run
// relationship between model variables
epsilon[t] = eta[t] - peta[t];
}
mu ~ normal(0, 100);
phi ~ normal(0, 2);
theta ~ normal(0, 2);
// error term of model is normally distributed around zero
epsilon ~ normal(0, sigma); // likelihood
bx1 ~ normal(10, 20); // linear trend
bx2 ~ normal(-10, 20); // relative price
sigma ~ cauchy(0, 1);
}
"
# Compile and fit the model: 3 chains in parallel, 500 warmup of 2000
# total iterations each.
mmm.stan <- stan(model_code = mmmodel, model_name = "example",
data = sdata2, warmup = 500, iter = 2000, chains = 3, cores = 3,
verbose = TRUE)
summary(mmm.stan)
# Convert each chain to a coda mcmc object for trace/density plots.
mmm.stan.coda<-mcmc.list(lapply(1:ncol(mmm.stan),function(x) mcmc(as.array(mmm.stan)[,x,])))
plot(mmm.stan.coda)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.