blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a58236235df0fb3c5287e69b5f92387cff31567b
|
64238f37c5af76a0bb82ccb5765b243447fe545f
|
/ProjetoVisualiza/BusinessLogic.R
|
efea0a9600d9a7c685b1e98b353999e6a6ea08cd
|
[] |
no_license
|
Tocchetto/nwtapp
|
898331f4a302fc6d375f9ab03f80e1b1a127441f
|
d1de23b2cf453b59d6883dec49cd30053119e5ce
|
refs/heads/master
| 2021-01-20T08:26:43.538704
| 2017-10-27T12:50:07
| 2017-10-27T12:50:07
| 92,299,129
| 0
| 1
| null | 2017-05-24T14:05:57
| 2017-05-24T14:05:57
| null |
UTF-8
|
R
| false
| false
| 6,309
|
r
|
BusinessLogic.R
|
getSuffix <- function(variable){
  # Return the unit suffix (leading space included) for a climate-variable
  # code, or NULL for an unknown code.
  # NOTE(review): the original had an unreachable `UZRS -> " %"` branch,
  # shadowed by the " 0-1" group below; the effective behavior (" 0-1")
  # is preserved here and the dead branch removed.
  if (variable == "CAPE") return(" J/kg")
  if (variable %in% c("CLSF", "GHFL", "CSSF", "OCES", "OCIS", "OLES", "OLIS")) return(" W/m2")
  if (variable %in% c("RNSG", "RNOF", "EVTP", "EVPP", "NEVE", "PREC")) return(" mm/ano")
  if (variable %in% c("PSLC", "PSLM")) return(" hPa")
  if (variable %in% c("TP2M", "TSFC", "DP2M", "TGSC", "TGRZ", "MXTP", "MNTP")) return(" °C")
  if (variable %in% c("USSL", "UZRS", "MDNV", "LWNV", "HINV")) return(" 0-1")
  if (variable %in% c("W100", "W10M")) return(" m/s")
  if (variable %in% c("D100", "D10M")) return(" graus meteorológicos")
  NULL
}
getMapRaster <- function(variable, dec, variableType){
  # Build the Eta_MIROC5 GeoTIFF path for the requested variable / decade /
  # scenario, load layer 10, and tag it with the WGS84 (EPSG:4326) CRS.
  tif_path <- paste0(
    'Tifs/Eta_MIROC5/Eta_MIROC5_20_', variableType,
    '_climate_annually_', variable, '_', dec, '0101_0000_v1.tif'
  )
  print("getMapRaster")
  print(tif_path)
  map_layer <- raster(tif_path, layer = 10)
  crs(map_layer) <- CRS("+init=epsg:4326")
  return(map_layer)
}
getMapPal <- function(variable, dec, variableType){
  # Build a leaflet colorNumeric() palette for `variable`, with the palette
  # domain taken from the values of the matching Eta_MIROC5 GeoTIFF.
  #
  # The original implementation repeated the path construction, trace prints
  # and raster load in every branch; they are factored out here. The printed
  # trace and the per-family color ramps are unchanged.
  mapRaster <- paste0('Tifs/Eta_MIROC5/Eta_MIROC5_20_', variableType,
                      '_climate_annually_', variable, '_', dec,
                      '0101_0000_v1.tif')
  print("getMapPal")
  print(mapRaster)

  # Select the color ramp for the variable family.
  if (variable %in% c("MDNV", "LWNV", "HINV")) {
    colors <- c("#066867", "#31BFC1", "#78D0DC", "#ACE0EB",
                "#FDDEBF", "#FBAA6B", "#CF6028", "#5E260F")
  } else if (variable %in% c("RNSG", "RNOF", "EVTP", "EVPP", "NEVE", "PREC")) {
    colors <- c("#FEFBDE", "#E4F1FA", "#CCFFFF", "#99FFFF", "#66CCCC", "#66CCCC")
  } else if (variable %in% c("CLSF", "GHFL", "CSSF", "OCES", "OCIS", "OLES", "OLIS")) {
    colors <- c("#9999CC", "#9999CC", "#9966CC", "#9966CC", "#9966CC", "#663399")
  } else if (variable %in% c("W100", "W10M", "D10M", "D100")) {
    colors <- c("#6699FF", "#66FF99", "#FFFF99", "#FFCC66", "#FF0000", "#990099")
  } else if (variable %in% c("TP2M", "TSFC", "DP2M", "TGSC", "TGRZ", "MXTP", "MNTP")) {
    colors <- c("#FFFFFF", "#E1F6FB", "#BCEEFB", "#B9ECD8", "#CADB92", "#FFEB88",
                "#FBC25E", "#FF9933", "#FF7B33", "#CD5B12", "#FF3C1C")
  } else if (variable %in% c("UZRS", "USSL", "UR2M")) {
    colors <- c("#00CC00", "#339933", "#339933", "#006600", "#006600", "#000000")
  } else {
    # Fallback: grayscale ramp. NOTE(review): the original read the raster
    # here WITHOUT layer = 10 (every other branch uses layer = 10); that
    # read is preserved -- confirm whether it was intentional.
    r <- raster(mapRaster)
    return(colorNumeric(c("#ffffff", "#000000"), values(r),
                        na.color = "transparent"))
  }

  r <- raster(mapRaster, layer = 10)
  colorNumeric(colors, values(r), na.color = "transparent")
}
getUserShapeColor <- function(variable){
  # Return the fixed color ramp associated with a climate-variable family;
  # unknown codes fall back to a simple white-to-black ramp.
  ramps <- list(
    list(codes = c("MDNV", "LWNV", "HINV"),
         colors = c("#066867", "#31BFC1", "#78D0DC", "#ACE0EB",
                    "#FDDEBF", "#FBAA6B", "#CF6028", "#5E260F")),
    list(codes = c("RNSG", "RNOF", "EVTP", "EVPP", "NEVE", "PREC"),
         colors = c("#FEFBDE", "#E4F1FA", "#CCFFFF", "#99FFFF",
                    "#66CCCC", "#66CCCC")),
    list(codes = c("CLSF", "GHFL", "CSSF", "OCES", "OCIS", "OLES", "OLIS"),
         colors = c("#9999CC", "#9999CC", "#9966CC", "#9966CC",
                    "#9966CC", "#663399")),
    list(codes = c("W100", "W10M", "D10M", "D100"),
         colors = c("#6699FF", "#6699FF", "#66FF99", "#FFFF99", "#FFCC66",
                    "#FFCC66", "#FF0000", "#FF0000", "#990099", "#990099")),
    list(codes = c("TP2M", "TSFC", "DP2M", "TGSC", "TGRZ", "MXTP", "MNTP"),
         colors = c("#E2F2FA", "#BCEEFB", "#BCEEFB", "#B9ECD8", "#CADB92",
                    "#FFEB88", "#FBC25E", "#FF7B33", "#CD5B12", "#FF3C1C",
                    "#663399")),
    list(codes = c("UZRS", "USSL", "UR2M"),
         colors = c("#00CC00", "#339933", "#339933", "#006600",
                    "#006600", "#000000"))
  )
  for (ramp in ramps) {
    if (variable %in% ramp$codes) {
      return(ramp$colors)
    }
  }
  c("#ffffff", "#000000")
}
|
0586cfeeaf2f8e45f7a6b61ba3582d99203d3387
|
6cd15fd0e072741b5db8284ca20bf6534e495a20
|
/R/data_error_b.R
|
098b92fa95b38356a06fc620652b6f986319b80c
|
[
"MIT"
] |
permissive
|
renands/RMLPCA
|
fffbd18c502e2e3ccfafaa4be677159877cb831b
|
039d34002fe4b98688869184e5139a3b842bfa00
|
refs/heads/master
| 2023-05-09T07:34:03.769415
| 2021-05-31T19:22:13
| 2021-05-31T19:22:13
| 273,766,066
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 579
|
r
|
data_error_b.R
|
#' Errors generated for the mlpca_b model
#'
#' A dataset where each column contains values from a normal density with mean
#' = 0 and standard deviation from 0.2 to 1; the standard deviation differs by
#' column. The main idea is described in figure 3 of Wentzell, P. D.
#' "Other topics in soft-modeling: maximum likelihood-based soft-modeling
#' methods." (2009): 507-558.
#'
#' @format A matrix with 300 rows and 20 columns
#' @references Wentzell, P. D. "Other topics in soft-modeling:
#' maximum likelihood-based soft-modeling methods." (2009): 507-558.
#'
"data_error_b"
|
b6003236e3ebfeb3d9635a94ae2667e21c7aa66b
|
6fc77d31ad1688033d6dd9830d3c531760a6aabf
|
/tests/testthat/test-prediction-missing-years.R
|
dd1f640cbb92efd80fc1248cd6fdc4f507557157
|
[] |
no_license
|
pbs-assess/sdmTMB
|
ba24efb807680f28fdfa9a27a2a775b1817b49c8
|
6aa4e8a7847318f81e91a0bfb6c85001db07d0da
|
refs/heads/main
| 2023-09-03T17:06:22.517565
| 2023-08-18T20:54:48
| 2023-08-18T20:54:48
| 149,399,567
| 133
| 12
| null | 2023-05-11T18:43:58
| 2018-09-19T05:59:53
|
R
|
UTF-8
|
R
| false
| false
| 674
|
r
|
test-prediction-missing-years.R
|
# Regression test: predicting on a newdata frame that covers only a subset of
# the fitted time steps must give the same rows as subsetting a full-data
# prediction, and index calculation on such predictions must be rejected.
test_that("Prediction works with missing time", {
  skip_on_cran()
  skip_if_not_installed("INLA")
  # Fit a spatiotemporal Tweedie model to the built-in pcod 2011 data.
  fit <- sdmTMB(
    density ~ 1,
    data = pcod_2011, mesh = pcod_mesh_2011, time = "year",
    family = tweedie(link = "log")
  )
  # Predict on two years only ...
  nd <- pcod_2011[pcod_2011$year %in% c(2013, 2017), ]
  p1 <- predict(fit, newdata = nd)
  # ... and on the full data, then keep the same two years for comparison.
  p2 <- predict(fit, newdata = pcod_2011)
  p2 <- p2[p2$year %in% c(2013, 2017), ]
  expect_equal(nrow(p1), nrow(p2))
  expect_equal(p1$est, p2$est)
  expect_equal(p1$year, p2$year)
  expect_equal(p1, p2)
  # With missing years, predict() should warn and get_index() should error;
  # both messages are expected to mention "time".
  expect_warning(p3 <- predict(fit, newdata = nd, return_tmb_object = TRUE), regexp = "time")
  expect_error(get_index(p3), regexp = "time")
})
|
d5832307534199924256c708a922b760db9142e5
|
38d166ede31183e2121388be0f66fe9d7ac4e93a
|
/man/phyloseq_coverage.Rd
|
69446872cc81f88046beaf6011d2199a29687f7e
|
[
"MIT"
] |
permissive
|
vmikk/metagMisc
|
a01151347b620745b278265700e503dc74669af5
|
310b1a40951de46348084e150d7471ed66feb0c8
|
refs/heads/master
| 2023-08-31T08:41:27.684905
| 2023-08-28T10:09:50
| 2023-08-28T10:09:50
| 76,531,351
| 38
| 12
|
MIT
| 2019-07-29T06:12:12
| 2016-12-15T06:40:05
|
R
|
UTF-8
|
R
| false
| true
| 1,584
|
rd
|
phyloseq_coverage.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phyloseq_coverage.R
\name{phyloseq_coverage}
\alias{phyloseq_coverage}
\title{Estimate the observed abundance-based sample coverage for phyloseq object}
\usage{
phyloseq_coverage(physeq, correct_singletons = FALSE, add_attr = T)
}
\arguments{
\item{physeq}{A phyloseq-class object}
\item{correct_singletons}{Logical; if TRUE, singleton counts will be corrected with modified Good–Turing frequency formula (Chiu, Chao 2016)}
\item{add_attr}{Logical; if TRUE, additional attributes (list of species abundances and singleton correction flag) will be added to the results}
}
\value{
Data frame with coverage estimates for each sample
}
\description{
phyloseq_coverage estimates the sample completeness for the individual-based
abundance data (number of sequencing reads) stored in 'phyloseq'-class objects.
}
\details{
Coverage represents a measure of sample completeness and is defined as the proportion of
the total number of individuals in a community that belong to the species represented in the sample.
Coverage complement (1 - Coverage) gives the proportion of the community belonging to unsampled
species or the "coverage deficit" (Chao, Jost, 2012).
Estimation of coverage is based on the number of singletons and doubletons in the sample.
}
\examples{
data("esophagus")
phyloseq_coverage(esophagus)
}
\references{
Chao A, Jost L. (2012) Coverage-based rarefaction and extrapolation: standardizing samples by completeness rather than size // Ecology 93(12): 2533–2547. DOI: 10.1890/11-1952.1
}
|
e9500d1f97b0e8896c88492266e797e398a627ad
|
5baf5ec86241518b59f6e3fa33721ef6322baa6b
|
/Construction/stream_tributary_locations.R
|
1c38a6b4d74df2edd323c7f91e892d02c45c258d
|
[
"CC-BY-4.0"
] |
permissive
|
CCheCastaldo/MSHMicroMet
|
a4539d9759416b0f717a9951d54d3ea195c629f0
|
b4bc7852f6bdaa58b8671979dcb7986c5c298bb3
|
refs/heads/main
| 2023-04-19T05:32:13.644932
| 2022-12-21T21:26:04
| 2022-12-21T21:26:04
| 561,350,602
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 356
|
r
|
stream_tributary_locations.R
|
# geolocate all stream hobo installations
# Read the survey spreadsheet, round the coordinates to whole units
# (presumably UTM metres, given the zone/easting/northing columns -- TODO
# confirm), flag the sites as non-terrestrial, and keep only the location
# columns.
stream_locations_df <-
  read_excel("AccessMigration/StreamTributaryLocations.xlsx") %>%
  mutate(easting = round(easting), northing = round(northing)) %>%
  mutate(terrestrial = FALSE) %>%
  dplyr::select(survey_id_legacy,
                site_id,
                site_description,
                terrestrial,
                zone,
                easting,
                northing)
|
84600b8c6c086cc1b55c688d9e1f89830fc4b1d8
|
156140b80fd46aa214fed8d06407ecf9220066c5
|
/cumulative-cases-animation.R
|
39edc1f763655557680b481882b6a10abeb30998
|
[] |
no_license
|
glaswasser/animated-running-corona-bar-plot
|
4a0298c614d09e55677d55611ad82294508dfaa5
|
9d77cb3d647ed5ee67a9f95b161456bf8571033b
|
refs/heads/master
| 2023-03-14T19:17:45.178581
| 2021-03-14T21:53:22
| 2021-03-14T21:53:22
| 256,786,881
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,834
|
r
|
cumulative-cases-animation.R
|
# Animated "racing bar chart" of cumulative confirmed COVID-19 cases.
library(shiny)
library(nCov2019)
library(plotly)
library(tidyverse)
library(gganimate)
## DESCRIPTION:
# a running corona-barchart
#### LOAD DATA ####
# nCov2019 fetches case counts; English labels, GitHub mirror as source.
y <- load_nCov2019(lang = 'en', source='github')
# get global data:
d = y['global']
# FORMAT DATA
# For every date, rank countries by cumulative confirmed cases, keep the top
# 10, and drop dates before 2020-02-15.
formatted <- d %>%
  group_by(time) %>% #group by the date
  mutate(rank = min_rank(-cum_confirm) * 1, # create a rank for every day
         Value_lbl = paste0(" ", round(cum_confirm))) %>% # create labels for the
  group_by(country) %>%
  filter(rank <= 10) %>%#
  filter(time > as.Date("2020-02-15")) %>%
  ungroup()
# animate
# Build the animated bar chart: one ggplot whose frames are the dates (time),
# animated by gganimate via transition_states() at the end of the chain.
animated <-
  # create a ggplot with ..
  ggplot(formatted, aes(rank, group = country,
                        fill = as.factor(country), color = as.factor(country))) +
  # the aesthetics for the bars: y position, height, alpha is the opacity
  geom_tile(aes(y = cum_confirm/2,
                height = cum_confirm,
                width = 0.9), alpha = 0.8, color = NA) +
  # the labels for the countries in front, the paste0 needs to be there for the right position
  geom_text(aes(y = 0, label = paste(country, " ")), vjust = 0.1, hjust = 1) +
  # the labels for the numbers. somehow this only works with the labels for rounded numbers like that.
  geom_text(aes(y=cum_confirm, label = paste0(Value_lbl), hjust = 0)) +
  # flip the coordinate system, clip off for the right display
  coord_flip(clip = "off", expand = FALSE) +
  # reverse the x-scale to have the largest bar on plot
  scale_x_reverse() +
  # to make the background grid lines moving (view_follow creates warnings but can be ignored)
  scale_y_continuous(labels = scales::comma) +
  view_follow(fixed_x = TRUE) +
  # this is for removing redundant labels etc:
  guides(color = FALSE, fill = FALSE) +
  theme(axis.line=element_blank(),
        axis.text.x=element_blank(),
        axis.text.y=element_blank(),
        axis.ticks=element_blank(),
        axis.title.x=element_blank(),
        axis.title.y=element_blank(),
        legend.position="none",
        panel.background=element_blank(),
        panel.border=element_blank(),
        panel.grid.major=element_blank(),
        panel.grid.minor=element_blank(),
        panel.grid.major.x = element_line( size=.1, color="grey" ),
        panel.grid.minor.x = element_line( size=.1, color="grey" ),
        # aesthetics for the title, subtitle etc.
        plot.title=element_text(size=25, hjust=0.5, face="bold", colour="grey", vjust=-1),
        plot.subtitle=element_text(size=18, hjust=0.5, face="italic", color="grey"),
        plot.caption =element_text(size=8, hjust=0.5, face="italic", color="grey"),
        plot.background=element_blank(),
        plot.margin = margin(2, 2, 2, 4, "cm")) +
  # the transition between the states is defined here
  transition_states(time, transition_length = 4, state_length = 1) +
  # to make the countries overtake each other more smoothly:
  # more options to smoothen the transitions
  enter_grow() +
  exit_shrink() +
  ease_aes("linear") +
  labs(title = 'Cumulative confirmed cases on {closest_state}',
       subtitle = "Top 10 Countries",
       caption = "Tianzhi Wu, Erqiang Hu, Xijin Ge*, Guangchuang Yu*.
Open-source analytics tools for studying the COVID-19 coronavirus outbreak. medRxiv, 2020.02.25.20027433.
doi: https://doi.org/10.1101/2020.02.25.20027433")
# Render the animation in the default viewer.
animated
# Alternatively, render with an explicit frame count, frame rate and size.
animate(animated, 100, fps = 25, duration = 20, width = 800, height = 600)
# Write an animated GIF to disk.
animate(animated, 100, fps = 25, duration = 20, width = 1200, height = 1000,
        renderer = gifski_renderer("confirmed_cases.gif"))
# Write an mp4: render with the ffmpeg renderer, then save.
# (The original source had these two statements fused onto a single line:
# `...ffmpeg_renderer()) -> for_mp4anim_save(...)`, which is a syntax error.)
for_mp4 <- animate(animated, 200, fps = 30, width = 1200, height = 1000,
                   renderer = ffmpeg_renderer())
anim_save("animation.mp4", animation = for_mp4)
|
55e6ea357573460d0c409444b0af0b09d4a3c7d9
|
62415c1e371e7377e4a4582e2ed12411fc1db754
|
/man/ca_food_group.Rd
|
f28bfe0dbc07d6a56b0eeb69b88c4cfe1bfdf2c0
|
[] |
no_license
|
yihanwu/CAnutrients
|
aff218519191cdc33b42a8b68773da76eddaa16c
|
67ac440f2292618f5b98faa7286fd1212b779599
|
refs/heads/master
| 2020-04-27T11:34:47.164720
| 2019-03-07T08:18:34
| 2019-03-07T08:18:34
| 174,300,566
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,340
|
rd
|
ca_food_group.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CAnutrients.R
\docType{data}
\name{ca_food_group}
\alias{ca_food_group}
\title{Food group dataset}
\format{a \code{tbl_df} with 23 observations of the following 4 fields:
\describe{
\item{FoodGroupID}{Sequential number generated by the database for the food
groups. Joins with \link{ca_food_name}.}
\item{FoodGroupCode}{Identifier code for the Canadian Nutrient File food
groups. There are 23 different CNF food groups.}
\item{FoodGroupName}{Food group name in English. }
\item{FoodGroupNameF}{Food group name in French.}
}}
\source{
The data comes from Canadian Nutrient File 2015.
}
\description{
This dataset is a support or "list" table that is used to link to the
\link{ca_food_name} table. It contains a list of 23 different group headings
(in English and French) based on similar characteristics of the foods.
}
\examples{
ca_food_group
if (require("dplyr")){
head(ca_food_name, 20) \%>\%
left_join(ca_food_group) \%>\%
select(FoodDescription, FoodGroupName)
}
}
\seealso{
\link{CAnutrients}
\link{ca_conversion_factor}
\link{ca_food_name}
\link{ca_food_source}
\link{ca_measure_name}
\link{ca_nutrient_amount}
\link{ca_nutrient_name}
\link{ca_nutrient_source}
\link{ca_refuse_amount}
\link{ca_refuse_name}
\link{ca_yield_amount}
\link{ca_yield_name}
}
|
093bf46913d561edd910337475eff973ee597e66
|
5bd4b82811be11bcf9dd855e871ce8a77af7442f
|
/gap/R/hap.score.R
|
252bc89c96dbbb927ba490b5fd907387f9e6d942
|
[] |
no_license
|
jinghuazhao/R
|
a1de5df9edd46e53b9dc90090dec0bd06ee10c52
|
8269532031fd57097674a9539493d418a342907c
|
refs/heads/master
| 2023-08-27T07:14:59.397913
| 2023-08-21T16:35:51
| 2023-08-21T16:35:51
| 61,349,892
| 10
| 8
| null | 2022-11-24T11:25:51
| 2016-06-17T06:11:36
|
R
|
UTF-8
|
R
| false
| false
| 18,814
|
r
|
hap.score.R
|
#' Score statistics for association of traits with haplotypes
#'
#' @param y Vector of trait values. For trait.type = "binomial", y must have values of 1 for event, 0 for no event.
#' @param geno Matrix of alleles, such that each locus has a pair of adjacent columns of alleles, and the order of columns corresponds to the order of loci on a chromosome. If there are K loci, then ncol(geno) = 2*K. Rows represent alleles for each subject.
#' @param trait.type Character string defining type of trait, with values of "gaussian", "binomial", "poisson", "ordinal".
#' @param offset Vector of offset when trait.type = "poisson".
#' @param x.adj Matrix of non-genetic covariates used to adjust the score statistics. Note that intercept should not be included, as it will be added in this function.
#' @param skip.haplo Skip score statistics for haplotypes with frequencies < skip.haplo.
#' @param locus.label Vector of labels for loci, of length K (see definition of geno matrix).
#' @param miss.val Vector of codes for missing values of alleles.
#' @param n.sim Number of simulations for empirical p-values. If n.sim=0, no empirical p-values are computed.
#' @param method method of haplotype frequency estimation, "gc" or "hap".
#' @param id an added option which contains the individual IDs.
#' @param handle.miss flag to handle missing genotype data, 0=no, 1=yes.
#' @param mloci maximum number of loci/sites with missing data to be allowed in the analysis.
#' @param sexid flag to indicate sex for data from the X chromosome, 1=male, 2=female.
#'
#' @details
#' Compute score statistics to evaluate the association of a trait with haplotypes, when linkage phase is unknown and diploid marker
#' phenotypes are observed among unrelated subjects. For now, only autosomal loci are considered. The haplo.score package,
#' on which this function is based, is gratefully acknowledged.
#'
#' @export
#' @return
#' List with the following components:
#' - score.global Global statistic to test association of trait with haplotypes that have frequencies >= skip.haplo.
#' - df Degrees of freedom for score.global.
#' - score.global.p P-value of score.global based on chi-square distribution, with degrees of freedom equal to df.
#' - score.global.p.sim P-value of score.global based on simulations (set equal to NA when n.sim=0).
#' - score.haplo Vector of score statistics for individual haplotypes that have frequencies >= skip.haplo.
#' - score.haplo.p Vector of p-values for score.haplo, based on a chi-square distribution with 1 df.
#' - score.haplo.p.sim Vector of p-values for score.haplo, based on simulations (set equal to NA when n.sim=0).
#' - score.max.p.sim P-value of maximum score.haplo, based on simulations (set equal to NA when n.sim=0).
#' - haplotype Matrix of haplotypes analyzed. The ith row of haplotype corresponds to the ith item of score.haplo, score.haplo.p, and score.haplo.p.sim.
#' - hap.prob Vector of haplotype probabilities, corresponding to the haplotypes in the matrix haplotype.
#' - locus.label Vector of labels for loci, of length K (same as input argument).
#' - n.sim Number of simulations.
#' - n.val.global Number of valid simulated global statistics.
#' - n.val.haplo Number of valid simulated score statistics (score.haplo) for individual haplotypes.
#'
#' @details This is a version which substitutes haplo.em.
#'
#' @references
#' \insertRef{schaid02}{gap}
#'
#' @examples
#' \dontrun{
#' data(hla)
#' y<-hla[,2]
#' geno<-hla[,3:8]
#' # complete data
#' hap.score(y,geno,locus.label=c("DRB","DQA","DQB"))
#' # incomplete genotype data
#' hap.score(y,geno,locus.label=c("DRB","DQA","DQB"),handle.miss=1,mloci=1)
#' unlink("assign.dat")
#'
#' ### note the differences in p values in the following runs
#' data(aldh2)
#' # to subset the data since hap doesn't handle one allele missing
#' deleted<-c(40,239,256)
#' aldh2[deleted,]
#' aldh2<-aldh2[-deleted,]
#' y<-aldh2[,2]
#' geno<-aldh2[,3:18]
#' # only one missing locus
#' hap.score(y,geno,handle.miss=1,mloci=1,method="hap")
#' # up to seven missing loci and with 10,000 permutations
#' hap.score(y,geno,handle.miss=1,mloci=7,method="hap",n.sim=10000)
#'
#' # hap.score takes considerably longer time and does not handle missing data
#' hap.score(y,geno,n.sim=10000)
#' }
#'
#' @keywords models regression
hap.score <- function(y, geno, trait.type="gaussian",
                      offset = NA, x.adj = NA, skip.haplo=.005,
                      locus.label=NA, miss.val=0, n.sim=0, method="gc", id=NA,
                      handle.miss=0, mloci=NA, sexid=NA)
{
  # Logic is unchanged from the original except that the reassignable
  # shorthands T/F have been replaced by TRUE/FALSE throughout.

  # --- Validate trait type and genotype matrix dimensions ---
  trait.int <- charmatch(trait.type, c("gaussian", "binomial", "poisson", "ordinal"))
  if(is.na(trait.int)) stop("Invalid trait type")
  if(trait.int == 0) stop("Ambiguous trait type")
  if(length(y) != nrow(geno)) stop("Dims of y and geno are not compatible")
  n.loci <- ncol(geno)/2
  if(n.loci != (floor(ncol(geno)/2))) stop("Odd number of cols of geno")

  # --- Flag subjects with missing genotype data ---
  if(handle.miss == 0) {
    # Exclude subjects with ANY missing allele.
    miss <- apply(is.na(geno), 1, any)
    if(!all(is.na(miss.val))) {
      for(mval in miss.val){
        miss <- miss | apply(geno == mval, 1, any)
      }
    }
  } else {
    # Allow up to mloci loci (2 alleles each) with missing data per subject.
    if(is.na(mloci)) stop("Maximum number of missing loci (mloci) not specified")
    nmiss <- apply(is.na(geno), 1, sum)
    if(!all(is.na(miss.val))) {
      for(mval in miss.val) {
        nmiss <- nmiss + apply(geno == mval, 1, sum)
      }
    }
    if(mloci < 0 | mloci >= n.loci) stop("Invalid control for number of missing loci")
    miss <- rep(FALSE, length(y))
    for(i in 1:length(y)) if(nmiss[i] > mloci*2) miss[i] <- TRUE
  }

  adjusted <- TRUE
  if(all(is.na(x.adj))) adjusted <- FALSE
  if(adjusted){
    x.adj <- as.matrix(x.adj)
    if(nrow(x.adj) != length(y)) stop("Dims of y and x.adj are not compatible")
  }

  # --- Drop subjects with missing trait / covariate / offset values ---
  miss <- miss | is.na(y)
  if(adjusted) miss <- miss | apply(is.na(x.adj), 1, any)
  if(trait.int == 3) {
    if(all(is.na(offset))) stop("Missing offset")
    miss <- miss | is.na(offset)
    offset <- offset[!miss]
  }
  y <- as.numeric(y[!miss])
  geno <- geno[!miss, ]
  if(adjusted) x.adj <- x.adj[!miss, , drop=FALSE]

  if(trait.int == 2) {
    if(!all(y == 1 | y == 0)) stop("Invalid y values")
    if(all(y == 1) | all(y == 0)) stop("No variation in y values")
  }
  if(trait.int == 4){
    y <- factor(y)
    y.lev <- levels(y)
    y <- as.numeric(y)
    if(max(y) < 3) stop("Less than 3 levels for y values")
  }

  n.subj <- length(y)
  if(all(is.na(id))) id <- 1:n.subj

  # --- Haplotype frequency estimation via EM ---
  method.id <- charmatch(method, c("gc", "hap", "phase"))
  if(is.na(method.id)) stop("Invalid selection of method")
  if(method.id == 0) stop("Ambiguous method")
  else if(method.id == 1) haplo <- gc.em(data=geno, locus.label, converge.eps=0.00001, maxiter=5000, handle.miss=handle.miss, miss.val=miss.val)
  else if(method.id == 2) haplo <- hap.em(id, data=geno, locus.label, converge.eps=0.00001, maxiter=5000, miss.val=miss.val)
  if(method.id < 3 & !haplo$converge) stop("EM for haplo failed to converge")

  hap1 <- haplo$hap1code
  hap2 <- haplo$hap2code
  indx <- haplo$indx.subj
  post <- haplo$post
  nreps <- as.vector(haplo$nreps)
  uhap <- haplo$uhap
  # Keep only haplotypes whose estimated frequency reaches skip.haplo.
  which.haplo <- haplo$hap.prob >= skip.haplo
  uhap <- uhap[which.haplo]
  # x: 0/1/2 count of each retained haplotype per phase-configuration row.
  x <- outer(hap1, uhap, "==") + outer(hap2, uhap, "==")
  n.x <- ncol(x)
  # x.post: posterior-weighted haplotype dosage per subject.
  x.post <- matrix(rep(NA, n.subj * n.x), ncol=n.x)
  for(j in 1:n.x){
    x.post[,j] <- tapply(x[,j]*post, indx, sum)
  }

  # --- Score statistics (GLM traits) ---
  if(trait.int <= 3){
    if(!adjusted){
      mu <- switch(trait.int, mean(y), mean(y), sum(y)/sum(offset))
      a <- switch(trait.int, var(y), 1, 1)
      x.adj <- matrix(rep(1, n.subj), ncol=1)
    }
    if(adjusted){
      reg.out <- glm(y ~ x.adj, family=trait.type)
      x.adj <- cbind(rep(1, n.subj), x.adj)
      mu <- reg.out$fitted.values
      a <- switch(trait.int, sum(reg.out$residuals^2)/reg.out$df.residual, 1, 1)
    }
    v <- switch(trait.int, 1/a, mu*(1-mu), mu)
    tmp <- hap.score.glm(y, mu, a, v, x.adj, nreps, x.post, post, x)
    u.score <- tmp$u.score
    v.score <- tmp$v.score
  }

  # --- Score statistics (ordinal trait, proportional odds) ---
  if(trait.int == 4) {
    if(adjusted){
      # rms::lrm is needed for the covariate-adjusted ordinal fit.
      for(p in c("rms")) {
        if (length(grep(paste("^package:", p, "$", sep=""), search())) == 0) {
          if (!requireNamespace(p, quietly = TRUE))
            warning(paste("This function needs package `", p, "' to be fully functional; please install", sep=""))
        }
      }
      reg.out <- rms::lrm(y ~ x.adj)
      K <- max(y)
      n.xadj <- ncol(x.adj)
      alpha <- reg.out$coef[1:(K-1)]
      beta <- reg.out$coeff[K:(K-1 + n.xadj)]
      tmp <- hap.score.podds(y, alpha, beta, x.adj, nreps, x.post, post, x)
    }
    if(!adjusted){
      tbl <- table(y)
      s <- 1 - (cumsum(tbl)-tbl)/n.subj
      alpha <- - log((1-s[-1])/s[-1])
      tmp <- hap.score.podds(y, alpha, beta=NA, x.adj=NA, nreps, x.post, post, x)
    }
    u.score <- tmp$u.score
    v.score <- tmp$v.score
  }

  # --- Global and haplotype-specific statistics ---
  tmp <- haplo.stats::Ginv(v.score)
  df <- tmp$rank
  g.inv <- tmp$Ginv
  score.global <- u.score %*% g.inv %*% u.score
  score.haplo <- u.score / sqrt(diag(v.score))
  score.max <- max(score.haplo^2)

  # --- Empirical p-values by permutation (optional) ---
  if(n.sim == 0){
    score.global.p.sim <- NA
    score.haplo.p.sim <- rep(NA, length(score.haplo))
    score.max.p.sim <- NA
    n.val.global <- NA
    n.val.haplo <- NA
  }
  if(n.sim > 0){
    score.global.rej <- 0
    score.haplo.rej <- rep(0, length(score.haplo))
    score.max.rej <- 0
    n.val.global <- 0
    n.val.haplo <- 0
    if(trait.int <= 3){
      mu.rand <- mu
      v.rand <- v
    }
    for(i in 1:n.sim){
      # Permute subjects by random ordering of the trait.
      rand.ord <- order(runif(n.subj))
      if(trait.int <= 3){
        if(adjusted){
          mu.rand <- mu[rand.ord]
          v.rand <- switch(trait.int, v, v[rand.ord], v[rand.ord])
        }
        tmp <- hap.score.glm(y[rand.ord], mu.rand, a, v.rand,
                             x.adj[rand.ord,], nreps, x.post, post, x)
      }
      if(trait.int == 4){
        if(adjusted){
          tmp <- hap.score.podds(y[rand.ord], alpha, beta,
                                 x.adj[rand.ord,,drop=FALSE], nreps, x.post, post, x)
        }
        if(!adjusted) {
          tmp <- hap.score.podds(y[rand.ord], alpha, beta=NA,
                                 x.adj=NA, nreps, x.post, post, x)
        }
      }
      u.score <- tmp$u.score
      v.score <- tmp$v.score
      tmp <- haplo.stats::Ginv(v.score)
      g.inv <- tmp$Ginv
      score.global.sim <- u.score %*% g.inv %*% u.score
      score.haplo.sim <- (u.score / sqrt(diag(v.score)))^2
      score.max.sim <- max(score.haplo.sim)
      if(!is.na(score.global.sim)) {
        n.val.global <- n.val.global + 1
        if(score.global.sim >= score.global) score.global.rej <- score.global.rej + 1
      }
      if(!any(is.na(score.haplo.sim))){
        n.val.haplo <- n.val.haplo + 1
        score.haplo.rej <- score.haplo.rej +
          ifelse(score.haplo.sim >= score.haplo^2, 1, 0)
        if(score.max.sim >= score.max) score.max.rej <- score.max.rej + 1
      }
    }
    score.global.p.sim <- score.global.rej / n.val.global
    score.haplo.p.sim <- score.haplo.rej / n.val.haplo
    score.max.p.sim <- score.max.rej / n.val.haplo
  }

  # Asymptotic chi-square p-values.
  score.global.p <- 1 - pchisq(score.global, df)
  score.haplo.p <- 1 - pchisq(score.haplo^2, 1)
  if(all(is.na(locus.label))) {
    locus.label <- paste("loc-", 1:n.loci, sep="")
  }
  obj <- (list(score.global=score.global, df=df, score.global.p=score.global.p,
               score.global.p.sim=score.global.p.sim,
               score.haplo=score.haplo, score.haplo.p=score.haplo.p,
               score.haplo.p.sim=score.haplo.p.sim,
               score.max.p.sim=score.max.p.sim,
               haplotype=haplo$haplotype[which.haplo,],
               hap.prob=haplo$hap.prob[which.haplo],
               locus.label=locus.label,
               n.sim=n.sim, n.val.global=n.val.global, n.val.haplo=n.val.haplo))
  class(obj) <- "hap.score"
  return(obj)
}
hap.score.glm <- function(y, mu, a, v, x.adj, nreps, x.post, post, x)
{
  # Score vector and variance matrix for GLM traits (gaussian / binomial /
  # poisson), with the haplotype scores adjusted for the x.adj covariates.
  score_mtx <- (y - mu) * x.post / a
  u.score <- apply(score_mtx, 2, sum)
  # Variance of the covariate scores.
  cov_adj <- t(x.adj * v) %*% x.adj
  # Covariance between covariate scores and haplotype scores.
  cov_cross <- t(x.post) %*% (x.adj * v)
  # Variance of the haplotype scores (Louis' method).
  resid_sq <- ((y - mu) / a)^2
  weights <- rep(v - resid_sq, nreps) * post
  cov_hap <- t(x * weights) %*% x + t(score_mtx) %*% score_mtx
  # Haplotype-score variance adjusted for the covariates.
  v.score <- cov_hap - cov_cross %*% solve(cov_adj) %*% t(cov_cross)
  list(u.score = u.score, v.score = v.score)
}
hap.score.podds <- function(y, alpha, beta=NA, x.adj=NA, nreps, x.post, post, x)
{
  ###################################################################
  #
  # Score vector and variance for an ordinal trait under a proportional
  # odds model. Logic is unchanged from the original except that T/F were
  # replaced by TRUE/FALSE and two typos in this comment box were fixed
  # (v.11, not v.12, is composed of v.aa/v.ae/v.ee, and the inverse is of
  # v.11).
  #
  # If U=c(u.a, u.e, u.g), where
  #    u.a = score for alpha's
  #    u.e = score for unambiguous (x.adj) covariates
  #    u.g = score for ambiguous haplotypes
  #
  # Then the upper triangle of Var(U) can be partitioned as
  #
  #           | v.aa  v.ae  v.ag |    |             |
  #    V(U) = |       v.ee  v.eg |  = | v.11  v.12  |
  #           |             v.gg |    |       v.gg  |
  #
  # where v.11 is composed of v.aa, v.ae, v.ee
  #       v.12 is composed of v.ag, v.eg
  #
  # and Var(u.g) = v.gg - t(v.12) * v.11(inv) * v.12
  #
  # The following computes each of the submatrices as needed
  # to determine u.g and Var(u.g)
  #
  ##################################################################
  adjusted <- TRUE
  if(any(is.na(x.adj))) adjusted <- FALSE
  if(adjusted) n.xadj <- ncol(x.adj)
  n.x <- ncol(x)
  K <- max(y)
  # To make subscripting easier, append Inf to front of alpha,
  # as place-holder for alpha[1] = Inf.
  alpha <- c(Inf, alpha)
  # s / s.p: cumulative probabilities at the subject's own and next category.
  if(adjusted){
    s   <- ifelse(y==1, 1, 1/(1 + exp(-(alpha[y  ] + x.adj %*% beta ))) )
    s.p <- ifelse(y==K, 0, 1/(1 + exp(-(alpha[y+1] + x.adj %*% beta ))) )
  }
  if(!adjusted){
    s   <- ifelse(y==1, 1, 1/(1 + exp(-(alpha[y  ] ))) )
    s.p <- ifelse(y==K, 0, 1/(1 + exp(-(alpha[y+1] ))) )
  }
  w1 <- (s*(1-s) - s.p*(1-s.p))/(s - s.p)
  u.mtx <- w1 * x.post
  u.score <- apply(u.mtx,2,sum)
  # Compute information matrix pieces for alpha-beta and alpha-alpha.
  tmp1 <- (s + s.p^2 - 2*s*s.p)*s.p*(1-s.p)/(s-s.p)^2
  tmp2 <- (s.p + s^2 - 2*s*s.p)*s*(1-s)/(s-s.p)^2
  tmp3 <- s.p*(1-s.p)*s*(1-s)/(s-s.p)^2
  v.ag <- matrix(rep(0, (K-1)*n.x), ncol=n.x)
  if(adjusted) v.ae <- matrix(rep(0, (K-1)*n.xadj), ncol=n.xadj)
  v.aa <- matrix(rep(0,(K-1)^2),ncol=(K-1))
  n.subj <- length(y)
  for(j in 2:K){
    wt <- rep(0,n.subj)
    wt <- ifelse(y==(j-1), (tmp1 - tmp3), wt)
    wt <- ifelse(y==j, (tmp2 - tmp3), wt)
    v.ag[(j-1),] <- apply(wt * x.post, 2,sum)
    if(adjusted) v.ae[(j-1),] <- apply(wt * x.adj, 2,sum)
    v.aa[(j-1),(j-1)] <- sum(tmp1[y==(j-1)]) + sum(tmp2[y==j])
    if(j < K) v.aa[(j-1), j] <- -sum(tmp3[y==j])
  }
  # Fill in lower triangle of v.aa to make it symmetric.
  v.aa <- v.aa + t( (col(v.aa) > row(v.aa))*v.aa )
  # Louis' method for v.gg.
  w2 <- s*(1-s) + s.p*(1-s.p)
  t1 <- rep( (w2 - w1^2), nreps) * post
  v.gg <- t(x*t1) %*% x + t(u.mtx) %*% u.mtx
  if(adjusted){
    v.ee <- t(w2*x.adj) %*% x.adj
    v.eg <- t(w2*x.adj) %*% x.post
    v.11 <- rbind( cbind(v.aa, v.ae), cbind(t(v.ae),v.ee) )
    v.12 <- rbind(v.ag,v.eg)
    v.score <- v.gg - t(v.12) %*% solve(v.11) %*% v.12
  }
  if(!adjusted){
    v.score <- v.gg - t(v.ag) %*% solve(v.aa) %*% v.ag
  }
  return(list(u.score=u.score, v.score=v.score))
}
#' Plot haplotype frequencies versus haplotype score statistics
#'
#' Method function to plot a class of type hap.score
#'
#' @param x The object returned from hap.score (which has class hap.score).
#' @param ... Optional arguments.
#'
#' @export
#' @return
#' Nothing is returned.
#'
#' This is a plot method function used to plot haplotype frequencies on
#' the x-axis and haplotype-specific scores on the y-axis. Because
#' hap.score is a class, the generic plot function
#' can be used, which in turn calls this plot.hap.score function.
#'
#' @references
#' Schaid DJ, Rowland CM, Tines DE, Jacobson RM, Poland GA (2002)
#' Score tests for association of traits with haplotypes when
#' linkage phase is ambiguous. Amer J Hum Genet 70:425-34
#'
#' @seealso [`hap.score`]
#'
#' @examples
#' \dontrun{
#' save <- hap.score(y, geno, trait.type = "gaussian")
#'
#' # Example illustrating generic plot function:
#' plot(save)
#'
#' # Example illustrating specific method plot function:
#' plot.hap.score(save)
#' }
#'
#' @keywords hplot
plot.hap.score <- function(x, ...){
   # Scatter plot of haplotype frequency (x-axis) against the
   # haplotype-specific score statistic (y-axis); extra arguments in
   # `...` are passed through to plot().
   plot(x$hap.prob, x$score.haplo, xlab="Haplotype Frequency",
        ylab="Haplotype Score Statistic", ...)  # fixed typo: was "Haploltype"
   invisible()
}
#' Print a hap.score object
#'
#' Method function to print a class of type hap.score
#'
#' @param x The object returned from hap.score (which has class hap.score).
#' @param ... Optional arguments.
#'
#' @export
#' @return Nothing is returned.
#'
#' This is a print method function used to print information from
#' hap.score class, with haplotype-specific information given in a
#' table. Because hap.score is a class, the generic print function
#' can be used, which in turn calls this print.hap.score function.
#'
#' @references
#' Schaid DJ, Rowland CM, Tines DE, Jacobson RM, Poland GA (2002)
#' Score tests for association of traits with haplotypes when
#' linkage phase is ambiguous. Amer J Hum Genet 70:425-34
#'
#' @seealso [`hap.score`]
#'
#' @examples
#' \dontrun{
#' save <- hap.score(y, geno, trait.type = "gaussian")
#'
#' # Example illustrating generic print function:
#' print(save)
#'
#' # Example illustrating specific method print function:
#' print.hap.score(save)
#' }
#'
#' @keywords print
print.hap.score <- function(x, ...){
# Print the global score statistic, then a table of haplotype-specific
# scores sorted in increasing order of the score statistic.
# print of global score stats:
cat("\nGlobal Score Statistics\n\n")
cat(paste("global-stat = ",round(x$score.global,5),", df = ",x$df,
", p-val = ",round(x$score.global.p,5),sep=""))
if(x$n.sim>0) cat(", sim. p-val = ",x$score.global.p.sim,"\n\n")
if(x$n.sim>0) cat("max-stat sim. p-val = ",x$score.max.p.sim)
cat("\n\n")
# create table for haplotype specific stats:
tbl <- cbind(x$haplotype,round(x$hap.prob,5),round(x$score.haplo,5),
round(x$score.haplo.p,5))
if(x$n.sim>0) tbl <- cbind(tbl,x$score.haplo.p.sim)
ord <- order(x$score.haplo)
# drop = FALSE keeps tbl a matrix when there is a single haplotype;
# otherwise one-row subsetting collapses it to a vector and the
# dimnames<- assignment below fails.
tbl <- tbl[ord, , drop = FALSE]
if(x$n.sim == 0) dimnames(tbl) <- list(NULL,c(x$locus.label,"Hap-Freq",
"Hap-Score","p-val"))
if(x$n.sim > 0) dimnames(tbl) <- list(NULL,c(x$locus.label,"Hap-Freq",
"Hap-Score","p-val","sim p-val"))
cat("Haplotype-specific Scores\n\n")
print(tbl,quote=FALSE)
cat("\n\n")
invisible()
}
# 13-9-2003 start to implement
# 14-9-2003 in shape
# 21-9-2003 start extensive checking
# 23-9-2003 rewrite interface to genecounting
# 26-9-2003 done with successful use of by and order
# 17-10-2003 start to implement missing genotype code
# 18-9-2004 to fix S3 class hap.score
|
21a606fee04010515035544677f5fcc7c5206f2d
|
963b75306674956433ce16a562816de9bd4f9393
|
/man/persist.match.Rd
|
b9d595bf84b21df9e4a6d8c7334686cff5356180
|
[] |
no_license
|
cran/LogicForest
|
cd68de6f2bfa89b6cfc9fe49410b0ce20fa94f29
|
0619287ba164198feec015682d110a1bcdce58da
|
refs/heads/master
| 2021-01-20T04:32:33.071237
| 2014-09-18T00:00:00
| 2014-09-18T00:00:00
| 17,717,763
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 432
|
rd
|
persist.match.Rd
|
\name{persist.match}
\alias{persist.match}
\title{Internal Logic Forest Functions
}
\description{Internal function called by \code{\link{persistence.prep}}. Not intended to be used independently of this function.
}
\details{ Generates a list of all subset matches for variables or variable interactions in a logic forest model or an LBoost model.
}
\author{Bethany Wolf wolfb@musc.edu
}
\keyword{internal}
|
cd8380ff6dd2a7e1daceae1fee1a930a3001b53b
|
e2626d6d02b8533b9e3bc5ba2aa86afffe7a1758
|
/Script.R
|
92068af5e1825b1e8129f5864ac4c3c15cfda008
|
[] |
no_license
|
KHHaugen/Rclub
|
0bcd6b274caafda61cb3aedbe5decdba8316225c
|
450a99a023ecc91180424521feebcd06be4c411b
|
refs/heads/master
| 2020-05-17T08:30:39.401614
| 2019-04-26T10:21:30
| 2019-04-26T10:21:30
| 183,608,052
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,877
|
r
|
Script.R
|
## Spatial models: R club 26/04 2019 Kristian H. Haugen
# Downloads school-level data, aggregates the panel to a cross section,
# builds k-nearest-neighbour and inverse-distance spatial weights, and
# fits OLS, spatial-error and spatial-lag models of school GPA.
# Packages ----------------------------------------------------------------
library(pacman)
p_load(data.table, sp, spdep, ggplot2)
# NOTE(review): errorsarlm()/lagsarlm() used below historically came from
# spdep but moved to the 'spatialreg' package — confirm the installed spdep
# version still re-exports them.
# Loading data ----------------------------------------------------------------
data <- fread('https://raw.githubusercontent.com/KHHaugen/RURED/master/grunnstruktur%20med%20geokoder.csv')
# Prepping data ----------------------------------------------------------------
# Removing schools located outside the country
data$lon[data$lat < 57.5] <- NA
data$lat[data$lat < 57.5] <- NA
data$lat[(data$lat < 66 | data$lat > 72) & data$lon > 15] <- NA
data$lon[is.na(data$lat)] <- NA
data <- data[!is.na(data$lat), ]
# Removing Språksenteret and schools with reported 0 students
data <- data[data$Orgnr != 813993422,]
data <- data[data$grpst2 != 0,]
## The data is panel data, and we need it to be cross sectional. We therefore aggregate the data.
data <- aggregate(data[,c('grpoeng', 'grpst2', 'utdanning_komb', 'folkemengde', 'eleverN', 'lon', 'lat')], list(data$Orgnr, data$Navn), mean, na.rm=T)
data <- data[!is.na(data$utdanning_komb),]
data <- data[!is.na(data$grpst2),]
data <- data[!is.na(data$grpoeng),]
data$folkemengde[data$folkemengde == 0] <- NA
data <- data[!is.na(data$folkemengde),]
# Letting R know it's spatial data and what system is used ----------------------------------------------------------------
# Defining coordinates and CRS
sp_point <- cbind(data$lon, data$lat)
proj <- CRS("+init=epsg:4326")
# We are working with points, and a problem arise when two points are exactly the same.
# This is an issue here, and one way to handle this is to set an offset to the location of duplicates.
sp_point[,2][duplicated(sp_point)] <- sp_point[,2][duplicated(sp_point)] + 0.00001
data.sp <- SpatialPointsDataFrame(coords=sp_point,data,proj4string=proj)
data.sp <- spTransform(data.sp, CRS("+init=epsg:25833"))
map_crd <- coordinates(data.sp)
# NOTE(review): no 'id' column is created above, so data.sp$id is NULL and
# this assignment clears the rownames rather than labelling them — confirm.
rownames(map_crd) <- data.sp$id
# Defining neighbors ----------------------------------------------------------------
k <- 3
W_knn1 <- knn2nb(knearneigh(map_crd, k=k))
W_knn1_mat <- nb2listw(W_knn1, style = 'W', zero.policy = TRUE)
# Displaying connectivity ----------------------------------------------------------------
plot(W_knn1_mat,coords=map_crd,pch=19, cex=0.1, col="gray")
# Testing for global spatial correlation ----------------------------------------------------------------
moran.test(data$grpoeng, listw = W_knn1_mat)
# Showing distribution of GPA
data.sp$percentile<-(rank(data.sp$grpoeng)/length(data.sp$grpoeng))*100
#mid <- mean(data.sp$grpoeng)
GPA <- ggplot(as.data.frame(data.sp), aes(map_crd[,1], map_crd[,2])) +
geom_point(aes(colour = percentile),
size = 0.5) +
scale_color_gradient(low="red", high="blue") +
labs(title = 'GPA of schools', x = 'lon', y = 'lat') +
theme(axis.title.x=element_blank(),axis.text.x=element_blank(),axis.ticks.x=element_blank(),
axis.title.y=element_blank(),axis.text.y=element_blank(),axis.ticks.y=element_blank())
GPA
# Models ----------------------------------------------------------------
mod1 <- lm(grpoeng ~ grpst2 + I(log(folkemengde)) + utdanning_komb, data = data) # OLS
mod2 <- errorsarlm(grpoeng ~ grpst2 + I(log(folkemengde)) + utdanning_komb, data = data, listw = W_knn1_mat) # simultaneous autoregressive model
mod3 <- lagsarlm(grpoeng ~ grpst2 + I(log(folkemengde)) + utdanning_komb, data = data, listw = W_knn1_mat) # Spatially lagged dependent variable
summary(mod1)
summary(mod2)
summary(mod3)
# Geostatistical models ----------------------------------------------------------------
# Geostatistical models are spatial models where the connectivity is based on distance, and not on neighbors necessarily.
dist <- unlist(nbdists(W_knn1, map_crd)) # Unlisting to use the information
distance <- dnearneigh(map_crd, d1=0, d2=max(dist)) # Calculating distances. d1 and d2 creates a band of distances to be used. Here the band is set so that all points have a weight, but it can be set so that for example only points in a set distance will be used.
distance.neigh.list <- nbdists(distance, coordinates(map_crd))
W_inv.dist <- lapply(distance.neigh.list, function(x) 1/x) # Weighting on the inverse of the distance. The points nearer to will have a larger weight than points further away
W_inv.distance <- nb2listw(distance, glist=W_inv.dist, style="W")
mod4 <- errorsarlm(grpoeng ~ grpst2 + I(log(folkemengde)) + utdanning_komb, data = data, listw=W_inv.distance, zero.policy=T) # Geostatistical error correcting model
# NOTE(review): mod5 is fitted on data.sp while the other models use data —
# confirm this difference is intentional.
mod5 <- lagsarlm(grpoeng ~ grpst2 + I(log(folkemengde)) + utdanning_komb, data = data.sp, listw=W_inv.distance, zero.policy=T) # Geostatistical spatially lagged dependent
summary(mod4)
summary(mod5)
|
fd80e198e4dd9f63825e70a0294777200d4f48c7
|
1f9039e664ab3bb9df978a5ad05d60b69ca7095d
|
/FLUXNET_Gs_PMinv.R
|
62cdcdf0f523f99b661f1f9b44dfdf31cb8a53da
|
[] |
no_license
|
l5d1l5/Gs_FLUXNET2015
|
5dd8b093f0167aee81739e174f97d5d1ac412ed8
|
8afce8aba86ca35f7fe20a6f275ee686474cc198
|
refs/heads/master
| 2022-03-14T02:59:31.187417
| 2018-03-11T04:26:24
| 2018-03-11T04:26:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,605
|
r
|
FLUXNET_Gs_PMinv.R
|
# Calculate hourly surface conductance
#####
# NOTE(review): rm(list = ls()) wipes the user's entire workspace; this is
# discouraged in shared scripts, but retained here unchanged.
rm(list = ls())
library(lubridate)
###############
# define functions
###############
# Slope of the saturation vapour pressure (T-es) curve.
#
# Args:
#   Ta: air temperature in degrees Celsius.
# Returns:
#   delta, the slope of the T-es curve (kPa K-1).
delta_fun <- function(Ta){
  # Buck-type saturation vapour pressure (kPa) at temperature Ta.
  sat_vp <- 0.61121 * exp((17.502 * Ta) / (240.97 + Ta))
  # Analytic derivative d(es)/dTa of the expression above.
  (240.97 * 17.502 * sat_vp) / ((Ta + 240.97)^2)
}
# Aerodynamic conductance (m s-1) from wind speed and friction velocity.
#
# Args:
#   u: wind speed (m s-1).
#   ustar: friction velocity (m s-1).
# Returns:
#   Ga, the aerodynamic conductance (m s-1).
Ga_calfromflx_fun <- function(u, ustar){
  # Aerodynamic resistance: momentum term plus excess (boundary-layer) term.
  resistance <- u / ustar^2 + 6.2 * ustar^(-2/3)
  1 / resistance
}
# Surface conductance (m s-1) by inverting the Penman-Monteith equation.
#
# Args:
#   VPD   - vapour pressure deficit (kPa)
#   A     - available energy (W m-2)
#   ro    - air density (kg m-3)
#   Cp    - specific heat capacity of air (J kg-1 K-1)
#   Ga    - aerodynamic conductance (m s-1)
#   delta - slope of the T-es curve (kPa K-1)
#   gamma - psychrometric constant (kPa K-1)
#   LEobs - observed latent heat flux (W m-2)
# Returns:
#   Gs, the surface conductance (m s-1).
Gs_calfromflx_fun <- function(VPD, A, ro, Cp, Ga, delta, gamma, LEobs){
  eps <- delta / gamma
  denom <- eps * A - (eps + 1) * LEobs + Ga * ro * Cp * VPD / gamma
  LEobs * Ga / denom
}
################
# end of definitions of functions
################
#######
# define constant
# filling values
mfill = -9999
# psychrometric constant
gamma = 0.066 # kPa*K-1
# air density
ro = 1.15 # kg*m-3
# specific heat capacity of air
Cp = 1012 # J*kg-1*K-1
# gas constant
Rgas <- 8.3144598 # J mol-1 K-1
###
# file paths and names
infilepath1 = 'F:/FLUXNET2015_Nov/'
infilepath4 = 'F:/FLUXNET2015_PATCH1/' # patch for flux data quality controls
infilename2 <- 'G:/FLUXNET2015_siteinfo.csv'
outfilepath1 <- 'G:/FLUXNET2015_Gs/'
###
# site information
# read site information in a file
siteinfo <- read.table(infilename2, header=TRUE, sep=',', stringsAsFactors=FALSE)
sitepft <- siteinfo$pft
siteid <-siteinfo$site
nsite = length(siteid)
###
time.start <- Sys.time()
###
# save peak-gpp month data
# NOTE(review): df.out is never filled inside the loop below; it appears unused.
df.out <- data.frame()
# NOTE(review): loop starts at 2, so the first site in siteinfo is skipped —
# confirm this is intentional (possibly a restart leftover).
for(i in 2:nsite){
######
# surface conductance under dry periods
######
# read hourly flux data
flxfiledir1 = list.files(infilepath1, pattern = paste0('FLX_',siteid[i],'.*'))
# no such site file, exit and go to next step
if (isTRUE(all.equal(flxfiledir1, character(0)))) next
flxfiledir2hr = list.files(paste0(infilepath1, flxfiledir1), pattern = '.*_FULLSET_H{1}')
flxT_hr = read.csv(paste0(infilepath1,flxfiledir1,'/',flxfiledir2hr))
## get timestamp of hourly data
## First get this because 'flx_year' will be used
time_hr = flxT_hr$TIMESTAMP_START
flx_year_hr = floor(time_hr/100000000)
flx_month_hr = floor((time_hr-flx_year_hr*100000000)/1000000)
flx_day_hr <- floor((time_hr-flx_year_hr*100000000-flx_month_hr*1000000)/10000)
flx_year = unique(flx_year_hr)
nyear = length(flx_year) # number of years
# get hourly steps
time_step = (time_hr[2]-time_hr[1])/100
n_steps_day = ifelse(time_step==1, 24, 48)
# get site PFT
######
# read daily flux data
flxfiledir2d <- list.files(paste0(infilepath1, flxfiledir1), pattern = '.*_FULLSET_DD')
flxT_d <- read.csv(paste0(infilepath1,flxfiledir1,'/',flxfiledir2d))
# get timestamp of daily data
time_d <- flxT_d$TIMESTAMP
flx_year_d <- floor(time_d/10000)
flx_month_d <- floor((time_d - flx_year_d*10000)/100)
######
# read monthly flux data
flxfiledir2mo <- list.files(paste0(infilepath1, flxfiledir1), pattern = '.*_FULLSET_MM')
flxT_mo <- read.csv(paste0(infilepath1,flxfiledir1,'/',flxfiledir2mo))
# timestamp of monthly data
time_mo <- flxT_mo$TIMESTAMP
flx_year_mo <- floor(time_mo/100)
flx_month_mo <- (time_mo-flx_year_mo*100)
#######
# Deal with QC problem in heat flux data
# Heat flux QC of hourly data
hflxdir1 = list.files(infilepath4, pattern = paste0('FLX_',siteid[i],'_FLUXNET2015_PATCH1_H{1}'))
hflxqc_hr = read.csv(paste0(infilepath4, hflxdir1))
timeqc_hr <- hflxqc_hr$TIMESTAMP_START
Hqc_hr = hflxqc_hr$H_F_MDS_QC
LEqc_hr = hflxqc_hr$LE_F_MDS_QC
# years of hourly QC
qc_year_hr <- floor(timeqc_hr/100000000)
# Heat flux QC data in daily scale
hflxdir2 <- list.files(infilepath4,pattern=paste0('FLX_',siteid[i],'_FLUXNET2015_PATCH1_DD'))
hflxqc_d <- read.csv(paste0(infilepath4, hflxdir2))
timeqc_d <- hflxqc_d$TIMESTAMP
Hqc_d <- hflxqc_d$H_F_MDS_QC
LEqc_d <- hflxqc_d$LE_F_MDS_QC
# years of daily QC
qc_year_d <- floor(time_d/10000)
######
# 'clean' data
# e.g. site 'NL-Loo' data lacks the last record
# A ROBUST way to clean data
# assume: problem with the last year data
# hourly data: 24 or 48 data in the last day
# daily data: 31 days in December
# monthly data: December is the last month
# annual data: QC data and original data have the same size
ndif_hr <- n_steps_day-
length(which(flx_year_hr==flx_year[nyear]&flx_month_hr==12&flx_day_hr==31))
if (ndif_hr>0) {
# less than 24 or 48
# add rows
newrows <- array(dim = c(ndif_hr,dim(flxT_hr)[2]))
newrows <- as.data.frame(newrows)
names(newrows) <- names(flxT_hr) # have the same names
flxT_hr <- rbind(flxT_hr, newrows)
}
#
ndif_d <- 31 -
length(which(flx_year_d==flx_year[nyear]&flx_month_d==12))
if (ndif_d>0) {
newrows <- as.data.frame(array(dim = c(ndif_d, dim(flxT_d)[2])))
names(newrows) <- names(flxT_d)
flxT_d <- rbind(flxT_d, newrows)
}
#
ndif_mo <- 12 -
length(which(flx_year_mo==flx_year[nyear]))
if (ndif_mo>0) {
newrows <- as.data.frame(array(dim = c(ndif_mo, dim(flxT_mo)[2])))
names(newrows) <- names(flxT_mo)
flxT_mo <- rbind(flxT_mo, newrows)
}
#
# same as length of FLUXNET hourly data
Hqc_hr <- Hqc_hr[1:length(flxT_hr[,1])]
LEqc_hr <- LEqc_hr[1:length(flxT_hr[,1])]
# same as length of FLUXNET daily data
Hqc_d <- Hqc_d[1:length(flxT_d[,1])]
LEqc_d <- LEqc_d[1:length(flxT_d[,1])]
######
# read yearly flux data
#####
# hourly data: obtain measured and good-quality data
# carbon fluxes
# NEE/NEP
NEE_hr = flxT_hr$NEE_VUT_REF
NEEqc_hr = flxT_hr$NEE_VUT_REF_QC
NEE_hr[NEE_hr==mfill|NEEqc_hr>1] = NA
NEP_hr = -NEE_hr
# GPP
GPP_hr = flxT_hr$GPP_NT_VUT_REF
GPPqc_hr = flxT_hr$NEE_VUT_REF_QC
GPP_hr[GPP_hr==mfill|GPPqc_hr>1] = NA
# RE
RE_hr = flxT_hr$RECO_NT_VUT_REF
REqc_hr = flxT_hr$NEE_VUT_REF_QC
RE_hr[RE_hr==mfill|REqc_hr>1] = NA
# water fluxes
# LE
LE_hr = flxT_hr$LE_F_MDS
LE_hr[LE_hr==mfill|LEqc_hr>1] = NA
# sensible heat fluxes (H)
H_hr = flxT_hr$H_F_MDS
H_hr[H_hr==mfill|Hqc_hr>1] = NA
# radiation fluxes
SW_hr = flxT_hr$SW_IN_F_MDS
SWqc_hr = flxT_hr$SW_IN_F_MDS_QC
SW_hr[SW_hr==mfill|SWqc_hr>1] = NA
# soil heat flux
G_hr = flxT_hr$G_F_MDS
Gqc_hr = flxT_hr$G_F_MDS_QC
G_hr[G_hr==mfill|Gqc_hr>1] = NA
# net radiation
Rn_hr = flxT_hr$NETRAD
Rn_hr[Rn_hr==mfill] = NA
if (isTRUE(all.equal(Rn_hr,logical(0)))) {
print(paste0(siteid[i],' no net radiation'))
next
}
# air temperature, C
Ta_hr = flxT_hr$TA_F_MDS
Taqc_hr = flxT_hr$TA_F_MDS_QC
Ta_hr[Ta_hr==mfill|Taqc_hr>1] = NA
# VPD, hPa
VPD_hr = flxT_hr$VPD_F_MDS
VPDqc_hr = flxT_hr$VPD_F_MDS_QC
VPD_hr[VPD_hr==mfill|VPDqc_hr>1] = NA
VPD_hr = VPD_hr/10 # KPa
# precipitation
P_hr = flxT_hr$P_F
Pqc_hr = flxT_hr$P_F_QC
# soil moisture (NOTE: this could be NULL)
SWC_hr = flxT_hr$SWC_F_MDS_1
SWCqc_hr = flxT_hr$SWC_F_MDS_1_QC
SWC_hr[SWC_hr==mfill|SWCqc_hr>1] = NA
###
# Sites have no SWC records
if (isTRUE(all.equal(SWC_hr,logical(0)))) {
# create a vector
SWC_hr <- rep(NA, length(NEP_hr))
}
# wind speed
u_hr <- flxT_hr$WS_F
uqc_hr <- flxT_hr$WS_F_QC
u_hr[u_hr==mfill|uqc_hr>0] = NA
# friction velocity
ustar_hr = flxT_hr$USTAR
ustar_hr[ustar_hr==mfill] = NA
# CO2 concentration
CO2_hr <- flxT_hr$CO2_F_MDS
co2qc_hr <- flxT_hr$CO2_F_MDS_QC
CO2_hr[CO2_hr==mfill|co2qc_hr>1] <- NA
# nighttime NEE
##########
# daily precipitation, mm
P_d <- flxT_d$P_F
P_d[P_d==mfill] <- NA
# daily soil water content, %
SWC_d <- flxT_d$SWC_F_MDS_1
SWCqc_d <- flxT_d$SWC_F_MDS_1_QC
SWC_d[SWC_d==mfill|SWCqc_d<0.7] <- NA
# daily carbon flux
# GPP
GPP_d <- flxT_d$GPP_NT_VUT_REF
GPPqc_d <- flxT_d$NEE_VUT_REF_QC
GPP_d[GPP_d==mfill] <- NA
# daily heat fluxes
# latent heat flux
LE_d <- flxT_d$LE_F_MDS
LE_d[LE_d==mfill|LEqc_d<0.7] <- NA
##########
# monthly CO2 concentration from FLUXNET
CO2_mo <- flxT_mo$CO2_F_MDS
CO2_mo[CO2_mo==mfill] <- NA
# monthly GPP
GPP_mo <- flxT_mo$GPP_NT_VUT_REF
GPP_mo[GPP_mo==mfill] <- NA
# monthly ET/LE
LE_mo <- flxT_mo$LE_F_MDS
LEqc_mo <- flxT_mo$LE_F_MDS_QC
LE_mo[LE_mo==mfill|LEqc_mo<0.7] <- NA
# monthly SWC
SWC_mo <- flxT_mo$SWC_F_MDS_1
SWCqc_mo <- flxT_mo$SWC_F_MDS_1_QC
SWC_mo[SWC_mo==mfill] <- NA
#####
# Calculate surface conductance
delta <- delta_fun(Ta_hr)
Ga_hr <- Ga_calfromflx_fun(u_hr,ustar_hr)
Gs_hr <- Gs_calfromflx_fun(VPD_hr,Rn_hr,ro,Cp,Ga_hr,delta,gamma,LE_hr)
# post-processing, data selection
# Gs_hr_neg <- Gs_hr # negative Gs
# Gs_hr_neg[Gs_hr_neg>=0] = NA
# Gs_hr_pos <- Gs_hr # positive Gs
# Gs_hr_pos[Gs_hr_pos<0] = NA
Gs_hr_filter <- Gs_hr # data selection
Gs_hr_filter[SW_hr<100|is.na(SW_hr)] <- NA # daytime data
Gs_hr_filter[GPP_hr<5|is.na(GPP_hr)] <- NA # positive carbon uptake
Gs_hr_filter[Ta_hr<5|is.na(Ta_hr)] <- NA # extreme low air temperature
Gs_hr_filter[Gs_hr_filter<=0.000001] <- NA # unrealistic values
Gs_hr_filter[(Rn_hr-LE_hr)<5] <- NA # Rn generally larger than LE
#########
# Unit transformation m s-1 to mol m-2 s-1
# GPP - umolCO2 m-2 s-1
# VPD - KPa
# Ca - umol mol-1
# Gs - m s-1, turn into mol m-2 s-1
# P - Pa
# Rgas - J mol-1 K-1
# Tk - K
Tk_hr <- Ta_hr+273.15 # from C to K
# BUG FIX: screen the -9999 fill value BEFORE the unit conversion; previously
# the column was multiplied by 1000 first, so 'PA_hr==mfill' never matched and
# fill values (-9999000 Pa) propagated into Gs_mol.
PA_hr <- flxT_hr$PA_F
PA_hr[PA_hr==mfill] <- NA
PA_hr <- PA_hr*1000 # from kPa to Pa
Ca_hr <- flxT_hr$CO2_F_MDS
Ca_hr[Ca_hr==mfill] <- NA
Gs_mol <- Gs_hr_filter*PA_hr/(Rgas*Tk_hr)
########
# remove impact of Precipitation
P_flag <- P_d
P_flag[] <- 0 # initiate an array for flagging
P1d_index <- which(P_d>0.1&!is.na(P_d))
P_flag[P1d_index] <- 1
P2d_index <- P1d_index+1 # remove the day after the rainy day
P2d_index[P2d_index>length(P_d)] <- length(P_d) # in case beyond the boundary
P_flag[P2d_index] <- 1
# from Daily to Hourly
P_flag_hr <- rep(P_flag, each=n_steps_day)
Gs_mol[P_flag_hr==1] <- NA
############
## save results
## save estimated surface conductances for each site
## create a data frame
time_start <- flxT_hr$TIMESTAMP_START
# time_end <- flxT_hr$TIMESTAMP_END
Gs_df <- data.frame(time_start,Ta_hr,u_hr,ustar_hr,VPD_hr,Rn_hr,ro,Cp,Ga_hr,delta,gamma,LE_hr, # variables for Gs estimation
Gs_mol,NEE_hr,GPP_hr,RE_hr,SWC_hr) # Gs and other variables
outfilename1 <- paste0(outfilepath1, siteid[i], '_Gs_hr.csv')
write.table(Gs_df, outfilename1, row.names = FALSE, sep = ',')
}
time.end <- Sys.time()
|
b9468a85da22520bd99495d60bd75cd9fcf38302
|
1499dd7ea7db28073afe87446e84442de9e7ad87
|
/R/new_project.R
|
342da9ab0d232f5fb56ca0d2fef28729a006aec2
|
[] |
no_license
|
sal-medstats/ctru
|
16b21c7f187c4589e2487733dbba3706e913ff4e
|
971c7c7da6e9463776af6f681fe2aac97c1239be
|
refs/heads/master
| 2020-06-13T11:46:19.314895
| 2019-07-01T14:41:29
| 2019-07-01T14:41:29
| 194,643,154
| 0
| 0
| null | 2019-07-01T14:41:01
| 2019-07-01T09:33:45
|
R
|
UTF-8
|
R
| false
| false
| 1,208
|
r
|
new_project.R
|
#' Setup a new project directory
#'
#' @description Setup a project directory for a CTRU study.
#'
#' @details
#'
#' Creates the default project layout (lib/, doc/, tmp/, ref/) under
#' \code{path}, along with template \code{README.md} and
#' \code{lib/DESCRIPTION} files.
#'
#' @param path Absolute or relative path to create the directory structure.
#' @param ... Currently unused; retained for backwards compatibility.
#'
#' @export
new_project<- function(path = '',
                       ...){
    ## Use dir.create()/file.create() instead of shelling out to mkdir/touch.
    ## This fixes several bugs in the original implementation:
    ##  - on Windows, 'mkdir' had no trailing space, fusing command and path;
    ##  - 'tmp', 'ref' and 'doc' were appended without a path separator;
    ##  - 'touch <path> README.md' created README.md in the CWD, not in path.
    ## It is also portable and avoids shell-quoting issues in paths.
    dirs <- c(file.path('lib', 'data-raw'),
              file.path('lib', 'data'),
              file.path('lib', 'R'),
              file.path('lib', 'vignette'),
              ## Shiny app directory named after the project (basename avoids
              ## embedding a full absolute path as a directory name).
              file.path('lib', 'inst', 'shiny', basename(path)),
              file.path('doc', 'word', 'sap'),
              file.path('doc', 'excel'),
              'tmp',
              'ref',
              'doc')
    for (d in dirs) {
        dir.create(file.path(path, d), recursive = TRUE, showWarnings = FALSE)
    }
    ## Create/copy template files
    file.create(file.path(path, 'README.md'))
    file.create(file.path(path, 'lib', 'DESCRIPTION'))
    invisible(NULL)
}
|
dc5348bd9da5b499a9101730d738c9c2c1a39c9e
|
007bb51eb8faec440faa73b8bfeb8749e5cb9faf
|
/Cropyields.R
|
5a065b7124f1f896d194f55f015ac3242fec7f13
|
[] |
no_license
|
DanMungai1/TidyTuesday
|
3dad84b1ec974bef17ce95ae0d6080710dda8c91
|
843c310d15ca7f0667831270a2d4a5c0f897b614
|
refs/heads/master
| 2023-01-22T16:46:41.992464
| 2020-11-17T18:19:09
| 2020-11-17T18:19:09
| 282,913,158
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,061
|
r
|
Cropyields.R
|
#Load Packages
library(tidyverse)
library(ggthemes)
##Loading Data
# TidyTuesday 2020 week 36: global crop yields (tonnes per hectare).
tuesdata <- tidytuesdayR::tt_load(2020, week = 36)
key_crop_yields <- tuesdata$key_crop_yields
fertilizer <- tuesdata$cereal_crop_yield_vs_fertilizer_application
# Kenya, all crops except soybeans/cocoa/peas, years 2000 onwards.
Kenya <- key_crop_yields %>%
pivot_longer(cols = 4:14, names_to = "produce", values_to = "production") %>%
filter(Entity == "Kenya",
!produce %in% c("Soybeans (tonnes per hectare)",
"Cocoa beans (tonnes per hectare)",
"Peas (tonnes per hectare)"),
!Year <= 1999)
# Kenya, bananas and potatoes only (cleaned column names).
Kenya1 <- key_crop_yields %>%
janitor::clean_names() %>%
rename_with(~ str_remove(., "_tonnes_per_hectare")) %>%
pivot_longer(cols = 4:14, names_to = "produce", values_to = "production") %>%
filter(entity == "Kenya",
produce %in% c("bananas", "potatoes"),
!year <= 1999)
# Kenya vs Uganda comparison data, bananas and potatoes.
KenyaUganda <- key_crop_yields %>%
janitor::clean_names() %>%
rename_with(~ str_remove(., "_tonnes_per_hectare")) %>%
pivot_longer(cols = 4:14, names_to = "produce", values_to = "production") %>%
filter(entity %in% c("Kenya", "Uganda"),
produce %in% c("bananas", "potatoes"),
!year <= 1999)
# NOTE(review): appending ggsave() to a ggplot chain with '+' relies on
# ggsave() being evaluated (saving the previous plot) as a side effect and
# errors in recent ggplot2 versions; conventionally ggsave() is called as a
# separate statement after printing the plot — verify against the installed
# ggplot2 version.
ggplot(filter(KenyaUganda, produce == "bananas"), aes(factor(year), production, fill = entity)) +
geom_bar(stat = "identity", position = "dodge") +
labs(title = "Banana Production in Kenya and Uganda",
x = "Year",
y= "Production Per Hectare") + theme_fivethirtyeight() +
ggsave("KenyaUgandabanana.png")
ggplot(filter(KenyaUganda, produce == "potatoes"), aes(factor(year), production, fill = entity)) +
geom_bar(stat = "identity", position = "dodge") +
labs(title = "Potatoes Production in Kenya and Uganda",
x = "Year",
y= "Production Per Hectare") + theme_fivethirtyeight() +
ggsave("KenyaUgandapotatoes.png")
ggplot(filter(Kenya1, produce == "bananas"), aes(factor(year), production)) + geom_point()+
geom_line(group = 2)+
labs(title = "Banana Production in Kenya",
x = "Year",
y= "Production Per Hectare") + theme_fivethirtyeight() +
ggsave("Banana.png")
ggplot(filter(Kenya1, produce == "potatoes"), aes(factor(year), production)) + geom_point()+
geom_line(group = 2)+
labs(title = "Potatoes Production in Kenya",
x = "Year",
y= "Production Per Hectare") + theme_fivethirtyeight()+
ggsave("potatoes.png")
ggplot(Kenya, aes(factor(Year), production, fill = produce)) +
geom_bar(stat = "identity") + coord_flip() +
facet_wrap(~produce, scales = "free") +
labs(title = "Crop production in Kenya",
subtitle = "Crop Production over the last 20 years",
caption = "courtesy of Dan Mungai")+
guides(fill = "none") +
theme_fivethirtyeight() +
theme(plot.title = element_text(hjust = 0.5),
plot.subtitle = element_text(hjust = 0.5)) +
ggsave("Different-crops.png")
ggplot(Kenya, aes(factor(Year), production, fill = produce)) +
geom_bar(stat = "identity") + coord_flip() +
labs(title = "Crop production in Kenya",
subtitle = "Crop Production over the last 20 years")+
guides(fill = "none") +
theme_fivethirtyeight() +
theme(plot.title = element_text(hjust = 0.5),
plot.subtitle = element_text(hjust = 0.5)) +
ggsave("General-trend.png")
###Fertilizer
# Fertilizer-to-cereal-yield ratio for Kenya since 2000.
# NOTE(review): year >= "2000" compares against a string; this works only if
# 'year' is character/comparable — confirm the column type, numeric 2000 would
# be safer. Also note KenyaFert ends up holding ggsave()'s return value, not
# a plot object.
KenyaFert <- fertilizer %>% janitor::clean_names() %>%
filter(entity == "Kenya",
year >= "2000") %>%
rename(fertilizer = nitrogen_fertilizer_use_kilograms_per_hectare,
cereal_yield = cereal_yield_tonnes_per_hectare) %>%
mutate(fertilizer = fertilizer/1000,
ratio = fertilizer/cereal_yield) %>%
ggplot(aes(factor(year), ratio)) + geom_bar(stat = "identity") +
labs(title = "The fertilizer to cereal yield Ratio",
x = "year", y= "fertilizer:cereal yield ratio")+
theme_fivethirtyeight() +
theme(plot.title = element_text(hjust = 0.5),
plot.subtitle = element_text(hjust = 0.5)) +
ggsave("fertilizercereal-yield-ratio.png")
|
f42c212ebdba3066c57b53b34296a76417b80bc4
|
bff934b8e0f51eadce86bcb5c19597bce927480c
|
/tests/testthat/helper-set-token.R
|
2592c4c3ffab4a1dd6e285584a5cc51057a8f10d
|
[] |
no_license
|
richierocks/yelp
|
84436adc4e9a85b035a01a8543d489cdb9b71d9c
|
7b9bce77d12ad9bb7af2919477560e6a877367f6
|
refs/heads/master
| 2021-05-08T09:47:07.291829
| 2018-10-16T00:21:15
| 2018-10-16T00:21:15
| 106,317,783
| 6
| 4
| null | 2023-02-06T10:57:55
| 2017-10-09T18:04:36
|
R
|
UTF-8
|
R
| false
| false
| 333
|
r
|
helper-set-token.R
|
set_token <- function() {
  # Ensure YELP_ACCESS_TOKEN is set for the test run.
  # Returns (invisibly) the pre-existing value of the env var, or NA if it
  # was unset, so the caller can later restore it with unset_token().
  old_token <- Sys.getenv("YELP_ACCESS_TOKEN", NA)
  if(is.na(old_token)) {
    # Fall back to the token fixture stored alongside the tests.
    # NOTE(review): test_path() comes from testthat; assumes the RDS fixture
    # exists in the test directory.
    Sys.setenv(
      YELP_ACCESS_TOKEN = readRDS(test_path("sample_yelp_access_token.rds"))
    )
  }
  invisible(old_token)
}
unset_token <- function(token) {
  # Restore YELP_ACCESS_TOKEN to a value previously captured by set_token().
  #
  # token: the previous value; NA (variable was never set) or "" means there
  #        is nothing to restore, so the environment is left untouched.
  #
  # BUG FIX: set_token() returns NA when the variable was unset, and
  # nzchar(NA) is TRUE, so the original code wrote the literal string "NA"
  # into the environment variable. Guard with is.na() first.
  if(!is.na(token) && nzchar(token)) {
    Sys.setenv(YELP_ACCESS_TOKEN = token)
  }
  invisible(NULL)
}
|
9ec5d478891738ef97ea3f41dda3a0caf4ac8316
|
62c7a28fcf9a9bb29368b2a733ed86819dbae7a8
|
/Ex9.R
|
93a87b779713262f322bf7171d350ec76cee9d75
|
[] |
no_license
|
andrewmackinn/Biocomp-Fall2018-181102-Exercise9
|
4a6b7c6dde85b161fbe7fce0aa1f90e7f4b27c9a
|
db71a61fdfc8646ba2bfc4044f1ee6281c6cd6fc
|
refs/heads/master
| 2020-04-04T11:14:29.816797
| 2018-11-09T14:28:01
| 2018-11-09T14:28:01
| 155,883,631
| 0
| 0
| null | 2018-11-02T15:15:23
| 2018-11-02T15:15:22
| null |
UTF-8
|
R
| false
| false
| 1,105
|
r
|
Ex9.R
|
# online dataset, comparison of budget and domestic gross
#reading dataframe
data = read.csv("moviedata.csv", header = TRUE, stringsAsFactors= FALSE )
head(data)
#load the needed packages
library(ggplot2)
library(gridBase)
library(gridExtra)
#get data into the correct forms
# columns may be read as character; coerce to numeric/factor before plotting
data$Budget = as.numeric(as.character(data$Budget))
data$Domestic.Gross = as.numeric(as.character(data$Domestic.Gross))
data$Movie = as.factor(data$Movie)
#make the plots
# budget vs. domestic gross with a linear fit (no confidence band)
ggplot(data= data, aes(x = Budget, y = Domestic.Gross)) +
geom_point() +
stat_smooth(method = "lm", se = F)
#create object with the data
# NOTE(review): 'sample' shadows base::sample for the rest of the script.
sample = read.table("data.txt", header = TRUE, stringsAsFactors = FALSE, sep = ",")
#create bar plot
b = ggplot(data = sample)
# NOTE(review): fun.y was deprecated in ggplot2 >= 3.3 in favour of fun —
# confirm against the installed ggplot2 version.
b+geom_bar(aes(x=as.factor(region), y=observations), stat = "summary", fun.y ="mean", fill ='black', color = 'black') + theme_classic()
#create scatter plot with jitter
s = b +geom_jitter(aes(x = as.factor(region), y = observations))
s
#Using scatter plot does make them look considerably different. Bar plot makes them all look equal, but scatter plot reveals different distribution.
|
ded54e016757145035c7db1183cf83bbb114c89f
|
9522e1c12bb96df3e1889017e465cc3220e75ea3
|
/R/RandomUnifArray-class.R
|
5591e03a382a4f41ab5beea2b0f0eac3bdf8b23c
|
[] |
no_license
|
LTLA/DelayedRandomArray
|
66b577d28a964d8f387172715a37b4f1785b3205
|
a44a55818dd26cf0c792b3696d6f565eea15e60f
|
refs/heads/master
| 2023-04-22T13:04:59.328711
| 2021-04-29T20:30:03
| 2021-04-29T20:30:03
| 357,088,359
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,439
|
r
|
RandomUnifArray-class.R
|
#' DelayedArray of random uniform values
#'
#' A \linkS4class{DelayedArray} subclass that performs on-the-fly sampling of uniformly distributed values.
#'
#' @param dim Integer vector of positive length, specifying the dimensions of the array.
#' @param min,max Numeric vector used as \code{min} and \code{max}, respectively, in \code{\link{qunif}}.
#' Alternatively, a numeric array-like object with the same dimensions as \code{dim}.
#' @param chunkdim Integer vector of length equal to \code{dim}, containing the dimensions of each chunk.
#' @param seed A RandomUnifArraySeed object.
#'
#' @return
#' All constructors return an instance of a RandomUnifArray object,
#' containing random draws from a uniform distribution with the specified parameters.
#'
#' @author Aaron Lun
#'
#' @aliases
#' RandomUnifArray-class
#' RandomUnifArraySeed-class
#' RandomUnifMatrix-class
#' sampleDistrParam,RandomUnifArraySeed-method
#' sampleDistrFun,RandomUnifArraySeed-method
#' matrixClass,RandomUnifArray-method
#'
#' @seealso
#' The \linkS4class{RandomArraySeed} class, for details on chunking and the distributional parameters.
#'
#' @examples
#' X <- RandomUnifArraySeed(c(1e5, 1e5))
#' Y <- DelayedArray(X)
#' Y
#'
#' # Fiddling with the distribution parameters:
#' X2 <- RandomUnifArraySeed(c(1e5, 1e5), min=1:1e5, max=1:1e5*2)
#' Y2 <- DelayedArray(X2)
#' Y2
#'
#' # Using another array as input:
#' library(Matrix)
#' min <- rsparsematrix(1e5, 1e5, density=0.00001)
#' X3 <- RandomUnifArraySeed(c(1e5, 1e5), min=min, max=DelayedArray(min)+1)
#' Y3 <- DelayedArray(X3)
#' Y3
#'
#' @docType class
#' @name RandomUnifArray-class
NULL
#' @export
#' @rdname RandomUnifArray-class
RandomUnifArraySeed <- function(dim, min=0, max=1, chunkdim=NULL) {
# Thin constructor around methods::new(); slot validation is handled by the
# class definition (declared elsewhere in the package).
new("RandomUnifArraySeed", dim=dim, min=min, max=max, chunkdim=chunkdim)
}
#' @export
# Names of the slots holding the distribution parameters for this seed.
setMethod("sampleDistrParam", "RandomUnifArraySeed", function(x) c("min", "max"))
#' @export
# Quantile function applied to the underlying uniform draws; with the min/max
# slot values, qunif yields Uniform(min, max) samples.
setMethod("sampleDistrFun", "RandomUnifArraySeed", function(x) stats::qunif)
#' @export
# 2D instances of RandomUnifArray are represented by the RandomUnifMatrix class.
setMethod("matrixClass", "RandomUnifArray", function(x) "RandomUnifMatrix")
#' @export
#' @rdname RandomUnifArray-class
setMethod("DelayedArray", "RandomUnifArraySeed", function(seed) new_DelayedArray(seed, Class="RandomUnifArray"))
#' @export
#' @rdname RandomUnifArray-class
RandomUnifArray <- function(dim, min=0, max=1, chunkdim=NULL) {
# Convenience wrapper: build the seed, then wrap it in a DelayedArray.
DelayedArray(RandomUnifArraySeed(dim, min, max, chunkdim=chunkdim))
}
|
c4f27f6634461c31dbd6ea01a09f12fdb040a102
|
af419fb17048e90038745b883ec30e1ee23ac5ff
|
/code/retrieving_data.R
|
6933c4518fae9b14be5967419c516771ec2b4b8c
|
[] |
no_license
|
vfuentesc/HW8_Choropleth_Map
|
53ffa3430a886969e79b1e44892a9553e93d0473
|
91ae1cfe0b3f294a78f76e9c745f2ea6c47129b3
|
refs/heads/main
| 2023-04-21T23:38:04.329508
| 2021-05-21T07:10:53
| 2021-05-21T07:10:53
| 369,435,114
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,646
|
r
|
retrieving_data.R
|
###################################
### README ########################
###################################
# The following code retrieves:
## (1) 2020 Presidential elections results by county
## (2) Employed/unemployed people by county by month for 2019-2020 years
# NOTE(review): rm(list = ls()) and setwd() in scripts are discouraged;
# retained unchanged to preserve the original workflow.
rm(list=ls())
# Loading Libraries
library(tidyverse)
library(rvest)
library(stringr)
library(lubridate)
library(blsAPI)
library(jsonlite)
# Setting Up WD
wd <- "C:/Users/fuent/OneDrive - The University of Chicago/Spring 2021/Data Visualization/Week 8/HW8"
setwd(wd)
# 1 -- 2020 Presidential Elections Results by county
# -- Data developed using The Guardian, townhall.com, Fox News, Politico, and the New York Times
# -- More details: https://github.com/tonmcg/US_County_Level_Election_Results_08-20
# BUG FIX: download to the same data/ path that is checked and read below
# (originally destfile pointed at the working directory, so the cached file
# was never found by the file.exists()/read_csv() calls).
if (!file.exists(file.path("data", "elections_2020.csv")))
  download.file("https://raw.githubusercontent.com/tonmcg/US_County_Level_Election_Results_08-20/master/2020_US_County_Level_Presidential_Results.csv",
                destfile = file.path("data", "elections_2020.csv"), mode = "wb")
elections_2020 <- read_csv(file.path("data", "elections_2020.csv"))
# 2 -- Employment/Unemployment data by county
# -- Data retrieved from the BLS using BLSapi
# -- More details: https://cran.r-project.org/web/packages/blsAPI/blsAPI.pdf
if (!file.exists(file.path("data", "laus_data.csv"))) {
  # SECURITY NOTE(review): API key hard-coded in the script; prefer reading it
  # from Sys.getenv() so it is not committed to version control.
  bls_key <- "e54e539aed6045f29dbc51133d1ad7fe"
  # BUG FIX: 'results_2020' was never defined; the county FIPS codes come
  # from the elections table loaded above.
  counties <- sort(unique(elections_2020$county_fips))
  seriesid <- c(paste("LAUCN", counties, "000000000", 4, sep = ""), # Employed
                paste("LAUCN", counties, "000000000", 5, sep = "")) # Unemployed
  list_laus <- list()
  # BUG FIX: batch count was 1:(n/50 + 1), which could produce a final batch
  # starting past the end of seriesid and index it backwards; use exactly
  # ceiling(n/50) batches of at most 50 series (the BLS API per-request cap).
  n_batch <- ceiling(length(seriesid) / 50)
  for (i in seq_len(n_batch)) {
    start <- (i - 1) * 50 + 1
    end <- min(start + 49, length(seriesid))
    payload <- list("seriesid" = seriesid[start:end],
                    "startyear" = 2019,
                    "endyear" = 2020,
                    "registrationKey" = bls_key)
    list_laus[[i]] <- blsAPI(payload, api_version = 2, return_data_frame = TRUE)
    if (i %% 50 == 0) # After 50 requests proceeds
      Sys.sleep(15) # A waiting time due to API rate limits
    cat("Retrieving:", paste0(end / length(seriesid) * 100, "%"), "\n")
  }
  laus_data <- do.call("rbind", list_laus)
  # BUG FIX: write the cache file where the read step below expects it (data/).
  write_csv(laus_data, file.path("data", "laus_data.csv"))
}
# Calculating the Jan-Oct 2020 and Jan-Oct 2019 unemployment rates
laus_data <-
  read_csv(file.path("data", "laus_data.csv")) %>%
  mutate(seriesID = parse_number(seriesID), # seriesID contains: fips code & status of employment
         GEOID = seriesID %/% 10000000000,
         GEOID = str_pad(GEOID, width = 5, side = "left", pad = 0),
         type = seriesID %% 10000000000,
         type = case_when(type == 4 ~ "unemployed",
                          type == 5 ~ "employed")) %>%
  # BUG FIX: '!period == c(11, 12)' recycled the comparison vector and did
  # not reliably drop Nov/Dec. parse_number() extracts the month regardless
  # of whether BLS period codes arrive as "M11" strings or bare numbers.
  filter(!parse_number(as.character(period)) %in% c(11, 12)) %>% # Removing November and December months from both years
  group_by(GEOID, year, type) %>%
  summarise(jobs = mean(value, na.rm = TRUE)) %>% # Average by GEOID/year/type
  mutate(share = jobs / sum(jobs, na.rm = TRUE)) %>%
  ungroup() %>%
  filter(type == "unemployed") %>%
  pivot_wider(id_cols = GEOID, names_from = year, values_from = share, names_prefix = "unemployment_") %>%
  mutate(delta_unemployment = unemployment_2020 - unemployment_2019)
unemployment_elections <-
  laus_data %>%
  left_join(elections_2020, by = c("GEOID" = "county_fips"))
write_csv(unemployment_elections, file.path("data", "unemployment_elections.csv"))
|
be999aa808297f9532d63a53420dbdabdf097d7f
|
a0414d8a9e187737f236f262148c3d721e11c600
|
/man/busca_fuzzy.Rd
|
5e5eba089503daacce4d668bb5e71beff294914f
|
[] |
no_license
|
courtsbr/JurisMiner
|
695fb93b58754ef94ee834cac7b1a4ec754da9e1
|
d94b3cbd9575bb1833fee5b4c8ed44624a969f1d
|
refs/heads/master
| 2023-06-22T21:57:27.847425
| 2023-06-21T11:36:51
| 2023-06-21T11:36:51
| 91,446,216
| 24
| 10
| null | null | null | null |
UTF-8
|
R
| false
| true
| 634
|
rd
|
busca_fuzzy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/busca_fuzzy.R
\name{busca_fuzzy}
\alias{busca_fuzzy}
\title{Procura a palavra ou frase do segundo vetor que melhor
se aproxima do primeiro. Particularmente útil para
comparar nomes de municípios.}
\usage{
busca_fuzzy(x, y)
}
\arguments{
\item{x}{Vetor de strings de referência.}
\item{y}{Vetor de strings a serem buscados.}
}
\value{
vetor com as strings de y próximos
de x.
}
\description{
Procura a palavra ou frase do segundo vetor que melhor
se aproxima do primeiro. Particularmente útil para
comparar nomes de municípios.
}
|
aa0f7483c94bb11670a9e349b62af4c00ce3fbc1
|
339cff1cc63bd09839168835c0190fefbd46db77
|
/R/Veterinarian_figs.R
|
e8ef59afc08e9860cbe662a95065dafa898300ee
|
[] |
no_license
|
cmzambranat/sci_adv_pandemics
|
4370c6a90fba91a87bfad24cd28b97db64d389e2
|
4ccb8b728ebf058ab6cba2d0a8118b35e79d3fb7
|
refs/heads/main
| 2023-05-28T17:27:57.049683
| 2021-06-17T07:30:40
| 2021-06-17T07:30:40
| 377,739,503
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,793
|
r
|
Veterinarian_figs.R
|
library(here)
library(tidyverse)
library(readxl)
library(lmodel2)
library(scales)
library(ggrepel)
library(countrycode)
library(styler)
# Global options
theme_set(
theme_bw(base_size = 18)
)
# read data
# World Bank data country classification
# Keep only the four income-group rows (high / low / lower-middle / upper-middle).
wb_class = read_xls(here("data/worldbank_classification.xls"), sheet = "Groups") %>%
filter(GroupCode == 'HIC' | GroupCode == 'LIC' | GroupCode == 'LMC' | GroupCode == 'UMC')
# Data provided by A. Dobson
# Add "latin1" encoding to remove unicode characters
# Remove . from column name
# Convert upper case to lower case in Country column
# is_bold flags the ten highlighted countries; new_country carries the ranked
# "(1) Spain" ... "(10) Cuba" labels used by geom_text_repel in the plot below.
vet_data = read_csv(here('data/VetPerXNation2Reduced.csv'),
locale = readr::locale(encoding = "latin1")) %>%
mutate(plot_name = str_replace_all(Name, "[.]", "")) %>%
mutate(
Country = str_to_title(Country),
# Fuzzy-standardized country name used for all countrycode() lookups.
stand_country = str_to_title(rangeBuilder::standardizeCountry(Country, fuzzyDist = 25)),
is_bold = case_when(
stand_country == 'Spain' ~ 'bold',
stand_country == 'Uruguay' ~ 'bold',
stand_country == 'Falkland Islands' ~ 'bold',
stand_country == 'United States' ~ 'bold',
stand_country == 'United Kingdom' ~ 'bold',
stand_country == 'France' ~ 'bold',
stand_country == 'Venezuela' ~ 'bold',
stand_country == 'Canada' ~ 'bold',
stand_country == 'Mongolia' ~ 'bold',
stand_country == 'Cuba' ~ 'bold',
TRUE ~ 'plain'),
# Manual fixes for names the fuzzy matcher cannot resolve.
stand_country = case_when(
Country == 'Sthelena' ~ 'Saint Helena',
Country == 'Dr Congo' ~ 'Democratic Republic of the Congo',
TRUE ~ stand_country
),
continent = countrycode(stand_country, origin = 'country.name', destination = 'continent'),
region = countrycode(stand_country, origin = 'country.name', destination = 'region'),
iso3c = countrycode(stand_country, origin = 'country.name', destination = 'iso3c'),
new_country = case_when(
stand_country == 'Spain' ~ '(1) Spain',
stand_country == 'Uruguay' ~ '(2) Uruguay',
stand_country == 'Falkland Islands' ~ '(3) Falkland Islands',
stand_country == 'United States' ~ '(4) USA',
stand_country == 'United Kingdom' ~ '(5) UK',
stand_country == 'France' ~ '(6) France',
stand_country == 'Venezuela' ~ '(7) Venezuela',
stand_country == 'Canada' ~ '(8) Canada',
stand_country == 'Mongolia' ~ '(9) Mongolia',
stand_country == 'Cuba' ~ '(10) Cuba',
stand_country == 'Bosnia And Herzegovina' ~ 'BIH',
TRUE ~ stand_country)
) %>%
left_join(wb_class, by = c('iso3c' = 'CountryCode')) %>%
# Backfill income group for territories absent from the World Bank table
# (overseas territories/departments treated as high income).
mutate(GroupName = case_when(
iso3c == 'FLK' ~ 'High income',
iso3c == 'GUF' ~ 'High income',
iso3c == 'GLP' ~ 'High income',
iso3c == 'MTQ' ~ 'High income',
iso3c == 'MYT' ~ 'High income',
iso3c == 'REU' ~ 'High income',
iso3c == 'SHN' ~ 'High income',
TRUE ~ GroupName
))
# Fit MA Type II regression
# Code provided by A. Dobson
# Model II (major-axis) regression of veterinary staff on population, both
# log-transformed; printing the fit reports the MA slope/intercept estimates.
VetxPopFit2 <- lmodel2(log(VetsStaff) ~ log(Pop20), data = vet_data, nperm = 99)
VetxPopFit2
# Back-transformed MA trend line used by the plot below; the magic numbers are
# the intercept/slope taken from the fit printed above.
# NOTE(review): the model uses natural log but the back-transform uses 10^ --
# confirm the constants were read from a log10-scale fit.
# (The original also preallocated MAFit/MAFitU/MAFitL as numeric(300); MAFit
# was immediately overwritten and the upper/lower vectors were never used, so
# that dead code is removed.)
MAFit <- (10^-2.6578) * vet_data$Pop20^0.8551
# Plot Number of Veterinarians, Population
# Log-log scatter of vet staff vs population with the MA fit (MAFit) overlaid.
vetxpop =
ggplot(vet_data) +
geom_point(aes(Pop20, VetsStaff, colour = 'black')) +
scale_x_log10(breaks = trans_breaks("log10", function(x) 10^x),
labels = trans_format("log10", math_format(10^.x))) +
scale_y_log10(breaks = trans_breaks("log10", function(x) 10^x),
labels = trans_format("log10", math_format(10^.x))) +
geom_line(aes(Pop20, MAFit, colour = 'red')) +
xlab("Population") +
ylab("Number of Veterinary Staff") +
scale_color_brewer(type = "qual",
palette = 'Set1',
labels = c("Observed data", "Type II: 0.855")) +
theme(legend.position = c(0.8, 0.2),
legend.title = element_blank(),
legend.key = element_blank(),
legend.background = element_blank(),
text = element_text(
face = 'bold',
family = 'Helvetica'
)
) +
annotation_logticks()
#
ggsave(here("figures/vet_pop_size.png"), vetxpop,
device = 'png', width = 2, height = 2, dpi = 300, scale = 4)
# Vets of the world plot
# Uses development version ggrepel 0.9.0
# Need to print directly on disk, otherwise will produce an error
# Vet staff per citizen vs country area, coloured by World Bank income group,
# with the ten ranked countries labelled in bold.
vets_world =
vet_data %>%
filter(Country != "Sint Maarten") %>%
ggplot(aes(Area, VetsStaff / Pop20)) +
geom_point(aes(color = GroupName)) +
scale_x_log10(
breaks = trans_breaks("log10", function(x) 10 ^ x),
labels = trans_format("log10", math_format(10 ^ .x))
) +
scale_y_log10(
breaks = trans_breaks("log10", function(x) 10 ^ x),
labels = trans_format("log10", math_format(10 ^ .x))
) +
geom_text_repel(
aes(label = new_country, fontface = is_bold),
segment.color = 'grey50',
max.overlaps = 11,
#segment.curvature = -1e-20,
size = 3,
family = "Helvetica",
point.padding = 0.05
) +
xlab(bquote(bold("Area (km" ^ 2 ~ ")"))) +
ylab("Veterinary Staff / Citizen") +
theme(
text = element_text(
face = 'bold',
family = 'Helvetica'
),
legend.position = c(0.2, 0.12),
legend.title = element_blank()
) +
annotation_logticks() +
scale_color_brewer(
type = "qual",
palette = 'Set1',
breaks = c(
"Low income",
"Lower middle income",
"Upper middle income",
"High income"
),
labels = c(
"Low-income economies",
"Lower-middle-income economies",
"Upper-middle-income economies",
"High-income economies"
)
)
# Supplementary information, full size page
ggsave(here("figures/vet_citizen.png"), vets_world,
device = 'png', width = 11, height = 8.5, dpi = 300, scale = 1)
|
af08570d6e7f20983a5453227e3d02b8f0e96822
|
cc88465c29f245ff7f499a295ca62c1898fafd1e
|
/final_project_fxn.R
|
630e3e84fc2bb6ca7e7a4d690bdfe7f02e9bd512
|
[] |
no_license
|
alextsfox/LICOR_6400-Cleaner
|
5cc885c4973dd4881662f739baa6809427ede079
|
dd0fbd0f69c067c35af96c2c495e6eb2a2f60087
|
refs/heads/master
| 2022-03-27T12:03:31.394134
| 2020-01-14T19:34:05
| 2020-01-14T19:34:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,222
|
r
|
final_project_fxn.R
|
#LC takes IRGA data and removes all non-data rows. It also creates a Comment column that stores data taken after a Remark
# LC(dat): clean a raw LICOR-6400 (IRGA) export that was read into a single
# data frame. It (1) takes the row whose first cell is "Obs" as the header,
# (2) prepends a `comment` column holding the most recent remark text, and
# (3) drops every row whose second column is not numeric, leaving only
# observation rows.
LC<-function(dat){
#Renames the columns
# Use the first row whose column 1 equals "Obs" as the column names.
names(dat)<- as.character(unlist(dat[min(which(dat[,1] == "Obs")),]))
#creates a comments column
dat$comment<-NA
# Move the new comment column to the front.
dat<-dat[c(ncol(dat),1:(ncol(dat)-1))]
#strips out all Remarks that are blank
# NOTE(review): `dat[-which(cond), ]` drops EVERY row when which() is empty;
# this only works if at least one blank remark exists -- confirm.
dat<-dat[-(which(dat[,2] == "Remark=" & dat[,3] == "")),]
#This is to put comments in comment column
# Walk the rows top to bottom, carrying the last remark forward. ifelse() is
# used here purely for its side effects (the assignments in its branches);
# with a scalar test only the taken branch is evaluated, but this is fragile
# -- NOTE(review): plain if/else would be clearer and safer.
for (i in 2:nrow(dat)){
ifelse(dat[i,2] == 'Remark=',
# Remark rows: a remark without a double quote is stored verbatim;
# remarks containing "=" or "Launched" reuse the previous comment;
# otherwise the text after the timestamp (split on ":" then spaces)
# becomes the new comment.
ifelse(grepl(pattern = "\"",dat[i,3]) == FALSE,
dat[i,1] <- dat[i,3],
ifelse(grepl(pattern = "=",dat[i,3]),
dat[i,1] <- dat[i-1,1],
ifelse(grepl(pattern = "Launched",dat[i,3]),
dat[i,1]<-dat[i-1,1], dat[i,1]<-paste(stringr::str_split(stringr::str_split(dat[i,3],":", simplify = TRUE)[3], " ", simplify= TRUE)[-1], collapse = " "))))
# Non-remark rows inherit the previous row's comment.
,dat[i,1] <- dat[i-1,1])
}
#####rows to exclude#####
#makes all numbers numeric and non numbers NAs
dat[,2]<-as.numeric(as.character(unlist(dat[,2])))
#removes all NA rows
# NOTE(review): same `-which()` footgun as above -- if no row is NA this
# returns an empty data frame; confirm headers/remarks always remain here.
dat<- dat[-c(which(is.na(dat[,2]))),]
return(dat)
}
#Get.LC.Files extracts all .csv files in a folder and puts them into one continuous data frame
#
# Arguments:
#   wd      -- optional directory to read files from; the caller's working
#              directory is restored on exit (default NULL = current dir).
#   filetag -- optional substring shared by the file names to import
#              (default NULL = import every .csv file).
#   type    -- file extension; only "csv" is supported.
# Returns: a single data frame with the rows of all imported files.
Get.LC.Files <- function(wd = NULL, filetag = NULL, type = "csv") {
  if (type != "csv") {
    stop('files need to be .csv files')
  }
  # Fix: the original tested `wd != FALSE`, which errors on the NULL default
  # (so Get.LC.Files() could never run). Only change directory when one is
  # supplied, and restore it when the function exits.
  if (!is.null(wd)) {
    old_wd <- setwd(wd)
    on.exit(setwd(old_wd), add = TRUE)
  }
  # Fix: the original passed cat(...) (which returns NULL) as the pattern, so
  # every file in the directory matched, not just *.csv.
  allFiles <- dir(pattern = paste0("\\.", type, "$"))
  #filetag is the text shared among all files you wish to import
  if (!is.null(filetag)) {
    # Fix: `str_dectect` was a typo and stringr was never loaded; grepl with
    # fixed = TRUE does a plain substring match.
    allFiles <- allFiles[grepl(filetag, allFiles, fixed = TRUE)]
  }
  outDF <- data.frame()
  for (f in allFiles) {
    # Read the current file and append it to the output.
    outDF <- dplyr::bind_rows(outDF, read.delim(f, sep = ","))
  }
  return(outDF)
}
# Script entry point: import every .csv in the current directory, then clean
# the combined IRGA output with LC().
dat<-Get.LC.Files()
dat<-LC(dat)
|
a753e75c5bd230fe21c77d89c2c455efc5868737
|
dc5d0864d557e6113fe49a2c6e7318dd290cfd36
|
/man/bfsl_control.Rd
|
2ee5b7cd262c96c0933405965b02d1685ddb0117
|
[
"MIT"
] |
permissive
|
pasturm/bfsl
|
dd56af17394edee8d6c22a796dee9d00b6f22b7b
|
9096b1e682b4c007ee7bebf090f5a6797246e606
|
refs/heads/master
| 2022-09-11T22:34:49.292487
| 2022-08-26T13:44:03
| 2022-08-26T13:44:03
| 159,489,543
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 713
|
rd
|
bfsl_control.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bfsl.R
\name{bfsl_control}
\alias{bfsl_control}
\title{Controls the Iterations in the bfsl Algorithm}
\usage{
bfsl_control(tol = 1e-10, maxit = 100)
}
\arguments{
\item{tol}{A positive numeric value specifying the tolerance level for the
convergence criterion}
\item{maxit}{A positive integer specifying the maximum number of iterations allowed.}
}
\value{
A \code{list} with two components named as the arguments.
}
\description{
\code{bfsl_control} allows the user to set some characteristics of the \code{bfsl}
best-fit straight line algorithm.
}
\examples{
bfsl_control(tol = 1e-8, maxit = 1000)
}
\seealso{
\code{\link{bfsl}}
}
|
3014fa8360d2b7d39a30407e891e5336aba4ab60
|
590bf5c505c4ece3d1dfcfcc2ecc1a6313edf1ef
|
/R/ConcatActions_m0.R
|
3d22942c1d4bbbdca2eae152f95ba416c6668612
|
[] |
no_license
|
cran/LOGAN
|
509208570af1d762d243fd96e169341aa92e14b0
|
da705973772d32e74a6a6bc783f21aca80a1e038
|
refs/heads/master
| 2022-11-02T19:42:09.787980
| 2022-10-25T07:47:56
| 2022-10-25T07:47:56
| 184,891,067
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,524
|
r
|
ConcatActions_m0.R
|
#' Concatenate events
#'
#' This function allows you to concatenate event actions from diferent variables
#' in a unique vector.
#'
#' @param data A \code{matrix} or \code{data.frame} where the concatenated
#'   events are
#' @param concat.events a vector where all the events are listed. Each element
#'   of this vector needs to be of a \code{quo()} type.
#'
#' @return This function returns a \code{data.frame} with the concatenated
#'   events in the 'event.type' variable.
#'
#' @details The output dataset will be identical to the input dataset, except
#'   for the addition of one column in the end, called "event.type". Each row of
#'   event.type contains the values of concat.events of all the rows.
#'
#' @examples
#' # Data preparation
#' df <- cp025q01
#' df$id <- paste(df[, 1], df[, 2], df[, 3], sep = "-")
#' df <- m0$TrimVar(df, c("event", "event_type", "diag_state"))
#'
#' # Function demonstration
#' df.conc <- m0$ConcatActions(df, c(rlang::quo(event), rlang::quo(event_type)))
#' names(df)
#' names(df.conc) # notice the extra variable in the end
#' table(df.conc$event.type)
#'
ConcatActions <- function(data, concat.events) {
  event.type <- NULL # Workaround for "no visible binding for global variable"
  # Build event.type incrementally: the first quosure initializes the column,
  # and every later quosure is appended with an underscore separator
  # (equivalent to paste(..., sep = "_") across all events).
  for (i in seq_along(concat.events)) { # seq_along() is the idiomatic, empty-safe index
    events <- concat.events[[i]]
    if (i == 1) {
      data <- dplyr::mutate(data, event.type = !!events)
    } else {
      data <- dplyr::mutate(data, event.type = paste0(
        event.type, "_",
        !!events
      ))
    }
  }
  return(data)
}
|
8d5eea783e1417678ffc3c0f1d50535452f94d1f
|
55ba4622941c73a4f1002f9c2b57bf46b614aa3d
|
/code/Global_analysis/Create_boxplots.R
|
a199dd88a9cccaa976f3dcdb97e3df7d1c8ed93c
|
[
"MIT"
] |
permissive
|
malihhhh/CUIMC-NYP_COVID_autopsy_lung
|
2089693eeb0bd08973026578b285a8a16f266ec0
|
bf6fc4add36095c7bdc12b6e6ede33d768530bb7
|
refs/heads/main
| 2023-04-24T00:46:49.969287
| 2021-05-03T14:42:08
| 2021-05-03T14:42:08
| 548,264,345
| 1
| 0
|
MIT
| 2022-10-09T08:02:03
| 2022-10-09T08:02:02
| null |
UTF-8
|
R
| false
| false
| 20,356
|
r
|
Create_boxplots.R
|
#!/usr/bin/env Rscript
### title: Generate boxplots of cell type frequencies, grouped by either disease
### status, or sex of samples author: Yiping Wang date: 02/08/2021

# Colour palette shared by every plot below (the plots use the first two
# entries for the COVID-19/Control and Male/Female contrasts).
# Fix: the original `consistentcolors = colors <- c(...)` also created a
# global `colors`, masking base::colors(); a single binding is sufficient.
consistentcolors <- c("#006E82", "#AA0A3C", "#8214A0", "#00A0FA", "#FA5078",
                      "#005AC8", "#CC79A7", "#FAE6BE", "#0072B2", "#A0FA82",
                      "#F0F032", "#0AB45A", "#FA7850", "#14D2DC", "#FA78FA")
# Per-sample (orig.ident) cell type frequency tables at every annotation level.
# Each block tallies cells per sample x type and converts counts to
# within-sample fractions (freq = n / sum(n)).
df_tobesummed = data.frame(orig.ident = data_lungs_all$orig.ident, group = data_lungs_all$group,
cell_type_main = data_lungs_all$cell_type_main, cell_type_fine = data_lungs_all$cell_type_fine,
cell_type_intermediate = data_lungs_all$cell_type_intermediate, immune_status = data_lungs_all$immune_status)
df_summed = df_tobesummed %>% group_by(orig.ident, cell_type_intermediate, cell_type_main,
immune_status, group) %>% tally()
df_summed = df_summed %>% group_by(orig.ident) %>% mutate(freq = n/sum(n))
# calculate frequencies of cell_type_fine classes in each sample
df_tobesummed_fine = data.frame(orig.ident = data_lungs_all$orig.ident, group = data_lungs_all$group,
cell_type_fine = data_lungs_all$cell_type_fine, immune_status = data_lungs_all$immune_status)
df_summed_fine = df_tobesummed_fine %>% group_by(orig.ident, cell_type_fine, immune_status,
group) %>% tally()
df_summed_fine = df_summed_fine %>% group_by(orig.ident) %>% mutate(freq = n/sum(n))
write.table(df_summed_fine, "boxplot_proportions_fine.csv", sep = ",", row.names = F,
col.names = T, quote = F)
# calculate frequencies of cell_type_main classes in each sample
df_tobesummed_main = data.frame(orig.ident = data_lungs_all$orig.ident, group = data_lungs_all$group,
cell_type_main = data_lungs_all$cell_type_main, immune_status = data_lungs_all$immune_status)
df_summed_main = df_tobesummed_main %>% group_by(orig.ident, cell_type_main, immune_status,
group) %>% tally()
df_summed_main = df_summed_main %>% group_by(orig.ident) %>% mutate(freq = n/sum(n))
# calculate frequencies of cell_type_intermediate classes in each sample
df_tobesummed_intermediate = data.frame(orig.ident = data_lungs_all$orig.ident, group = data_lungs_all$group,
cell_type_intermediate = data_lungs_all$cell_type_intermediate, immune_status = data_lungs_all$immune_status)
df_summed_intermediate = df_tobesummed_intermediate %>% group_by(orig.ident, cell_type_intermediate,
immune_status, group) %>% tally()
df_summed_intermediate = df_summed_intermediate %>% group_by(orig.ident) %>% mutate(freq = n/sum(n))
# calculate frequencies of cell_type_intermediate classes only within immune
# compartment
df_tobesummed_intermediate_immune = data.frame(orig.ident = data_lungs_all$orig.ident,
group = data_lungs_all$group, cell_type_intermediate = data_lungs_all$cell_type_intermediate,
immune_status = data_lungs_all$immune_status)
df_tobesummed_intermediate_immune = df_tobesummed_intermediate_immune[df_tobesummed_intermediate_immune$immune_status ==
"Immune", ]
df_summed_intermediate_immune = df_tobesummed_intermediate_immune %>% group_by(orig.ident,
cell_type_intermediate, immune_status, group) %>% tally()
df_summed_intermediate_immune = df_summed_intermediate_immune %>% group_by(orig.ident) %>%
mutate(freq = n/sum(n))
# calculate frequencies of cell_type_intermediate classes only within nonimmune
# compartment
df_tobesummed_intermediate_nonimmune = data.frame(orig.ident = data_lungs_all$orig.ident,
group = data_lungs_all$group, cell_type_intermediate = data_lungs_all$cell_type_intermediate,
immune_status = data_lungs_all$immune_status)
df_tobesummed_intermediate_nonimmune = df_tobesummed_intermediate_nonimmune[df_tobesummed_intermediate_nonimmune$immune_status ==
"Non-immune", ]
df_summed_intermediate_nonimmune = df_tobesummed_intermediate_nonimmune %>% group_by(orig.ident,
cell_type_intermediate, immune_status, group) %>% tally()
df_summed_intermediate_nonimmune = df_summed_intermediate_nonimmune %>% group_by(orig.ident) %>%
mutate(freq = n/sum(n))
# make boxplots of cell type frequencies in COVID-19 and Control, using cell type
# frequencies defined above
# Each plot compares per-sample fractions between groups with Wilcoxon tests.
ggboxplot(df_summed_main, x = "cell_type_main", y = "freq", color = "group", add = "jitter") +
ylim(0, 0.8) + stat_compare_means(aes(group = group), label = "p.format", method = "wilcox.test") +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) + scale_colour_manual(values = consistentcolors[1:2])
ggsave(paste0("Figure_1E.pdf"), width = 11, height = 7)
ggboxplot(subset(df_summed_main, immune_status == "Immune"), x = "cell_type_main",
y = "freq", color = "group", add = "jitter") + ylim(0, 0.8) + stat_compare_means(aes(group = group),
label = "p.format", method = "wilcox.test") + theme(axis.text.x = element_text(angle = 90,
hjust = 1)) + scale_colour_manual(values = consistentcolors[1:2])
ggsave(paste0("cell_type_main_lungs_all_immune_boxplot.pdf"), width = 8, height = 7)
ggboxplot(subset(df_summed_main, immune_status == "Non-immune"), x = "cell_type_main",
y = "freq", color = "group", add = "jitter") + ylim(0, 0.8) + stat_compare_means(aes(group = group),
label = "p.format", method = "wilcox.test", size = 2) + theme(axis.text.x = element_text(angle = 90,
hjust = 1)) + scale_colour_manual(values = consistentcolors[1:2])
ggsave(paste0("cell_type_main_lungs_all_nonimmune_boxplot.pdf"), width = 8, height = 7)
# Extended Data Figure 2B
ggboxplot(df_summed_intermediate_nonimmune, x = "cell_type_intermediate", y = "freq",
color = "group", add = "jitter") + ylim(0, 1) + stat_compare_means(aes(group = group),
label = "p.format", method = "wilcox.test", size = 2, label.y = 0.9) + theme(axis.text.x = element_text(angle = 90,
hjust = 1)) + scale_colour_manual(values = consistentcolors[1:2])
ggsave(paste0("Extended_Data_Figure_2B.pdf"), width = 6,
height = 5)
# Extended Data Figure 2C
ggboxplot(df_summed_intermediate_immune, x = "cell_type_intermediate", y = "freq",
color = "group", add = "jitter") + ylim(0, 1) + stat_compare_means(aes(group = group),
label = "p.format", method = "wilcox.test", size = 2, label.y = 0.9) + theme(axis.text.x = element_text(angle = 90,
hjust = 1)) + scale_colour_manual(values = consistentcolors[1:2])
ggsave(paste0("Extended_Data_Figure_2C.pdf"), width = 6,
height = 5)
# NOTE(review): df_summed_fine_fibroblast and
# df_summed_fine_fibroblast_norm_allcells are only defined further down in
# this script; these next two plots fail unless those sections are run first
# (or these plots are moved below them) -- confirm intended execution order.
ggboxplot(df_summed_fine_fibroblast, x = "cell_type_fine", y = "freq", color = "group",
add = "jitter") + ylim(0, 0.8) + stat_compare_means(aes(group = group), label = "p.format",
method = "wilcox.test") + theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
scale_colour_manual(values = consistentcolors[1:2])
ggsave(paste0("cell_type_fine_lungs_all_fibroblast_boxplot.pdf"), width = 13, height = 7)
ggboxplot(df_summed_fine_fibroblast_norm_allcells, x = "cell_type_fine", y = "freq",
color = "group", add = "jitter") + ylim(0, 0.3) + stat_compare_means(aes(group = group),
label = "p.format", method = "wilcox.test") + theme(axis.text.x = element_text(angle = 90,
hjust = 1)) + scale_colour_manual(values = consistentcolors[1:2])
ggsave(paste0("cell_type_fine_lungs_all_fibroblast_norm_allcells_boxplot.pdf"), width = 13,
height = 7)
# Extended Data Figure 2A
ggboxplot(df_summed_intermediate, x = "cell_type_intermediate", y = "freq", color = "group",
add = "jitter") + ylim(0, 0.6) + stat_compare_means(aes(group = group), label = "p.format",
method = "wilcox.test", size = 2, label.y = 0.55) + theme(axis.text.x = element_text(angle = 90,
hjust = 1)) + scale_colour_manual(values = consistentcolors[1:2])
ggsave(paste0("Extended_Data_Figure_2A.pdf"), width = 12,
height = 5)
# make boxplot of all cell_type_intermediate frequencies in COVID-19 and Control, spread
# across two rows
# `class` splits the (alphabetically sorted) intermediate types 1-10 / 11-20
# between the two panel rows.
allcelltypesintermediate = sort(unique(df_summed_intermediate$cell_type_intermediate))
df_summed_intermediate$class = 1
df_summed_intermediate$class[df_summed_intermediate$cell_type_intermediate %in% allcelltypesintermediate[11:20]] = 2
p1 = ggboxplot(df_summed_intermediate[df_summed_intermediate$class == 1, ], x = "cell_type_intermediate",
y = "freq", color = "group", add = "jitter") + xlab("") + ylim(0, 0.8) + stat_compare_means(aes(group = group),
label = "p.format", method = "wilcox.test", size = 3, label.y = 0.7) + theme(axis.text.x = element_text(angle = 90,
hjust = 1, size = 7)) + scale_colour_manual(values = consistentcolors[1:2])
p2 = ggboxplot(df_summed_intermediate[df_summed_intermediate$class == 2, ], x = "cell_type_intermediate",
y = "freq", color = "group", add = "jitter") + guides(color = FALSE) + ylim(0,
0.8) + stat_compare_means(aes(group = group), label = "p.format", method = "wilcox.test",
size = 3, label.y = 0.7) + theme(axis.text.x = element_text(angle = 90, hjust = 1,
size = 7)) + scale_colour_manual(values = consistentcolors[1:2])
pdf("cell_type_intermediate_lungs_all_boxplot.pdf", width = 12, height = 10)
print(plot_grid(p1, p2, labels = "", nrow = 2, align = "hv", axis = "tblr"))
dev.off()
# calculate ratio of AT2 to AT1 cells in each patient, compare ratio in COVID-19 vs.
# Control samples in a boxplot (Figure 3K)
at2freq = df_summed_intermediate[df_summed_intermediate$cell_type_intermediate ==
    "AT2", ]$freq
at1freq = df_summed_intermediate[df_summed_intermediate$cell_type_intermediate ==
    "AT1", ]$freq
group = df_summed_intermediate[df_summed_intermediate$cell_type_intermediate == "AT1",
    ]$group
# Fix: the original recomputed this vectorized division inside a
# `for (i in 1:length(at2freq))` loop on every iteration; a single
# element-wise division is equivalent.
# NOTE(review): this assumes the AT2 and AT1 rows are aligned sample-by-sample
# (same orig.ident order, no sample missing either type) -- confirm.
atratio = at2freq / at1freq
df_at = data.frame(atratio = atratio, group = group, a = "")
ggboxplot(df_at, x = "a", y = "atratio", color = "group", add = "jitter") + stat_compare_means(aes(group = group),
    label = "p.format", method = "wilcox.test") + theme(axis.text.x = element_text(angle = 90,
    hjust = 1)) + scale_colour_manual(values = consistentcolors[1:2]) + ylab("AT2/AT1 Ratio") +
    xlab("")
ggsave(paste0("Figure_3K.pdf"), width = 4,
    height = 7)
# calculate frequencies of fibroblast classes, merging Intermediate pathological
# FB with Pathological FB
# Fractions here are computed among fibroblasts only (counts are restricted to
# FB classes before normalizing).
df_tobesummed_fine_fibroblast = data.frame(orig.ident = data_lungs_all$orig.ident,
group = data_lungs_all$group, cell_type_fine = data_lungs_all$cell_type_fine)
df_tobesummed_fine_fibroblast$cell_type_fine[df_tobesummed_fine_fibroblast$cell_type_fine ==
"Intermediate pathological FB"] = "Pathological FB"
# Keep fibroblast classes only. ("Intermediate pathological FB" in this list
# is redundant after the relabelling above.)
df_tobesummed_fine_fibroblast = df_tobesummed_fine_fibroblast[df_tobesummed_fine_fibroblast$cell_type_fine %in%
c("Adventitial FB", "Alveolar FB", "Mesothelial FB", "Other FB1", "Other FB2",
"Other FB3", "Other FB", "Intermediate pathological FB", "Pathological FB"),
]
df_summed_fine_fibroblast = df_tobesummed_fine_fibroblast %>% group_by(orig.ident,
cell_type_fine, group) %>% tally()
df_summed_fine_fibroblast = df_summed_fine_fibroblast %>% group_by(orig.ident) %>%
mutate(freq = n/sum(n))
# calculate frequencies of pathological fibroblasts in each patient, compare them
# between COVID-19 and Control samples in a boxplot (Figure 4G)
pathological_FB_freq = df_summed_intermediate[df_summed_intermediate$cell_type_intermediate ==
    "Pathological FB", ]$freq
group = df_summed_intermediate[df_summed_intermediate$cell_type_intermediate == "Pathological FB",
    ]$group
df_pfb = data.frame(pathological_FB_freq = pathological_FB_freq, group = group, a = "")
# Fix: plot the df_pfb data frame built just above; the original passed df_at,
# which has no pathological_FB_freq column.
ggboxplot(df_pfb, x = "a", y = "pathological_FB_freq", color = "group", add = "jitter") + stat_compare_means(aes(group = group),
    label = "p.format", method = "wilcox.test") + theme(axis.text.x = element_text(angle = 90,
    hjust = 1)) + scale_colour_manual(values = consistentcolors[1:2]) + ylab("Fraction among Fibroblasts") +
    xlab("Pathological Fibroblasts")
ggsave(paste0("Figure_4G.pdf"), width = 4,
    height = 7)
# calculate frequencies of fibroblast classes, normalizing by total frequency of
# all cells
df_tobesummed_fine_fibroblast_norm_allcells = data.frame(orig.ident = data_lungs_all$orig.ident,
    group = data_lungs_all$group, cell_type_fine = data_lungs_all$cell_type_fine)
df_summed_fine_fibroblast_norm_allcells = df_tobesummed_fine_fibroblast_norm_allcells %>%
    group_by(orig.ident, cell_type_fine, group) %>% tally()
# freq is computed over ALL cells in the sample before restricting to FBs.
df_summed_fine_fibroblast_norm_allcells = df_summed_fine_fibroblast_norm_allcells %>%
    group_by(orig.ident) %>% mutate(freq = n/sum(n))
df_summed_fine_fibroblast_norm_allcells = subset(df_summed_fine_fibroblast_norm_allcells,
    cell_type_fine %in% c("Adventitial FB", "Alveolar FB", "Mesothelial FB", "Other FB1",
        "Other FB2", "Other FB3", "Other FB", "Intermediate pathological FB", "Pathological FB"))
# Fix: the original filtered on (and plotted) a `fibroblast_type` column that
# is never created in this script, so subset() and ggboxplot() would error;
# the fibroblast labels live in `cell_type_fine`.
df_summed_fine_fibroblast_norm_allcells = subset(df_summed_fine_fibroblast_norm_allcells,
    cell_type_fine != "")
# plot COVID-19 and Control frequencies of fibroblast classes in boxplot
ggboxplot(df_summed_fine_fibroblast_norm_allcells, x = "cell_type_fine", y = "freq",
    color = "group", add = "jitter") + ylim(c(0, 0.4)) + stat_compare_means(aes(group = group),
    label = "p.format", method = "wilcox.test") + theme(axis.text.x = element_text(angle = 90,
    hjust = 1)) + scale_colour_manual(values = consistentcolors[1:2]) + ylab("Fraction of Cells")
ggsave(paste0("Extended_Data_Figure_12D.pdf"), width = 4, height = 7)
# calculate frequencies of macrophage classes, normalizing by total frequency of
# immune cells
df_tobesummed_fine_macrophage_norm_allimmunecells = data.frame(orig.ident = data_lungs_all$orig.ident,
group = data_lungs_all$group, cell_type_fine = data_lungs_all$cell_type_fine,
immune_status = data_lungs_all$immune_status)
df_tobesummed_fine_macrophage_norm_allimmunecells = subset(df_tobesummed_fine_macrophage_norm_allimmunecells,
immune_status == "Immune")
df_summed_fine_macrophage_norm_allimmunecells = df_tobesummed_fine_macrophage_norm_allimmunecells %>%
group_by(orig.ident, cell_type_fine, group) %>% tally()
# freq is the fraction among all immune cells in the sample; the subset to the
# four myeloid classes happens afterwards, so the denominators stay immune-wide.
df_summed_fine_macrophage_norm_allimmunecells = df_summed_fine_macrophage_norm_allimmunecells %>%
group_by(orig.ident) %>% mutate(freq = n/sum(n))
df_summed_fine_macrophage_norm_allimmunecells = subset(df_summed_fine_macrophage_norm_allimmunecells,
cell_type_fine %in% c("Alveolar macrophages", "Monocyte-derived macrophages",
"Monocytes", "Transitioning MDM"))
# plot COVID-19 and Control frequencies of macrophage classes in boxplot
ggboxplot(df_summed_fine_macrophage_norm_allimmunecells, x = "cell_type_fine", y = "freq",
color = "group", add = "jitter") + ylim(c(0, 0.4)) + stat_compare_means(aes(group = group),
label = "p.format", method = "wilcox.test") + theme(axis.text.x = element_text(angle = 90,
hjust = 1)) + scale_colour_manual(values = consistentcolors[1:2]) + ylab("Fraction of Cells")
ggsave(paste0("Extended_Data_Figure_4G.pdf"), width = 4, height = 7)
# for either just COVID-19 samples, just Control samples, or for all samples,
# perform similar analyses as above, but this time, comparing cell frequencies
# divided by sex, rather than disease status makes Extended Data Figure 3A and B
# The empty string "" means "all samples"; output filenames carry the
# corresponding suffix ("_cov", "_ctr", or "").
boxplotgroups = c("COVID-19", "Control", "")
for (i in 1:length(boxplotgroups)) {
if (boxplotgroups[i] != "") {
# Restrict to one disease group before tallying by sex.
data_lungs_all_temp = subset(data_lungs_all, group == boxplotgroups[i])
df_tobesummed_sex = data.frame(orig.ident = data_lungs_all_temp$orig.ident,
sex = data_lungs_all_temp$sex, cell_type_main = data_lungs_all_temp$cell_type_main,
cell_type_fine = data_lungs_all_temp$cell_type_fine, cell_type_intermediate = data_lungs_all_temp$cell_type_intermediate,
immune_status = data_lungs_all_temp$immune_status)
df_tobesummed_intermediate_sex = data.frame(orig.ident = data_lungs_all_temp$orig.ident,
sex = data_lungs_all_temp$sex, cell_type_intermediate = data_lungs_all_temp$cell_type_intermediate,
immune_status = data_lungs_all_temp$immune_status)
df_tobesummed_main_sex = data.frame(orig.ident = data_lungs_all_temp$orig.ident,
sex = data_lungs_all_temp$sex, cell_type_main = data_lungs_all_temp$cell_type_main,
immune_status = data_lungs_all_temp$immune_status)
if (boxplotgroups[i] == "Control") {
suffix = "_ctr"
} else {
suffix = "_cov"
}
} else {
# All samples combined, no filename suffix.
df_tobesummed_sex = data.frame(orig.ident = data_lungs_all$orig.ident, sex = data_lungs_all$sex,
cell_type_main = data_lungs_all$cell_type_main, cell_type_fine = data_lungs_all$cell_type_fine,
cell_type_intermediate = data_lungs_all$cell_type_intermediate, immune_status = data_lungs_all$immune_status)
df_tobesummed_intermediate_sex = data.frame(orig.ident = data_lungs_all$orig.ident,
sex = data_lungs_all$sex, cell_type_intermediate = data_lungs_all$cell_type_intermediate,
immune_status = data_lungs_all$immune_status)
df_tobesummed_main_sex = data.frame(orig.ident = data_lungs_all$orig.ident,
sex = data_lungs_all$sex, cell_type_main = data_lungs_all$cell_type_main,
immune_status = data_lungs_all$immune_status)
suffix = ""
}
# Per-sample frequency tables, same normalization as the disease-group
# tables earlier in the script.
df_summed_sex = df_tobesummed_sex %>% group_by(orig.ident, cell_type_intermediate,
cell_type_main, immune_status, sex) %>% tally()
df_summed_sex = df_summed_sex %>% group_by(orig.ident) %>% mutate(freq = n/sum(n))
df_summed_main_sex = df_tobesummed_main_sex %>% group_by(orig.ident, cell_type_main,
immune_status, sex) %>% tally()
df_summed_main_sex = df_summed_main_sex %>% group_by(orig.ident) %>% mutate(freq = n/sum(n))
df_summed_intermediate_sex = df_tobesummed_intermediate_sex %>% group_by(orig.ident,
cell_type_intermediate, immune_status, sex) %>% tally()
df_summed_intermediate_sex = df_summed_intermediate_sex %>% group_by(orig.ident) %>%
mutate(freq = n/sum(n))
ggboxplot(df_summed_main_sex, x = "cell_type_main", y = "freq", color = "sex",
add = "jitter") + ylim(0, 0.8) + stat_compare_means(aes(group = sex), label = "p.format",
method = "wilcox.test") + theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
scale_colour_manual(values = consistentcolors[1:2])
ggsave(paste0("cell_type_main_lungs_all_sex", suffix, "_boxplot.pdf"), width = 11,
height = 7)
# This plot can be either Extended Data Sex Differences a or b
ggboxplot(df_summed_intermediate_sex, x = "cell_type_intermediate", y = "freq",
color = "sex", add = "jitter") + ylim(0, 0.6) + stat_compare_means(aes(group = sex),
label = "p.format", method = "wilcox.test", size = 2, label.y = 0.55) + theme(axis.text.x = element_text(angle = 90,
hjust = 1)) + scale_colour_manual(values = consistentcolors[1:2])
ggsave(paste0("cell_type_intermediate_lungs_all_sex", suffix, "_onerow_boxplot.pdf"),
width = 12, height = 5)
# Two-row panel version: types 1-10 / 11-20 (alphabetical) per row.
allcelltypesintermediate = sort(unique(df_summed_intermediate_sex$cell_type_intermediate))
df_summed_intermediate_sex$class = 1
df_summed_intermediate_sex$class[df_summed_intermediate_sex$cell_type_intermediate %in%
allcelltypesintermediate[11:20]] = 2
p1 = ggboxplot(df_summed_intermediate_sex[df_summed_intermediate_sex$class ==
1, ], x = "cell_type_intermediate", y = "freq", color = "sex", add = "jitter") +
xlab("") + ylim(0, 0.8) + stat_compare_means(aes(group = sex), label = "p.format",
method = "wilcox.test") + theme(axis.text.x = element_text(angle = 90, hjust = 1,
size = 7)) + scale_colour_manual(values = consistentcolors[1:2])
p2 = ggboxplot(df_summed_intermediate_sex[df_summed_intermediate_sex$class ==
2, ], x = "cell_type_intermediate", y = "freq", color = "sex", add = "jitter") +
guides(color = FALSE) + ylim(0, 0.8) + stat_compare_means(aes(group = sex),
label = "p.format", method = "wilcox.test") + theme(axis.text.x = element_text(angle = 90,
hjust = 1, size = 7)) + scale_colour_manual(values = consistentcolors[1:2])
pdf(paste0("cell_type_intermediate_lungs_all_sex", suffix, "_boxplot.pdf"), width = 14,
height = 10)
print(plot_grid(p1, p2, labels = "", nrow = 2, align = "hv", axis = "tblr"))
dev.off()
}
|
3cb543deaeb91464bd9f11f1490c2175216482fb
|
32d1f1150418649d7a6593a0fdb264ca44cafd4d
|
/Sample_MeTWASCode.R
|
012af0bd5c825fa9464ffe688e59765178935c10
|
[] |
no_license
|
bhattacharya-a-bt/mostwas_suppdata
|
38b633b8cd615e30befbcd7af678cdad117999a1
|
2da3f38f2730d3f655ccce281281d257ffa91686
|
refs/heads/master
| 2021-06-14T01:14:12.145427
| 2020-11-30T16:10:19
| 2020-11-30T16:10:19
| 254,466,476
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,998
|
r
|
Sample_MeTWASCode.R
|
### Define a vector geneList with the genes you wish to train
### Sample MOSTWAS/MeTWAS training script: fits a mediator-enriched TWAS
### model for each gene in `geneList`, writing one model file per gene to
### MeTWASModels/.
### NOTE(review): `i` (a job/array index) and `geneList` must be defined in
### the calling environment before this script runs -- confirm with caller.
# Per-job scratch directory, recreated from scratch on every run.
tempFolder = paste0('temp',i,'/')
if (dir.exists(tempFolder)){
system(paste0('rm -r ',tempFolder))
}
if (!dir.exists(tempFolder)){
dir.create(tempFolder)
}
require(bigsnpr)
require(data.table)
require(MOSTWAS)
# Attach the PLINK genotype data, memory-mapped to a backing file inside the
# per-job scratch directory so parallel jobs don't collide.
snpObj = snp_attach(snp_readBed('TCGA_tot.bed',
backingfile = paste0(tempFolder,'temp',i,'_SnpObj.bk')))
# Stack mediator and mRNA expression tables into one matrix; both share the
# same column layout, and duplicate rows (by Mediator id) are dropped.
mediator = fread('mediators_TCGA_tot.tsv')
exp = fread('inter_mRNA_TCGA_121019.tsv')
colnames(exp) = colnames(mediator)
mediator = rbind(mediator,exp)
mediator = mediator[!duplicated(mediator$Mediator),]
# Genomic locations for mediators and genes merged into one lookup table;
# mediator positions get a dummy 1-bp interval (right = pos + 1).
medLocs = fread('TCGAmediatorlocs.txt')
medLocs$right = medLocs$pos + 1
geneLocs = fread('TCGAgenelocs.txt')
colnames(medLocs) = colnames(geneLocs)
medLocs = rbind(geneLocs,medLocs)
medLocs = medLocs[!duplicated(medLocs$geneid),]
rm(geneLocs)
covariates = fread('TCGA_covs.txt')
# Precomputed mediator-to-mRNA QTL associations (top 5 mediators per gene).
qtlFull = fread('TCGA_QTL_medsTomRNA_top5.tsv')
for (g in geneList){
print(g)
# Skip genes whose model file already exists -- makes the job restartable.
if (!paste0(g,'.wgt.med.RData') %in% list.files('MeTWASModels/')){
MeTWAS(geneInt = g,
snpObj = snpObj,
mediator = mediator,
medLocs = medLocs,
covariates = covariates,
dimNumeric = 5,
qtlFull = qtlFull,
h2Pcutoff = 1,
numMed = 5,
seed = 1218,
k = 5,
cisDist = 1e6,
parallel = F,
prune = T,
ldThresh = .5,
cores = 5,
verbose = F,
R2Cutoff = -1,
modelDir = 'MeTWASModels/',
tempFolder = tempFolder)
# Remove per-gene temp files, but keep the shared SNP backing files
# (needed by subsequent iterations of this loop).
fff = paste0(tempFolder,list.files(tempFolder))
fff = fff[!(fff %in% paste0(tempFolder,'temp',
i,'_SnpObj.bk',
c('.bk','.rds')))]
file.remove(fff)}
}
system(paste0('rm -r ',tempFolder))
|
4d15573a1e40db8043ad730579ed51f033c15e75
|
f545016bc144f83e5501557ec30933b81e0e3526
|
/scripts/test.R
|
1b53a858c48aca7032a4c6ab0a21faa19f8adf2d
|
[] |
no_license
|
Zirys1/who-intervenes-how
|
257510720345c7c4169223ec90ae1a6c407dfd17
|
2e87bdb3b19bb6d8c36c41dcbfc427df05e55fcd
|
refs/heads/master
| 2021-01-24T23:40:52.246568
| 2016-08-18T15:56:28
| 2016-08-18T15:56:28
| 68,587,422
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,630
|
r
|
test.R
|
# Scratch script exercising FindIt (heterogeneous-treatment-effect models)
# on a data frame `df` that is defined elsewhere in the session.
# NOTE(review): FindIt() is provided by the FindIt package; `library(mgcfv)`
# looks like a typo or an unrelated leftover dependency -- confirm.
library(mgcfv)
# Single continuous-outcome treatment model with one moderator (ReactanceM).
FindIt(model.treat = Donation ~ RecvsDefD, model.main = ~ ReactanceM,
model.int = ~ ReactanceM,
data = df, type = "continuous",
treat.type = "single")
# OLS benchmark with the full three-way interaction.
summary(lm(Dist ~ NosvsSomeD*RecvsDefD * ReactanceM, df))
###################################################
## Example 2: Treatment-Treatment Interaction
###################################################
## Not run:
data(GerberGreen)
## The model includes four factorial treatments and
## all two, three, four-way interactions between them.
## Four pre-treatment covariates are adjusted.
## Run to search for lambdas.
summary(lm(Dist ~ RecvsDefD*NosvsSomeD*ReactanceM+EAI+moralD, df))
F2<- FindIt(model.treat= Dist ~ RecvsDefD+NosvsSomeD+ReactanceM,
nway=3,
model.main= ~ EAI+moralD,
data = df,
type="binary",
treat.type="multiple")
summary(F2)
pred2 <- predict(F2, unique = T)
head(pred2$data, n = 10)
plot(pred2)
## Fit, given selected lambdas.
F2<- FindIt(model.treat= voted98 ~ persngrp+phnscrpt+mailings+appeal,
nway=4,
model.main= ~ age+majorpty+vote96.1+vote96.0,
data = GerberGreen,
type="binary",
treat.type="multiple",
search.lambdas=FALSE,
lambdas=c(-15.000,-6.237))
## Returns coefficient estimates.
summary(F2)
## Returns predicted values for unique treatment combinations.
pred2 <- predict(F2,unique=TRUE)
## Top 10
head(pred2$data, n=10)
## Bottom 10
tail(pred2$data, n=10)
## Visualize predicted values for each treatment combination.
plot(pred2)
## End(Not run)
|
806081163bb8a4d0b3c2c2f166338b726c37c380
|
673468682a91337871c7ef62f941f71f9bc01521
|
/Curso_Fundamentos_R/Scripts/Sesion7-FundamentosR.R
|
38eb543488298be7a32491dcc0bfb0dfc3e7a6e4
|
[] |
no_license
|
MiguelAngelderobles/Curso-R
|
3bdf16649fca1d214fe4ec0dc8eacb767eb3db00
|
ecd62668c6b45a38dfc27191b54f0997abb5116d
|
refs/heads/main
| 2023-05-31T06:45:25.861449
| 2021-06-13T20:41:35
| 2021-06-13T20:41:35
| 376,636,357
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,839
|
r
|
Sesion7-FundamentosR.R
|
"
title: Fundamentos R desde 0
author: Veronica J. Gomez Catunta
Sesión: Número 7
"
# Importar Datos ----------------------------------------------------------
# Cargar paquete readr
# Nota.- No necesitamos instalar porque la instalacion se realizo en la sesión 6
library(readr)
#trim_ws = Deben recortarse los espacios en blanco iniciales y finales de cada campo antes de analizarlo
data_casos_confirmado_bolivia_depart = read_delim("/home/migueldr/Escritorio/CursosR/Curso_Fundamentos_R/Datos/nmero-de-casos-confirmado-bolivia-depart.csv",";",trim_ws = TRUE)
data_casos_confirmado_bolivia_depart
#Mostrar los detalles de los campos de la data
str(data_casos_confirmado_bolivia_depart)
#Convertir la columna Fecha a Date
# as.Date() = Convierte a tipo Date
data_casos_confirmado_bolivia_depart$Fecha = as.Date(data_casos_confirmado_bolivia_depart$Fecha, format="%d/%m/%Y" )
#Mostrar las primeras filas de la data
head(data_casos_confirmado_bolivia_depart)
#Mostrar las ultimas filas de la data
tail(data_casos_confirmado_bolivia_depart)
#Mostrar dimension de la data
dim(data_casos_confirmado_bolivia_depart)
#NOTA.- Tambien podemos importar datos desde la interfaz de RStudio
# 1.- Menu environment, import Data set
# importar desde excel ----------------------------------------------------
library(readxl)
data_casos_la_paz_dia = read_excel("/home/migueldr/Escritorio/CursosR/Curso_Fundamentos_R/Datos/nmero-de-casos-la-paz-dia.xlsx", sheet = "nmero-de-casos-la-paz-po")
data_casos_la_paz_dia
str(data_casos_la_paz_dia)
data_casos_la_paz_dia$Fecha = as.Date(data_casos_la_paz_dia$Fecha)
str(data_casos_la_paz_dia)
#NOTA.- Tambien podemos importar datos desde la interfaz de RStudio
# 1.- Menu environment, import Data set
# Descargar data desde el repositorio de Ciencia de datos(github) -----------------
install.packages("remotes")
remotes::install_github("cienciadedatos/datos")
millas=datos::millas
millas
aviones=datos::aviones
aviones
# Graficando en R ---------------------------------------------------------
# Cargar el paquete ggplot2
# Nota1.- no es necesario instalar porque ya se instalo en la sesion anterior
# al momento de instalar el paquete tydiverse
# Nota2.- Si fuera necesario tambien pueden instalar independiente solo
# el paquete ggplot2
library(ggplot2)
#Data millas
millas
#--Descripcion de la Data
# fabricante = fabricante
# modelo = nombre del modelo
# cilindrada = tamaño del motor del automóvil, en litros
# anio = año de fabricación
# cilindros = número de cilindros
# transmision = tipo de transmisión
# traccion = tipo de tracción (d = delantera, t = trasera, 4 = 4 ruedas)
# ciudad = millas por galón de combustible en ciudad
# autopista = millas por galón de combustible en autopista
# combustible = tipo de combustible (p = premium, r = regular, e = etanol, d = diesel, g = gas natural comprimido)
# clase = tipo de auto
# GRAFICO DE BARRAS -------------------------------------------------------
# asignando variable a la estetica
ggplot(data = millas,
mapping = aes(x=clase))
ggplot(data = millas,
mapping = aes(x=clase))+
geom_bar()
ggplot(data = millas,
mapping = aes(x=clase))+
geom_bar(fill="blue")
ggplot(data = millas,
mapping = aes(x=clase))+
geom_bar(fill="blue")+
labs(
title = "Numero de autos por tipo",
subtitle = "Tipo de auto entre 1999 y 2008",
caption = "fuente: Data set del repositorio de Ciencia de datos",
x = "Nunero de autos",
y = "Tipo de autos"
)
ggplot(data = millas,
mapping = aes(x=clase, fill=clase))+
geom_bar()+
labs(
title = "Numero de autos por tipo",
subtitle = "Tipo de auto entre 1999 y 2008",
caption = "fuente: Data set del repositorio de Ciencia de datos",
x = "Nunero de autos",
y = "Tipo de autos"
)
# -----Diagrama de barras con Etiqueta de datos del numero de autos por tipo
table(millas$clase)
data_millas_clase = as.data.frame(table(millas$clase))
data_millas_clase
#modificando los nombres de las columnas
colnames(data_millas_clase)=c("clase", "numero")
data_millas_clase
ggplot(data = data_millas_clase,
mapping = aes(x=clase, y=numero, fill=clase))+
geom_bar(stat = "identity")+
geom_text(aes(label=numero), vjust=-0.5)+
labs(
title = "Numero de autos por tipo",
subtitle = "Tipo de auto entre 1999 y 2008",
caption = "fuente: Data set del repositorio de Ciencia de datos",
x = "Nunero de autos",
y = "Tipo de autos"
)
# GRAFICO DE TORTA --------------------------------------------------------
# total de clase = 234
data_millas_clase
#
data_millas_clase$porc_clase = round(((data_millas_clase$numero)*100)/sum(data_millas_clase$numero))
data_millas_clase
ggplot(data = data_millas_clase,
mapping = aes(x="", y = porc_clase, fill=clase))+
geom_bar(stat = "identity")
ggplot(data = data_millas_clase,
mapping = aes(x="", y = porc_clase, fill=clase))+
geom_bar(stat = "identity")+
geom_text(aes(label=paste(porc_clase,"%",sep ="")), position = position_stack(vjust = 0.5))
ggplot(data = data_millas_clase,
mapping = aes(x="", y = porc_clase, fill=clase))+
geom_bar(stat = "identity")+
geom_text(aes(label=paste(porc_clase,"%",sep ="")), position = position_stack(vjust = 0.5))+
theme_void()
ggplot(data = data_millas_clase,
mapping = aes(x="", y = porc_clase, fill=clase))+
geom_bar(stat = "identity")+
geom_text(aes(label=paste(porc_clase,"%",sep ="")), position = position_stack(vjust = 0.5))+
theme_void()+
coord_polar("y")
# GRAFICO DE PUNTOS -------------------------------------------------------
# DATA = flores = Datos sobre la flor Iris de Edgar Anderson
# Largo.Sepalo Largo del sépalo
# Ancho.Sepalo Ancho del sépalo
# Largo.Petalo Largo del pétalo
# Ancho.Petalo Ancho del pétalo
# Especies A qué especie de la flor Iris corresponde (setosa, versicolor, virginica)
flores=datos::flores
flores
ggplot(flores,
mapping = aes(x=Largo.Petalo, y=Ancho.Petalo))+
geom_point()
ggplot(flores,
mapping = aes(x=Largo.Petalo, y=Ancho.Petalo, color=Especies))+
geom_point()
ggplot(flores,
mapping = aes(x=Largo.Petalo, y=Ancho.Petalo, color=Especies, size=Ancho.Sepalo))+
geom_point()
ggplot(flores,
mapping = aes(x=Largo.Petalo, y=Ancho.Petalo, color=Especies, size=Ancho.Sepalo))+
geom_point()+
labs(
title = "Datos sobre la flor Iris de Edgar Anderson",
subtitle = "Tipos de flor por largo de petalo y ancho",
caption = "fuente: Data set del repositorio de Ciencia de datos",
x = "Laro de Petalo",
y = "Ancho de Petalo"
)
|
45f2716f736fd34e4115c9bdc56587b5dbea288e
|
4a4cbd2a6b4af7c337f6e754e8f89c2aa06efd66
|
/Recuit_Simule_Mesim4.R
|
f2ebc009677acb038c6632d0cfdb7becc64b8570
|
[] |
no_license
|
Skoomy/MonteCarlo-Simulation-with-R-Mesim-
|
6ec58fc9a6bd1ad4941dc410dce9c119347eb6f3
|
e24a531e988764f896bdb753b3838394e49391ad
|
refs/heads/master
| 2021-10-25T02:35:36.039309
| 2021-10-13T14:09:20
| 2021-10-13T14:09:20
| 56,853,095
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 914
|
r
|
Recuit_Simule_Mesim4.R
|
###Recuit simulé In R
# Objective function for the annealing demo: an upward-opening parabola,
# f(x) = 5/49 * (x - 10)^2 + 4, with its minimum value 4 at x = 10.
# Vectorised over `x`.
f <- function(x) {
  (5 / 49) * (x - 10)^2 + 4
}
# Quick look at f over x in [0, 2] (stat_function draws the curve).
# NOTE(review): assumes ggplot2 is attached; no library(ggplot2) call
# appears above in this script -- confirm.
p=ggplot(data.frame(x=c(0, 2)), aes(x)) + stat_function(fun=f)
# Simulated annealing minimiser for a 1-D function `f`.
#
#   f        objective function to minimise (called with a single numeric)
#   x0       starting point
#   Ti, Tf   initial and final temperatures (outer loop runs while Tmp > Tf)
#   ampli    width of the symmetric uniform proposal step around x
#   alpha    geometric cooling factor (Tmp <- Tmp * alpha after each sweep)
#   Max_etat max number of accepted moves per temperature sweep
#   Iter     number of proposals per temperature sweep
#
# Returns list(Xopt = best x found, Fxopt = f(Xopt)). As in the original
# code, it also plots the temperatures recorded during the last sweep
# (side effect kept for the demo).
recuit <- function(f, x0, Ti, Tf, ampli, alpha, Max_etat, Iter) {
  l <- numeric(Iter)   # temperature trace of the current sweep (for plot)
  x <- xopt <- x0
  fx <- fxopt <- f(x0)
  Tmp <- Ti
  ess <- 0             # total number of proposals tried (kept for inspection)
  # NOTE(review): `xopt < 3` is a demo-specific early-stop retained from the
  # original code; drop it to use this as a general-purpose minimiser.
  while (Tmp > Tf && xopt < 3) {
    etat <- 0          # accepted moves at this temperature
    for (i in seq_len(Iter)) {
      # Symmetric uniform proposal in [x - ampli/2, x + ampli/2].
      # (Bug fix: `ampli*runif(...)-0.5` scaled only the uniform draw, not
      # the centred step, biasing proposals whenever ampli != 1.)
      y <- x + ampli * (runif(n = 1, min = 0, max = 1) - 0.5)
      fy <- f(y)
      if (fy - fx < 0) {
        # Downhill move: always accept, and track the best point seen.
        x <- y
        fx <- fy
        if (fx < fxopt) {
          xopt <- x
          fxopt <- f(xopt)
        }
        etat <- etat + 1
      } else {
        # Metropolis criterion: accept an uphill move with probability
        # exp(-(fy - fx) / Tmp). (Bug fix: the uniform draw was on
        # [0, 0.1], which accepted nearly every uphill move.)
        if (runif(n = 1, min = 0, max = 1) <= exp(-(fy - fx) / Tmp)) {
          x <- y
          fx <- fy
          etat <- etat + 1
        }
      }
      ess <- ess + 1
      if (etat == Max_etat) {
        break
      }
      l[i] <- Tmp
    }
    Tmp <- Tmp * alpha
  }
  plot(l)
  list(Xopt = xopt, Fxopt = f(xopt))
}
# Demo run: anneal f starting at x0 = 2 with Ti = 100, Tf = 1, ampli = 1,
# alpha = 0.001 (one cooling sweep), Max_etat = 200, Iter = 2000.
recuit(f,2,100,1,1,0.001,200,2000)
|
f83d1e1b5f484a61ea3d76a49082950697089f50
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/predictmeans/R/covariatemeans.R
|
8aace5065af43b789117d7513fdbdef77dd46cb4
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,446
|
r
|
covariatemeans.R
|
# Plot fitted ("predicted") means of the response across a numeric covariate,
# by level(s) of a (possibly interaction) model term, overlaid with the
# observed data and a (1 - level)*100% confidence band.
#
# Arguments:
#   model     fitted model object handled by Kmatrix()/mymodelparm()
#             (lm/glm/gls/lme/lmerMod/glmerMod branches appear below).
#   modelterm factor term of the model ("A" or "A:B"); defaults to the
#             covariate itself, which also disables trellis faceting.
#   covariate name of the numeric covariate on the x axis.
#   level     significance level for the CI (default 0.05).
#   Df        denominator degrees of freedom; derived from the model when
#             NULL (Kenward-Roger via KRmodcomp for lmerMod).
#   trans     back-transformation for predictions; defaults to the inverse
#             link for glm/glmerMod.
#   responsen display name for a response transformed inside the formula.
#   trillis   facet the plot by the factor combinations.
#   plotord   ordering of factors for 2- and 3-way faceting.
#   mtitle    main title; ci/point toggle the ribbon and observed points;
#   jitterv   jitter amount for observed points; newwd opens a new device.
# Returns the ggplot object invisibly (the plot is printed as a side effect).
covariatemeans <- function (model, modelterm=NULL, covariate, level=0.05, Df=NULL, trans=NULL,
responsen=NULL, trillis=TRUE, plotord=NULL, mtitle=NULL, ci=TRUE, point=TRUE, jitterv=0, newwd=TRUE) {
# Default: plot the covariate alone as a single overall line.
if (is.null(modelterm) || modelterm%in%c("NULL", "")) {
modelterm <- covariate
trillis=FALSE
}
vars <- unlist(strsplit(modelterm, "\\:"))
# Contrast matrix K plus bookkeeping (factor names, response, prediction
# data) from the package-internal Kmatrix(); mymodelparm() gives coef/vcov.
ctr.matrix <- Kmatrix(model, modelterm, covariate)
KK <- ctr.matrix$K
pltdf <- ctr.matrix$fctnames
response <- ctr.matrix$response
preddf <- ctr.matrix$preddf
mp <- mymodelparm(model)
bhat <- mp$coef
# We'll work only with the non-NA elements of bhat
KK <- KK[, mp$estimable, drop=FALSE]
# Point predictions and their standard errors on the link/linear scale.
pltdf$yhat <- KK%*%bhat
pltdf$ses <- sqrt(base::diag(KK %*% tcrossprod(mp$vcov, KK)))
# Denominator Df: derive from the model unless supplied by the caller.
if (is.null(Df) || Df%in%c("NULL", "")) {
if (class(model)[1] == "lme") {
Df <- terms(model$fixDF)[modelterm]
}else if (class(model)[1] == "lmerMod") {
# Kenward-Roger Df: refit without the term(s) and compare.
termlabel <- attr(terms(model),"term.labels")
for (i in vars) termlabel <- termlabel[grep(i, termlabel)]
termlabel <- paste(termlabel, collapse="-")
model.b <- update( model, as.formula(paste(".~. -", termlabel)))
Df <- getKR(KRmodcomp(model, model.b), "ddf")
}else Df <- mp$df
if (Df==0) stop("You need provide Df for this model!")
}
# GLM(M)s default the back-transformation to the inverse link function.
if (class(model)[1]=="glm" && is.null(trans)) trans <- model$family$linkinv
if (class(model)[1] == "glmerMod" && is.null(trans)) trans <- slot(model, "resp")$family$linkinv
# Dummy bindings to silence R CMD check NOTEs about ggplot aesthetics.
Mean <- LL <- UL <- xvar <- factors <- bky <- NULL
# Means and t-based CI limits, back-transformed when `trans` is given.
if (is.null(trans)) {
pltdf$Mean <- pltdf$yhat
pltdf$LL <- pltdf$yhat - qt(1 - level/2, df = Df) * pltdf$ses
pltdf$UL <- pltdf$yhat + qt(1 - level/2, df = Df) * pltdf$ses
}else{
pltdf$Mean <- trans(pltdf$yhat)
pltdf$LL <- trans(pltdf$yhat - qt(1 - level/2, df = Df) * pltdf$ses)
pltdf$UL <- trans(pltdf$yhat + qt(1 - level/2, df = Df) * pltdf$ses)
}
pltdf$yhat <- pltdf$ses <- NULL
# A single "factors" column labelling each A:B combination (or the constant
# factor(1) when plotting the covariate alone).
if (modelterm==covariate) pltdf$factors <- factor(1) else pltdf$factors <- factor(do.call("paste", c(pltdf[, vars, drop=FALSE], sep=":")))
colnames(pltdf)[colnames(pltdf)==covariate] <- "xvar"
# delete empty factor combinations
mdf <- model.frame(model)
# Reconstruct the response column if the formula transformed it in place.
if (!(response %in% names(mdf))) mdf[,response] <- eval(parse(text=response), mdf)
mdf <- cbind(mdf, preddf[, !names(preddf)%in%names(mdf), drop=FALSE])
ndf <- data.frame(table(mdf[, vars, drop = FALSE]))
if (any(ndf$Freq==0)) {
ndf0 <- ndf[ndf$Freq==0, , drop=FALSE]
ndf0$factors <- factor(do.call("paste", c(ndf0[, vars, drop=FALSE], sep=":")))
pltdf <- pltdf[!pltdf$factors%in%ndf0$factors, ]
}
if (is.null(mtitle) || mtitle%in%c("NULL", "")) mtitle <- paste("Fitted and observed relationship with", (1-level)*100, "% CI")
# Observed responses (`bky`) placed on the same scale as the fitted means.
if (is.null(trans)) {
mdf$bky <- mdf[, response]
}else{
if (response %in% names(mdf)) { ## Transformed y before modelling
if (class(model)[1]%in%c("glm", "glmerMod")) {
if (class(mdf[, response])=="factor") {
# Factor response -> 0/1 outcome.
mdf$bky <- as.numeric(mdf[, response])-1
}else if (!is.null(dim(mdf[, response]))) {
# Two-column (success, failure) binomial response -> proportions.
mdf$bky <- mdf[, response][,1]/rowSums(mdf[, response])
# (is.null(responsen) || responsen%in%c("NULL", "")) stop("Please provide suitable name for response variable using option 'responsen'!")
response <- "Probability"
}else mdf$bky <- mdf[, response]
# If trans is the identity, show observations on the link scale.
if (isTRUE(all.equal(trans,function(x) x))) {
if (class(model)[1]=="glm") mdf$bky <- model$family$linkfun(mdf$bky)
if (class(model)[1] == "glmerMod") mdf$bky <- slot(model, "resp")$family$linkfun(mdf$bky)
# f (is.null(responsen) || responsen%in%c("NULL", "")) stop("Please provide suitable name for response variable using option 'responsen'!")
response <- "Response"
}
}else{
mdf$bky <- trans(mdf[, response])
# if (is.null(responsen) || responsen%in%c("NULL", "")) stop("Please provide suitable name for response variable using option 'responsen'!")
response <- paste("Transformed", response)
}
}else{ ## Transformed y within modelling
# Strip the wrapping function name, e.g. "log(y)" -> "y".
response <- regmatches(response, regexec("\\(([^<]+)\\)", response))[[1]][2]
if (!response %in% names(mdf)) {
if (is.null(responsen) || responsen%in%c("NULL", "")) stop("Please provide suitable name for response variable using option 'responsen'!")
response <- responsen
}
mdf$bky <- mdf[, response]
}
}
if (modelterm==covariate) mdf$factors <- factor(1) else mdf$factors <- do.call("paste", c(mdf[, vars, drop=FALSE], sep=":"))
names(mdf)[names(mdf)==covariate] <- "xvar"
# Plotting: single panel (colour-coded lines) ...
if (!trillis) {
if (newwd) dev.new()
if (modelterm==covariate) {
plt <- qplot(xvar, Mean, xlab=paste("\n", covariate, sep=""), geom="line", ylab=paste(response, "\n"), data=pltdf, main=paste(mtitle, "\n")) +
theme_bw()
if (ci) plt <- plt + geom_smooth(aes(ymin = LL, ymax = UL), alpha = 0.2, data=pltdf, stat="identity")
if (point) plt <- plt + geom_point(aes(x=xvar, y=bky), position = position_jitter(width = jitterv, height = jitterv), data=mdf)
}else{
plt <- qplot(xvar, Mean, xlab=paste("\n", covariate, sep=""), geom="line", ylab=paste(response, "\n"), data=pltdf, main=paste(mtitle, "\n"), colour=factors) +
theme_bw()
if (ci) plt <- plt + geom_smooth(aes(ymin = LL, ymax = UL, fill=factors), alpha = 0.2, data=pltdf, stat="identity")
if (point) plt <- plt + geom_point(aes(x=xvar, y=bky), position = position_jitter(width = jitterv, height = jitterv), data=mdf)
plt <- plt+guides(col = guide_legend(modelterm), fill=guide_legend(modelterm))
}
print(plt)
}else{
# ... or trellis panels faceted by 1, 2 or 3 factors; `plotord` chooses
# which factor maps to colour vs. facet rows/columns.
if (length(vars)==1) {
if (newwd) dev.new()
plt <- qplot(xvar, Mean, xlab=paste("\n", covariate, sep=""), geom="line", ylab=paste(response, "\n"), data=pltdf, main=paste(mtitle, "\n"), colour=factors) +
facet_wrap(~ factors)+
theme_bw()
if (ci) plt <- plt + geom_smooth(aes(ymin = LL, ymax = UL, fill=factors), alpha = 0.2, data=pltdf, stat="identity")
if (point) plt <- plt + geom_point(aes(x=xvar, y=bky), position = position_jitter(width = jitterv, height = jitterv), data=mdf)
plt <- plt+guides(col = guide_legend(modelterm), fill=guide_legend(modelterm))
if (modelterm==covariate) plt <- plt+ theme(legend.position="none")
print(plt)
}
if (length(vars)==2) {
if (newwd) dev.new()
if (is.null(plotord) || plotord%in%c("NULL", "")) plotord <- 1:2
fact1 <- (vars[plotord])[1]
fact2 <- (vars[plotord])[2]
plt <- qplot(xvar, Mean, xlab=paste("\n", covariate, sep=""), ylab=paste(response, "\n"), main=paste(mtitle, "\n"), data=pltdf,
geom="line", colour=factor(eval(parse(text = fact1)))) +
facet_grid(eval(parse(text = paste("~",fact2, sep=""))))+
theme_bw()
if (ci) plt <- plt + geom_smooth(aes(ymin = LL, ymax = UL, fill=factor(eval(parse(text = fact1)))), alpha = 0.2, data=pltdf, stat="identity")
if (point) plt <- plt + geom_point(aes(x=xvar, y=bky), position = position_jitter(width = jitterv, height = jitterv), data=mdf)
plt <- plt+guides(col = guide_legend(fact1), fill=guide_legend(fact1))
print(plt)
}
if (length(vars)==3) {
if (newwd) dev.new()
if (is.null(plotord) || plotord%in%c("NULL", "")) plotord <- 1:3
fact1 <- (vars[plotord])[1]
fact2 <- (vars[plotord])[2]
fact3 <- (vars[plotord])[3]
plt <- qplot(xvar, Mean, xlab=paste("\n", covariate, sep=""), ylab=paste(response, "\n"), main=paste(mtitle, "\n"), data=pltdf,
geom="line", colour=factor(eval(parse(text = fact1)))) +
facet_grid(eval(parse(text = paste(fact2, "~",fact3, sep=""))))+
theme_bw()
if (ci) plt <- plt + geom_smooth(aes(ymin = LL, ymax = UL, fill=factor(eval(parse(text = fact1)))), alpha = 0.2, data=pltdf, stat="identity")
if (point) plt <- plt + geom_point(aes(x=xvar, y=bky), position = position_jitter(width = jitterv, height = jitterv), data=mdf)
plt <- plt+guides(col = guide_legend(fact1), fill=guide_legend(fact1))
print(plt)
}
}
return(invisible(plt))
}
|
c92ed0e050222db51a3dafae61538edde6f62090
|
1056f60631d647cc54525da3aaae774114b2d81c
|
/R/blocklist.r
|
91529b83ff11e7349d9c4274b73af981227085b1
|
[] |
no_license
|
hrbrmstr/blocklist
|
f897df33c102e2556dfbec600c7005aa2a7970d0
|
089aad5779abe521bda38c2659ea120d69918adf
|
refs/heads/master
| 2021-01-10T10:08:35.962525
| 2016-04-04T02:45:25
| 2016-04-04T02:45:25
| 55,381,067
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,175
|
r
|
blocklist.r
|
# GET wrapper that never throws: returns list(result=, error=) instead.
S_GET <- purrr::safely(GET)
#' Query <blocklist.de> API for IPs blocked in since a period in time, optionally
#' filtering by service
#'
#' @param since either a UNIX timestamp (e.g. \code{1459736686}), a string in "\code{HH:MM}"
#'    format or a time difference in seconds (e.g. \code{3600}). The API will
#'    return all requested IPs logged since that time. Leave \code{NULL} for the
#'    API default.
#' @param service only return addresses for a given service. Default: \code{all} services.
#' @return A character vector of IP addresses (empty lines removed), or
#'    \code{NA} if the API reported an error.
#' @export
#' @examples
#' # defaults
#' get_last_added_ips()
#'
#' # last hour
#' get_last_added_ips(3600)
#'
#' # since 3PM today
#' get_last_added_ips("15:00")
#'
#' # for ssh blocks in the last hour
#' get_last_added_ips(3600, "ssh")
get_last_added_ips <- function(since=NULL,
                               service=c("all", "amavis", "apacheddos", "asterisk",
                                         "badbot", "ftp", "imap", "ircbot",
                                         "mail", "pop3", "regbot", "rfi-attack",
                                         "sasl", "ssh", "w00tw00t", "portflood",
                                         "sql-injection", "webmin", "trigger-spam",
                                         "manuall", "bruteforcelogin")) {
  # match.arg() defaults its choices to the formal default above, so the
  # duplicated choice vector in the original call was redundant.
  service <- match.arg(service)
  query <- list(time=since %||% "")
  if (service != "all") { query$service <- service }
  res <- S_GET("http://api.blocklist.de/getlast.php", query=query)
  # S_GET never throws; a transport failure shows up as a NULL result.
  if (is.null(res$result)) {
    stop("Error querying <blocklist.de> API", call.=FALSE)
  }
  httr::warn_for_status(res$result)
  # Read the body with an explicit encoding (avoids httr's encoding-guess
  # message) and split on line endings directly -- the original piped the
  # text through textConnection(), which was never closed (connection leak).
  txt <- httr::content(res$result, as="text", encoding="UTF-8")
  tmp <- strsplit(txt, "\r?\n")[[1]]
  # The API signals bad input with an "ERROR" line instead of an HTTP code.
  if (length(tmp) > 0 && grepl("ERROR", tmp[1], fixed=TRUE)) {
    warning("<blocklist.de> API error (check `since` specification)")
    return(NA)
  }
  tmp[tmp != ""]
}
|
11d0f39497241905c4f2d14aa591e3d250a5dc18
|
8dfdf4263c8b372a9c728a8f482db9f429e38183
|
/Twitter_data_extraction.R
|
7397adf79e3f16f5ff638db830b14dbd086bcacf
|
[] |
no_license
|
prayashbarua/TwitterDataWebScrapper
|
54ef9691a4c72051e94497e925e3992204376430
|
ff896df0800a6f49fc8a9540abeb7929abb0b4fb
|
refs/heads/master
| 2020-03-27T03:29:53.732724
| 2018-09-14T00:19:57
| 2018-09-14T00:19:57
| 145,868,046
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,984
|
r
|
Twitter_data_extraction.R
|
install.packages("twitteR")
install.packages("httr")
install.packages("ROAuth")
install.packages("tm")
install.packages("tmap")
install.packages("rtweet")
library('httr')
library("devtools")
library("twitteR")
library("ROAuth")
library("tm")
library("tmap")
library("rtweet")
library("plyr")
#oauth_endpoints("twitter")
#internapp <- oauth_app("twitter",key = "mgpK3ZQIlAj5zYC9DAtnv8tDz", secret = "xZRybHbc3PhZxgJSLlQgZnXVgflMpk1oj5qQ19WMeQSrwjTyZ3X" )
#download.file(url="http://curl.haxx.se/ca/cacert.pem",destfile="/Users/prayash/cacert.pem")
#cred <- OAuthFactory$new(consumerKey='mgpK3ZQIlAj5zYC9DAtnv8tDz',
# consumerSecret='xZRybHbc3PhZxgJSLlQgZnXVgflMpk1oj5qQ19WMeQSrwjTyZ3X',
# requestURL='https://api.twitter.com/oauth/request_token',
# accessURL='https://api.twitter.com/oauth/access_token',
# authURL='https://api.twitter.com/oauth/authorize')
#cred$handshake(cainfo='cacert.pem')
options(httr_oauth_cache=T)
consumerKey <- 'mgpK3ZQIlAj5zYC9DAtnv8tDz'
consumerSecret <- 'xZRybHbc3PhZxgJSLlQgZnXVgflMpk1oj5qQ19WMeQSrwjTyZ3'
access_token <- '1006556300925394945-2JVD8kjIBe6fqIciIMXh7YXUUxKnrI'
access_secret <- 'jYDAZNZp8mNJMlH6AV0aVESd5ywX6H614amzOTVSJ4sBT'
setup_twitter_oauth(consumerKey, consumerSecret, access_token, access_secret)
#devtools::install_version("httr",version = "0.6.0",repos="http://cran.us.r-project.org")
# --- Collect recent English-language tweets mentioning each credit union ---
# One searchTwitter() call per institution; `n` is the requested number of
# tweets per query, and most queries are limited to tweets since 2016-06-01.
# NOTE(review): the free search API only returns roughly the last 7 days of
# tweets, so the `since` date is largely ineffective -- confirm intent.
SunCoastCU_1 <- searchTwitter('Suncoast Credit',n=8, lang="en",resultType="recent")
SpaceCoastCU_2 <- searchTwitter('Space coast credit',n=3, lang="en",resultType="recent",since = "2016-06-01")
VyStarCU_3 <- searchTwitter('VyStar',n=169, lang="en",resultType="recent",since = "2016-06-01")
NavyArmyCU_4 <- searchTwitter('navy army ccu ',n=1, lang="en",resultType="recent",since = "2016-06-01")
GeorgiasOwnCU_5 <- searchTwitter('GeorgiasOwn ',n=6, lang="en",resultType="recent",since = "2016-06-01")
DeltaCommunityCU_6 <- searchTwitter('DeltaCommunity ',n=23, lang="en",resultType="recent",since = "2016-06-01")
CEFCU_7 <- searchTwitter('CEFCU ',n=56, lang="en",resultType="recent",since = "2016-06-01")
AlliantCU_8 <- searchTwitter('Alliant credit ',n=16, lang="en",resultType="recent",since = "2016-06-01")
BaxterCU_9 <- searchTwitter('BCU ',n=1, lang="en",resultType="recent",since = "2016-06-01")
TeachersCU_10 <- searchTwitter('Teachers credit union ',n=32, lang="en",resultType="recent",since = "2016-06-01")
VeridianCU_11 <- searchTwitter('Veridian credit union ',n=3, lang="en",resultType="recent",since = "2016-06-01")
UniversityofIowaCommunityCU_12 <- searchTwitter('University of Iowa Community ',n=26, lang="en",resultType="recent",since = "2016-06-01")
StateEmployeesCU_13 <- searchTwitter('State Employees credit union ',n=29, lang="en",resultType="recent",since = "2016-06-01")
DigitalFederalCU_14 <- searchTwitter('Digital Federal credit ',n=17, lang="en",resultType="recent",since = "2016-06-01")
StateEmployeesMDCU_15 <- searchTwitter('secumd ',n=12, lang="en",resultType="recent",since = "2016-06-01")
GenisysCU_16 <- searchTwitter('Genisys credit ',n=15, lang="en",resultType="recent",since = "2016-06-01")
KeeslerFederalCU_17 <- searchTwitter('kfcu ',n=10, lang="en",resultType="recent",since = "2016-06-01")
ServiceCU_18 <- searchTwitter('servicecu ',n=44, lang="en",resultType="recent",since = "2016-06-01")
SEFCU_19 <- searchTwitter('sefcu ',n=73, lang="en",resultType="recent",since = "2016-06-01")
MunicipalCU_20 <- searchTwitter('nymcu ',n=53, lang="en",resultType="recent",since = "2016-06-01")
NassauEducatorsFederalCU_21 <- searchTwitter('nefcupayment protection',n=17, lang="en",resultType="recent",since = "2016-06-01")
TeachersFederalCU_22 <- searchTwitter('tfcu ',n=113, lang="en",resultType="recent",since = "2016-06-01")
HudsonValleyFederalCU_23 <- searchTwitter('hvfcu ',n=18, lang="en",resultType="recent",since = "2016-06-01")
LandmarkCU_24 <- searchTwitter('landmarkcu ',n=44, lang="en",resultType="recent",since = "2016-06-01")
CoastalFederalCU_25 <- searchTwitter('coastal24 ',n=71, lang="en",resultType="recent",since = "2016-06-01")
TruliantFederalCU_26 <- searchTwitter('truliant ',n=99, lang="en",resultType="recent",since = "2016-06-01")
PennsylvaniaStateEmployeesCU_27 <- searchTwitter('psecu ',n=248, lang="en",resultType="recent",since = "2016-06-01")
EastmanCU_28 <- searchTwitter('eastman credit ',n=13, lang="en",resultType="recent",since = "2016-06-01")
GECU_29 <- searchTwitter('gecu',n=28, lang="en",resultType="recent",since = "2016-06-01")
RandolphBrooksFederalCU_30 <- searchTwitter('rbfcu',n=51, lang="en",resultType="recent",since = "2016-06-01")
SecurityServiceFederalCU_31 <- searchTwitter('ssfcu',n=123, lang="en",resultType="recent",since = "2016-06-01")
AmericanAirlinesFederalCU_32 <- searchTwitter('aafcu',n=5, lang="en",resultType="recent",since = "2016-06-01")
VirginiaCU_33 <- searchTwitter('vacreditunion',n=7, lang="en",resultType="recent",since = "2016-06-01")
PentagonFederalCU_34 <- searchTwitter('penfed',n=112, lang="en",resultType="recent",since = "2016-06-01")
SummitCU_35 <- searchTwitter('summitdomore',n=11, lang="en",resultType="recent",since = "2016-06-01")
CommunityFirstCU_36 <- searchTwitter('commfirstcu',n=85, lang="en",resultType="recent",since = "2016-06-01")
SchoolsFirstFederalCU_37 <- searchTwitter('schoolsfirstfcu',n=63, lang="en",resultType="recent",since = "2016-06-01")
Golden_CU_38 <- searchTwitter('golden1cu',n=49, lang="en",resultType="recent",since = "2016-06-01")
TravisCU_39 <- searchTwitter('traviscu ',n=85, lang="en",resultType="recent",since = "2016-06-01")
FirstTechnologyFederalCU_40 <- searchTwitter('firsttechfed',n=68, lang="en",resultType="recent",since = "2016-06-01")
NavyFederalCU_41 <- searchTwitter('navy federal credit union ',n=68, lang="en",resultType="recent",since = "2016-06-01")
AlaskaFederalCU_42 <- searchTwitter('alaskausa',n=8, lang="en",resultType="recent",since = "2016-06-01")
BoeingEmployeesCU_43 <- searchTwitter('@becu ',n=161, lang="en",resultType="recent",since = "2016-06-01")
IdahoCentralCU_44 <- searchTwitter('idaho central credit union',n=16, lang="en",resultType="recent",since = "2016-06-01")
EntCU_45 <- searchTwitter('ent_cu',n=233, lang="en",resultType="recent",since = "2016-06-01")
MountainValleyCU_46 <- searchTwitter('mountaincu',n=8, lang="en",resultType="recent",since = "2016-06-01")
#Getting texts from tweets
SunCoastCU_1_text <- sapply(SunCoastCU_1,function(x) x$getText())
print(SunCoastCU_1_text[3])
#make data frame
# Convert each credit union's list of tweet status objects (collected above
# via searchTwitter) into a data frame, then dump each one to CSV.
#
# The 46 copy-pasted df1..df46 assignments and 46 write.csv calls are replaced
# by two loops over the source-variable names.  df1..df46 are still created
# (via assign) because later code refers to them by name, and each CSV keeps
# its original path "/Users/prayash/Downloads/<source variable>.csv".
cu_names <- c(
  "SunCoastCU_1", "SpaceCoastCU_2", "VyStarCU_3", "NavyArmyCU_4",
  "GeorgiasOwnCU_5", "DeltaCommunityCU_6", "CEFCU_7", "AlliantCU_8",
  "BaxterCU_9", "TeachersCU_10", "VeridianCU_11",
  "UniversityofIowaCommunityCU_12", "StateEmployeesCU_13",
  "DigitalFederalCU_14", "StateEmployeesMDCU_15", "GenisysCU_16",
  "KeeslerFederalCU_17", "ServiceCU_18", "SEFCU_19", "MunicipalCU_20",
  "NassauEducatorsFederalCU_21", "TeachersFederalCU_22",
  "HudsonValleyFederalCU_23", "LandmarkCU_24", "CoastalFederalCU_25",
  "TruliantFederalCU_26", "PennsylvaniaStateEmployeesCU_27", "EastmanCU_28",
  "GECU_29", "RandolphBrooksFederalCU_30", "SecurityServiceFederalCU_31",
  "AmericanAirlinesFederalCU_32", "VirginiaCU_33", "PentagonFederalCU_34",
  "SummitCU_35", "CommunityFirstCU_36", "SchoolsFirstFederalCU_37",
  "Golden_CU_38", "TravisCU_39", "FirstTechnologyFederalCU_40",
  "NavyFederalCU_41", "AlaskaFederalCU_42", "BoeingEmployeesCU_43",
  "IdahoCentralCU_44", "EntCU_45", "MountainValleyCU_46"
)
for (i in seq_along(cu_names)) {
  # One row per tweet: rbind the per-tweet single-row data frames together.
  tweets <- get(cu_names[i])
  assign(paste0("df", i), do.call(rbind, lapply(tweets, as.data.frame)))
}
for (i in seq_along(cu_names)) {
  # Same output files as the original: <Downloads>/<source name>.csv
  write.csv(get(paste0("df", i)),
            file = paste0("/Users/prayash/Downloads/", cu_names[i], ".csv"))
}
#Creating a corpus
# Build a tm corpus from the Suncoast tweet texts and rank terms by frequency.
SunCoastCU_1_corpus <- Corpus(VectorSource(SunCoastCU_1_text))
SunCoastCU_1_corpus
inspect(SunCoastCU_1_corpus)
CU_matrix <- TermDocumentMatrix(SunCoastCU_1_corpus)
# Row sums of the term-document matrix give per-term counts; sort descending.
term_counts <- sort(rowSums(as.matrix(CU_matrix)), decreasing = TRUE)
word_freq <- data.frame(word = names(term_counts), frequency = term_counts)
head(word_freq, 100)
# Merge the 46 per-credit-union data frames into one master frame (full outer
# merge on all shared columns) and save it.
df <- Reduce(function(x, y) merge(x, y, all = TRUE),
             list(df1, df2, df3, df4, df5, df6, df7, df8, df9, df10, df11,
                  df12, df13, df14, df15, df16, df17, df18, df19, df20, df21,
                  df22, df23, df24, df25, df26, df27, df28, df29, df30, df31,
                  df32, df33, df34, df35, df36, df37, df38, df39, df40, df41,
                  df42, df43, df44, df45, df46))
# (The original re-wrote df1 to SunCoastCU_1.csv here; that duplicated the
# identical write performed right after df1 was built, so it is dropped.)
write.csv(df, file = "/Users/prayash/Downloads/CU40_RawData.csv")
# Twitter data extraction for Objective2
# Payment-protection-product (PPP) related queries plus one large generic
# "credit union" pull used for sentiment scoring.
Credit_insurance_1 <- searchTwitter('insurance cuna',n=1, lang="en",resultType="recent",since = "2015-06-01")
Debt_protection_2 <- searchTwitter('Debt protection credit union',n=2, lang="en",resultType="recent",since = "2015-06-01")
Mortgage_protection_3 <- searchTwitter('mortgage CUNA',n=5, lang="en",resultType="recent",since = "2015-06-01")
Lender_program_4 <- searchTwitter('lender CUNA',n=1, lang="en",resultType="recent",since = "2015-06-01")
loanliner <- searchTwitter('loanliner',n=2, lang="en",resultType="recent",since = "2015-06-01")
cu <- searchTwitter('credit union',n=8697, lang="en",resultType = "mixed")
# Flatten the big "credit union" pull (one row per tweet) and save it.
df_cu <- do.call("rbind", lapply(cu, as.data.frame))
write.csv(df_cu,file="/Users/prayash/Downloads/CU_sentiment.csv")
# Flatten each PPP query result into a data frame.
df_ppp_1 <- do.call("rbind", lapply(Credit_insurance_1, as.data.frame))
df_ppp_2 <- do.call("rbind", lapply(Debt_protection_2, as.data.frame))
df_ppp_3 <- do.call("rbind", lapply(Mortgage_protection_3, as.data.frame))
df_ppp_4 <- do.call("rbind", lapply(Lender_program_4, as.data.frame))
df_ppp_5 <- do.call("rbind", lapply(loanliner, as.data.frame))
# Outer-merge the PPP frames with the master frame `df` built above.
df_ppp <- Reduce(function(x, y) merge(x, y, all=TRUE), list(df_ppp_1,df_ppp_2,df_ppp_3,df_ppp_4,df_ppp_5,df))
# NOTE(review): `df_ppp` is never written to disk, and the next line re-saves
# the unchanged `df` to a file already written earlier -- presumably this was
# meant to be write.csv(df_ppp, ...); confirm intent.
write.csv(df,file="/Users/prayash/Downloads/CU40_RawData.csv")
|
847fc21ba4895123f973a9b73a1074ef31d9c775
|
d794c2c6908c9b95607fe3ae445b035f58e24355
|
/man/lambda.Rd
|
904e711c4a09f8286ed4d7183b9c7773873ef273
|
[] |
no_license
|
robertzk/magrittr
|
e2224b4045822f7dcbbe888eddec4dd449376080
|
b463edff3e753118b863a7d5c4aefa7ebd575324
|
refs/heads/master
| 2021-01-18T12:18:16.911394
| 2015-02-13T00:22:27
| 2015-02-13T00:22:27
| 30,732,033
| 0
| 0
| null | 2015-02-13T00:06:41
| 2015-02-13T00:06:41
| null |
UTF-8
|
R
| false
| false
| 1,173
|
rd
|
lambda.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{lambda}
\alias{l}
\alias{lambda}
\title{Shorthand notation for anonymous/lambda functions
in magrittr pipelines.}
\usage{
lambda(expr)
l(expr)
}
\arguments{
\item{expr}{A special kind of expression for the anonymous function.
The syntax is \code{symbol ~ expression}, see the examples.}
}
\value{
a function.
}
\description{
This is an alternative syntax for generating anonymous functions.
When used in chains, the call should be enclosed in parentheses to
force evaluation of the function generation before the left-hand side
is inserted.
}
\details{
\code{lambda} has a special syntax, where the expression is defined as
\code{symbol ~ expression}. The alias \code{l} is shorthand for \code{lambda}.
Previous versions used symbol -> expression syntax, but this caused
problems with compiling packages. There is currently a warning if the
old syntax is used.
}
\examples{
lambda(x ~ x^2 + 2*x)
sapply(1:10, lambda(x ~ x^2))
Filter(lambda(x ~ x > 0), rnorm(100))
iris \%>\%
(lambda(dfr ~ rbind(dfr \%>\% head, dfr \%>\% tail)))
1:10 \%>\%
sin \%>\%
(lambda(x ~ {
d <- abs(x) > 0.5
x*d
}))
}
|
5ed700330858836d9364b6822b8402cdc3ae0427
|
cba3e90d5af37d408f2f05824769fba9b16e2ec2
|
/cursos/ACT11302/datosycodigo/ACT11302_151204.R
|
8185f23f3957466a368f228a5cea09c588d7d2ec
|
[] |
no_license
|
ramja/jcmartinezovando.github.io
|
5e301ce152d18dbb52f409f7a45018d07ef65900
|
c4351d78a8a700062dfd0cfbad5c2587857a26b3
|
refs/heads/master
| 2020-04-15T06:38:03.210665
| 2016-01-14T07:45:34
| 2016-01-14T07:45:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,973
|
r
|
ACT11302_151204.R
|
#
# ACT-11302: Actuarial Calculus III
#
# Author: Juan Carlos Martinez Ovando
# Email: juan.martinez.ovando@itam.mx
#
# This code was tested on R v.3.1.2
#
# NOTE(review): rm(list = ls()) wipes the user's workspace, and
# install.packages() runs on every execution -- consider guarding the
# installs with requireNamespace().
rm(list = ls())
## Main:
# install.packages("ggplot")
## Masked:
install.packages("actuar")
install.packages("fExtremes")
install.packages("fitdistrplus")
install.packages("fGarch")
install.packages("fTrading")
install.packages("timeDate")
install.packages("timeSeries")
install.packages("fBasics")
install.packages("survival")
install.packages("splines")
library("actuar")
library("fExtremes")
library("fitdistrplus")
# Local working paths (machine-specific Windows layout).
path.datos <- "C:/JCMO.Academia/@Cursos/2015-II_Calculo Actuarial III/_datos"
path.code <- "C:/JCMO.Academia/@Cursos/2015-II_Calculo Actuarial III/_codigo"
# Read the individual claim (severity) data
datos <- read.csv(
  paste(path.datos,"/act11302_DanishInsuranceData.csv", sep = ""),
  header = TRUE)
# head(datos)
# tail(datos)
# colnames(datos)
# rownames(datos)
# Use the Danish fire insurance claims data set shipped with the loaded
# packages; export a copy and keep the claim amounts (column 2) as `xdatos`.
data(danishClaims)
write.csv(danishClaims, file = paste(path.datos,"/act11302_danishClaims.csv", sep = ""), row.names = FALSE)
xdatos <- danishClaims[,2]
# Descriptive statistics
summary(xdatos)
# descdist(xdatos, boot = 3000)
# --------------------------------------------------------------------------
# Descriptive analysis for severities
# --------------------------------------------------------------------------
# Comparison (graphical)
plotdist(xdatos, histo = TRUE, demp = TRUE)
# More plots
par(mfrow = c(2, 2))
emdPlot(xdatos)
qqparetoPlot(xdatos)
msratioPlot(xdatos)
# --------------------------------------------------------------------------
# Fitting candidate severity distributions
# --------------------------------------------------------------------------
# A) Weibull
fit.weibull <- fitdist(xdatos, "weibull")
summary(fit.weibull)
# B) Gamma
fit.gamma <- fitdist(xdatos, "gamma")
summary(fit.gamma)
# C) Lognormal
fit.lnorm <- fitdist(xdatos, "lnorm")
summary(fit.lnorm)
# D) Gumbel
fit.gumbel <- gumbelFit(xdatos)
summary(fit.gumbel)
# E) Generalized Extreme Value
fit.gev <- gevFit(xdatos)
summary(fit.gev)
# F) Generalized Pareto
fit.gpd <- gpdFit(xdatos)
summary(fit.gpd)
# Comparison (graphical) of the three fitdistrplus fits
par(mfrow = c(2, 2))
plot.legend <- c("Weibull", "lognormal", "gamma")
denscomp(list(fit.weibull, fit.lnorm, fit.gamma), legendtext = plot.legend)
qqcomp(list(fit.weibull, fit.lnorm, fit.gamma), legendtext = plot.legend)
cdfcomp(list(fit.weibull, fit.lnorm, fit.gamma), legendtext = plot.legend)
ppcomp(list(fit.weibull, fit.lnorm, fit.gamma), legendtext = plot.legend)
# Matrix
# NOTE(review): round-tripping a data frame through as.matrix() coerces all
# columns to a common type (character if any column is non-numeric).  `datos`
# is not used again below, but confirm this conversion is intended.
is.matrix(datos)
datos <- as.matrix(datos)
# Data frame
is.data.frame(datos)
datos <- as.data.frame(datos)
# # Plotting
# ggplot(datos1, aes(x = dens, fill = lines)) + geom_density(alpha = 0.5)
# x <- datos$LossinDKM
# hillPlot(x, start = 15, ci = 0.95, doplot = TRUE, plottype = c("alpha", "xi"), labels = TRUE)
# --------------------------------------------------------------------------
# Aggregate severity distribution
# --------------------------------------------------------------------------
# e.g. suppose the best distribution for individual severities is the
# generalized Pareto.  At time t = 0 we need to generate the distribution of
#   S(1) = sum_{i=1}^{N(1)} X_i
#
# --------------------------------------------------------------------------
# Case 1: N(1) fixed.
N_1 <- 500   # number of claims at time t = 1
# (for reference, the Mexican auto-insurance portfolio alone is ~6.5M policies)
M <- 1000    # number of Monte Carlo replicates of S(1)
# Each column holds one simulated claim across the M replicates.
xdatos_1 <- matrix(NaN, M, N_1)
for (i in seq_len(N_1)) {
  xdatos_1[, i] <- as.matrix(gpdSim(model = list(xi = 0.4915575, mu = 0, beta = 7.0403588), n = M, seed = NULL))
}
# Aggregate loss per replicate: rowSums replaces the original per-row sum
# loop (same values, vectorised); dead `i <- 1` / `m <- 1` seeds removed.
S_1 <- matrix(rowSums(xdatos_1), M, 1)
# More plots
par(mfrow = c(2, 2))
hist(S_1, round(M/10))
emdPlot(S_1)
qqparetoPlot(S_1)
msratioPlot(S_1)
# Summary and tail-risk measures
summary(S_1)
S_1_VaR <- VaR(S_1, alpha = 0.05, type = "sample", tail = c("lower", "upper"))
S_1_CVaR <- CVaR(S_1, alpha = 0.05, type = "sample", tail = c("lower", "upper"))
# --------------------------------------------------------------------------
# Case 2: N(1) random.
# Assume N(1) ~ Poisson(lambda = 500).
lambda <- 500
N_1_sim <- rpois(M, lambda)
S_1_sim <- matrix(NaN, M, 1)
# Keep every replicate's individual-claim draws.  BUG FIX: the original grew
# the accumulator with list(old, new), which builds a deeply nested pair
# structure instead of a flat list of M simulations; preallocate instead.
xdatos_1_sim <- vector("list", M)
for (m in seq_len(M)) {
  xdatos_aux <- as.matrix(gpdSim(model = list(xi = 0.4915575, mu = 0, beta = 7.0403588), n = N_1_sim[m], seed = NULL))
  xdatos_1_sim[[m]] <- xdatos_aux
  S_1_sim[m] <- sum(xdatos_aux)
}
# More plots
par(mfrow = c(2, 2))
hist(S_1_sim, round(M/10))
emdPlot(S_1_sim)
qqparetoPlot(S_1_sim)
msratioPlot(S_1_sim)
# Summary and tail-risk measures (confidence level 0.999 here)
summary(S_1_sim)
S_1_VaR <- VaR(S_1_sim, alpha = 0.999, type = "sample", tail = c("lower", "upper"))
S_1_CVaR <- CVaR(S_1_sim, alpha = 0.999, type = "sample", tail = c("lower", "upper"))
# --------------------------------------------------------------------------
# Case 2 (repeated): N(1) random, N(1) ~ Poisson(lambda = 500).
# NOTE(review): this section repeats the previous one verbatim except for
# alpha = 0.05 in the risk measures; kept as in the original.
lambda <- 500
N_1_sim <- rpois(M, lambda)
S_1_sim <- matrix(NaN, M, 1)
# BUG FIX: preallocate the accumulator (the original nested lists via
# list(old, new); see the previous Case 2 block).
xdatos_1_sim <- vector("list", M)
for (m in seq_len(M)) {
  xdatos_aux <- as.matrix(gpdSim(model = list(xi = 0.4915575, mu = 0, beta = 7.0403588), n = N_1_sim[m], seed = NULL))
  xdatos_1_sim[[m]] <- xdatos_aux
  S_1_sim[m] <- sum(xdatos_aux)
}
# More plots
par(mfrow = c(2, 2))
hist(S_1_sim, round(M/10))
emdPlot(S_1_sim)
qqparetoPlot(S_1_sim)
msratioPlot(S_1_sim)
# Summary and tail-risk measures
summary(S_1_sim)
S_1_VaR <- VaR(S_1_sim, alpha = 0.05, type = "sample", tail = c("lower", "upper"))
S_1_CVaR <- CVaR(S_1_sim, alpha = 0.05, type = "sample", tail = c("lower", "upper"))
# --------------------------------------------------------------------------
# Case 3: N(1) random, with coinsurance (the company pays the proportion
# alpha_coa of each claim).  Assume N(1) ~ Poisson(lambda = 500).
lambda <- 500
N_1_sim_coa <- rpois(M, lambda)
S_1_sim_coa <- matrix(NaN, M, 1)
alpha_coa <- 0.9   # retained proportion of each claim
# BUG FIX: preallocate the accumulator (the original nested lists via
# list(old, new); see Case 2).
xdatos_1_sim_coa <- vector("list", M)
for (m in seq_len(M)) {
  xdatos_aux_coa <- alpha_coa * as.matrix(gpdSim(model = list(xi = 0.4915575, mu = 0, beta = 7.0403588), n = N_1_sim_coa[m], seed = NULL))
  xdatos_1_sim_coa[[m]] <- xdatos_aux_coa
  S_1_sim_coa[m] <- sum(xdatos_aux_coa)
}
# More plots
par(mfrow = c(2, 2))
hist(S_1_sim_coa, round(M/10))
emdPlot(S_1_sim_coa)
qqparetoPlot(S_1_sim_coa)
msratioPlot(S_1_sim_coa)
# Summary and tail-risk measures
summary(S_1_sim_coa)
S_1_VaR <- VaR(S_1_sim_coa, alpha = 0.05, type = "sample", tail = c("lower", "upper"))
S_1_CVaR <- CVaR(S_1_sim_coa, alpha = 0.05, type = "sample", tail = c("lower", "upper"))
#
# -- FIN: ACT11302_151204.R --
|
bcf0e4c9ecd986780de0c6a50afa26e00afcc87f
|
e4ad2398aa4b2d308ba0ec11803d58e36bba43d5
|
/R/qcs.cpn.r
|
39d63783810f0e1e1c1fc2c665237261ae11c101
|
[] |
no_license
|
mflores72000/qcr
|
2204b2810a24a91bee75ef68094feaf6198746bd
|
4b07dcc8bdc2293ed0504d438e835b9562746612
|
refs/heads/main
| 2023-06-08T04:46:35.286754
| 2023-05-30T16:06:10
| 2023-05-30T16:06:10
| 387,871,922
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,655
|
r
|
qcs.cpn.r
|
#-----------------------------------------------------------------------------#
# #
# QUALITY CONTROL STATISTICS IN R #
# #
# An R package for statistical in-line quality control. #
# #
# Written by: Miguel A. Flores Sanchez #
# Professor of the Mathematics Department #
# Escuela Politecnica Nacional, Ecuador #
# miguel.flores@epn.edu.ec #
# #
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
# Main function to create a 'qcs.cpn' object
#-----------------------------------------------------------------------------#
##' Process capability indices (Nonparametric)
##'
##' Calculates \eqn{CNp}{CNpk}, \eqn{CNpm}{CNpmk} using the formulation
##' described by Tong and Chen (1998).
##' @aliases qcs.cpn
##' @param object qcs object of type \code{"qcs.xbar"} or \code{"qcs.one"}.
##' @param parameters A vector specifying the \code{u} and \code{v} parameter values.
##' If \code{parameters} = c(u=0, v=0), the CNp index is calculated;
##' If \code{parameters} = c(u=1, v=0), the CNpk index is calculated;
##' If \code{parameters} = c(u=0, v=1), the CNpm index is calculated;
##' If \code{parameters} = c(u=1, v=1), the CNpmk index is calculated.
##' @param limits A vector specifying the lower and upper specification limits.
##' @param q A vector specifying the lower and upper quantiles. These values are
##' necessary, if \code{object} value is missing.
##' @param target A value specifying the target of the process.
##' If it is \code{NULL}, the target is set at the middle value between specification limits.
##' @param median A value specifying the median of data.
##' @param nsigmas A numeric value specifying the number of sigmas to use.
##' @param confidence A numeric value between 0 and 1 specifying the probabilities
##' for computing the quantiles.
##' This values is used only when \code{object} values is provided.
##' By default \code{confidence}=0.9973.
##' @export
##' @references
##' Montgomery, D.C. (1991) \emph{Introduction to Statistical Quality Control}, 2nd
##' ed, New York, John Wiley & Sons. \cr
##' Tong, L.I. and Chen, J.P. (1998), \emph{Lower confidence limits of process capability
##' indices for nonnormal process distributions.} International Journal of Quality & Reliability Management,
##' Vol. 15 No. 8/9, pp. 907-19.\cr
##' @examples
##' library(qcr)
##' ##' data(pistonrings)
##' xbar <- qcs.xbar(pistonrings[1:125,],plot = TRUE)
##' x<-xbar$statistics[[1]]
##' LSL=73.99; USL=74.01
##' median <-median(x)
##' lq=as.numeric(quantile(x,probs=0.00135))
##' uq=as.numeric(quantile(x,probs=0.99865))
##' qcs.cpn(parameters = c(0,0),limits = c(LSL,USL),
##' median = median, q=c(lq,uq))
##' ## all capability indices
##' qcs.cpn(object = xbar,parameters = c(0,0), limits = c(LSL,USL))
##' qcs.cpn(object = xbar,parameters = c(1,0), limits = c(LSL,USL))
##' qcs.cpn(object = xbar,parameters = c(0,1), limits = c(LSL,USL))
##' qcs.cpn(object = xbar, parameters = c(1,1), limits = c(LSL,USL))
qcs.cpn <- function(object, parameters = c(u = 0, v = 0), limits = c(lsl = -3, usl = 3),
                    q = c(lq = -3, uq = 3),
                    target = NULL, median = 0, nsigmas = 3, confidence = 0.9973){
  # Nonparametric process capability index (Tong & Chen, 1998).
  # `parameters` = c(u, v) selects the index: (0,0)=CNp, (1,0)=CNpk,
  # (0,1)=CNpm, (1,1)=CNpmk.  `limits` are the specification limits; `q` the
  # lower/upper sample quantiles used when no qcs object is supplied;
  # `median` the process median.  Returns the index rounded to 4 decimals as
  # a named numeric scalar.
  if (!missing(object)){
    if (!inherits(object, "qcs"))
      stop("an object of class 'qcs' is required")
    if (!(object$type == "xbar" | object$type == "one"))
      stop("Process Capability Analysis only available for charts type
         \"qcs.xbar\" and \"qcs.one\" charts")
    # Empirical tail quantiles implied by the requested confidence level.
    q1 <- (1 - confidence) / 2
    q2 <- confidence + q1
    x <- object[[3]][, 1]
    F2 <- as.numeric(quantile(x, probs = q2))
    F1 <- as.numeric(quantile(x, probs = q1))
    median <- median(x)
  } else {
    F1 <- q[1]
    F2 <- q[2]
  }
  if (nsigmas <= 0)
    stop("nsigmas must be a value positive")
  confidence <- 1 - 2 * pnorm(-nsigmas)   # was `=` assignment
  # Nonparametric sigma estimate: spread between the tail quantiles over 6.
  std.dev <- (F2 - F1) / 6
  if (length(limits) != 2)
    stop("specification limits must be two")
  lsl <- limits[1]
  usl <- limits[2]
  if (lsl >= usl)
    stop("lsl >= usl")
  # BUG FIX: the original tested is.numeric(usl) & is.finite(lsl) in BOTH
  # branches (copy-paste), so a non-finite/non-numeric usl was never set to
  # NA.  Each limit is now validated against itself.
  if (!(is.numeric(lsl) && is.finite(lsl)))
    lsl <- NA
  if (!(is.numeric(usl) && is.finite(usl)))
    usl <- NA
  if (is.na(lsl) & is.na(usl))
    stop("invalid specification limits")
  # Default target: midpoint of the specification limits.
  if (is.null(target)) target <- mean(limits, na.rm = TRUE)
  if (is.na(lsl)) {
    if (target > usl)
      warning("target value larger than one-sided specification limit...")
  }
  if (is.na(usl)) {
    if (target < lsl)
      warning("target value smaller than one-sided specification limit...")
  }
  if (!is.na(lsl) & !is.na(usl)) {
    if (target < lsl || target > usl)
      warning("target value is not within specification limits...")
  }
  m <- (lsl + usl) / 2   # midpoint of the limits
  d <- (usl - lsl) / 2   # half-width of the specification band
  u <- parameters[1]
  v <- parameters[2]
  # Generalized capability formula of Tong & Chen (1998).
  ind <- (d - u * abs(median - m)) /
    (nsigmas * std.dev * sqrt(1 + v * ((median - target) / std.dev)^2))
  if (u == 0 & v == 0) names(ind) <- c("CNp")
  if (u == 1 & v == 0) names(ind) <- c("CNpk")
  if (u == 0 & v == 1) names(ind) <- c("CNpm")
  if (u == 1 & v == 1) names(ind) <- c("CNpmk")
  result <- round(ind, 4)
  return(result)
}
|
b9804d76299e3682eb6689971703f2bcd18876b8
|
d5facf2eb1940a5ef24399017845e17ca172ebf3
|
/man/writeRaster2.Rd
|
36e31a84fc649cd13cfc948a846c0453bd9a325b
|
[] |
no_license
|
ailich/mytools
|
3970d0254b4bc9b7bb23b2918f99ec7e966ddbbe
|
2e8b244974483df793ae000d8a44f8904e44bc9a
|
refs/heads/master
| 2023-01-13T23:41:50.623083
| 2022-12-29T17:56:54
| 2022-12-29T17:56:54
| 117,773,800
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 767
|
rd
|
writeRaster2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/writeRaster2.R
\name{writeRaster2}
\alias{writeRaster2}
\title{Modification of writeRaster that can delete auxiliary files}
\usage{
writeRaster2(x, filename, overwrite = TRUE, ...)
}
\arguments{
\item{x}{Raster* object}
\item{filename}{Output filename}
\item{overwrite}{Logical. If TRUE, "filename" will be overwritten if it exists. Additionally, auxiliary files related to this raster will be deleted}
\item{...}{other arguments passed to writeRaster}
}
\description{
Modification of writeRaster that can delete auxiliary files. If overwrite is set to TRUE (the default) it will overwrite the raster file and delete all associated files (e.g. pyramid files). Might only work on Windows.
}
|
cc21859d0200a8d0d1004f6fb4ba0804343b228d
|
e32541a3498bc9618c21d8322fb5da16c466cc69
|
/retiring/doregression/Samples/Sparklines.R
|
3ae86f7115153b83862dd811f477871fc39f91f8
|
[] |
no_license
|
StefanoPicozzi/r-base
|
96b580ef7f6480b8eaaa76ba1b3f80ac562149f1
|
06946b23d9c4a40e238015d2c28ba6cd0978b2b0
|
refs/heads/master
| 2021-01-10T15:46:13.631314
| 2016-01-03T19:51:06
| 2016-01-03T19:51:06
| 48,297,497
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,250
|
r
|
Sparklines.R
|
library("lattice")
library("MASS")
#library("YaleToolkit")
ppi <- 300
noDays <- 62
noWeeks <- 10
dayOfWeek <- c(
"1-Th", "Fr", "Sa", "Su", "Mo", "Tu",
"2-We", "Th", "Fr", "Sa", "Su", "Mo", "Tu",
"3-We", "Th", "Fr", "Sa", "Su", "Mo", "Tu",
"4-We", "Th", "Fr", "Sa", "Su", "Mo", "Tu",
"5-We", "Th", "Fr", "Sa", "Su", "Mo", "Tu",
"6-We", "Th", "Fr", "Sa", "Su", "Mo", "Tu",
"7-We", "Th", "Fr", "Sa", "Su", "Mo", "Tu",
"8-We", "Th", "Fr", "Sa", "Su", "Mo", "Tu",
"9-We", "Th", "Fr", "Sa", "Su", "Mo", "Tu"
)
setwd("/Users/stefanopicozzi/Google Drive/ANU/StefanoPicozzi/Study 1 - August 2013/Tools/R")
getwd()
preData <- read.delim("Pre.csv", header = T, sep = ",")
dailyData <- read.delim("Daily.csv", header = T, sep = ",")
dailyData <- dailyData[order(dailyData$ID, dailyData$DayNo),]
dailyData[1:5]
participants <- preData$ID
list = c()
for (p in participants) {
list <- append(list, p)
}
print(list)
# Render one 6-panel sparkline dashboard PNG per participant, covering the
# last 14 days of that participant's recordings.
for (p in list) {
  print(paste("------>", p))
  pDailyData <- c()
  pDailyData <- subset( dailyData, ID == p)
  # Skip participants with no daily records.
  if (nrow(pDailyData) < 1) { next }
  # Window: the 14 days up to the participant's last recorded day.
  startDay <- max(pDailyData$DayNo) - 14
  lastDay <- max(pDailyData$DayNo)
  pDailyData <- subset( pDailyData, DayNo >= startDay)
  len <- nrow(pDailyData)
  # Debug dump for one specific participant.
  if ( p == "PR63" ) {
  print(pDailyData[1:5])
  }
  fileName = paste("out/Daily/Participant/", p, "/sparklines.png", sep = "")
  png(paste(fileName, sep=""),
  res = 72,
  width = 800,
  height = 800,
  pointsize = 16,
  units = "px")
  # Six stacked panels, one per daily measure.
  par(mfrow = c(6, 1), mar = c(3, 4, 2, 1), oma = c(4, 0, 1, 2))
  # par(mfrow = c(6, 1), mar = c(5, 4, 2, 4) + 0.1, oma = c(4, 0, 2, 2) )
  # Panel 1: practice minutes, annotated with max (green), last (blue) and
  # min (red) values, each drawn at its own y position.
  plot(pDailyData$DayNo, pDailyData$PracMins, axes = F, ylab = "", xlab = "", main = "", type = "l")
  mtext("Daily Dashboard\n Last 14 Days of Recordings", cex = 0.8)
  axis(2, at = seq(0, 120, by = 20), las = 2, cex.axis = 0.7)
  lastY <- pDailyData$PracMins[len]
  maxY <- max(pDailyData$PracMins)
  minY <- min(pDailyData$PracMins)
  points(x = lastDay, y = lastY, col = "deepskyblue1", pch = 19, cex = 2)
  text(x = lastDay, y = maxY, labels = maxY, pos = 1, cex = 1.5, col = "green", offset = 0.8)
  text(x = lastDay, y = lastY, labels = lastY, pos = 1, cex = 1.5, col = "blue", offset = 2.0)
  text(x = lastDay, y = minY, labels = minY, pos = 1, cex = 1.5, col = "red", offset = 3.4)
  loc <- par("usr")
  text(loc[1], loc[4], "Practice Mins", pos = 3, xpd = T)
  # Panel 2: practice quality.
  # NOTE(review): maxY/minY are NOT recomputed for this or the following
  # panels (the recomputations are commented out), so the annotated max/min
  # values -- and, from Panel 3 on, their y positions -- still refer to
  # Panel 1's PracMins.  Looks like a copy-paste remnant; confirm intent.
  plot(pDailyData$DayNo, pDailyData$PQMScore, axes = F, ylab = "", xlab = "", main = "", type = "l")
  axis(2, at = seq(0, 10, by = 1), las = 2, cex.axis = 0.7)
  lastY <- pDailyData$PQMScore[len]
  # maxY <- max(pDailyData$PQMScore)
  # minY <- min(pDailyData$PQMScore)
  points(x = lastDay, y = lastY, col = "deepskyblue1", pch = 19, cex = 1)
  text(x = lastDay, y = maxY, labels = maxY, pos = 1, cex = 1.5, col = "green", offset = 0.8)
  text(x = lastDay, y = lastY, labels = lastY, pos = 1, cex = 1.5, col = "blue", offset = 2.0)
  text(x = lastDay, y = minY, labels = minY, pos = 1, cex = 1.5, col = "red", offset = 3.4)
  loc <- par("usr")
  text(loc[1], loc[4], "Practice Quality", pos = 3, xpd = T)
  # Panel 3: MAAS (mindfulness) score -- all three annotations placed at
  # y = maxY (see NOTE above).
  plot(pDailyData$DayNo, pDailyData$MAASScore, axes = F, ylab = "", xlab = "", main = "", type = "l")
  axis(2, at = seq(0, 10, by = 1), las = 2, cex.axis = 0.7)
  lastY = pDailyData$MAASScore[len]
  # maxY <- max(pDailyData$MAASScore)
  # minY <- min(pDailyData$MAASScore)
  points(x = lastDay, y = lastY, col = "deepskyblue1", pch = 19, cex = 1)
  text(x = lastDay, y = maxY, labels = maxY, pos = 1, cex = 1.5, col = "green", offset = 0.8)
  text(x = lastDay, y = maxY, labels = lastY, pos = 1, cex = 1.5, col = "blue", offset = 2.0)
  text(x = lastDay, y = maxY, labels = minY, pos = 1, cex = 1.5, col = "red", offset = 3.4)
  loc <- par("usr")
  text(loc[1], loc[4], "MAAS Score", pos = 3, xpd = T)
  # Panel 4: wellbeing score.
  # NOTE(review): the stray double comma below (and in the next two plot
  # calls) passes an empty argument that falls through to plot's default --
  # tidy when convenient.
  plot(pDailyData$DayNo, pDailyData$WellScore,, axes = F, ylab = "", xlab = "", main = "", type = "l")
  axis(2, at = seq(0, 30, by = 2), las = 2, cex.axis = 0.7)
  lastY <- pDailyData$WellScore[len]
  # maxY <- max(pDailyData$WellScore)
  # minY <- min(pDailyData$WellScore)
  points(x = lastDay, y = lastY, col = "deepskyblue1", pch = 19, cex = 1)
  text(x = lastDay, y = maxY, labels = maxY, pos = 1, cex = 1.5, col = "green", offset = 0.8)
  text(x = lastDay, y = maxY, labels = lastY, pos = 1, cex = 1.5, col = "blue", offset = 2.0)
  text(x = lastDay, y = maxY, labels = minY, pos = 1, cex = 1.5, col = "red", offset = 3.4)
  loc <- par("usr")
  text(loc[1], loc[4], "Wellbeing Score", pos = 3, xpd = T)
  # Panel 5: PANAS negative-affect score.
  plot(pDailyData$DayNo, pDailyData$PANASNAScore,, axes = F, ylab = "", xlab = "", main = "", type = "l")
  axis(2, at = seq(0, 30, by = 2), las = 2, cex.axis = 0.7)
  lastY <- pDailyData$PANASNAScore[len]
  # maxY <- max(pDailyData$PANASNAScore)
  # minY <- min(pDailyData$PANASNAScore)
  points(x = lastDay, y = lastY, col = "deepskyblue1", pch = 19, cex = 1)
  text(x = lastDay, y = maxY, labels = maxY, pos = 1, cex = 1.5, col = "green", offset = 0.8)
  text(x = lastDay, y = maxY, labels = lastY, pos = 1, cex = 1.5, col = "blue", offset = 2.0)
  text(x = lastDay, y = maxY, labels = minY, pos = 1, cex = 1.5, col = "red", offset = 3.4)
  loc <- par("usr")
  text(loc[1], loc[4], "PANAS-NA Score", pos = 3, xpd = T)
  # Panel 6: PANAS positive-affect score.
  plot(pDailyData$DayNo, pDailyData$PANASPAScore,, axes = F, ylab = "", xlab = "", main = "", type = "l")
  axis(2, at = seq(0, 30, by = 2), las = 2, cex.axis = 0.7)
  lastY <- pDailyData$PANASPAScore[len]
  # maxY <- max(pDailyData$PANASPAScore)
  # minY <- min(pDailyData$PANASPAScore)
  points(x = lastDay, y = lastY, col = "deepskyblue1", pch = 19, cex = 1)
  text(x = lastDay, y = maxY, labels = maxY, pos = 1, cex = 1.5, col = "green", offset = 0.8)
  text(x = lastDay, y = maxY, labels = lastY, pos = 1, cex = 1.5, col = "blue", offset = 2.0)
  text(x = lastDay, y = maxY, labels = minY, pos = 1, cex = 1.5, col = "red", offset = 3.4)
  loc <- par("usr")
  text(loc[1], loc[4], "PANAS-PA Score", pos = 3, xpd = T)
  # Shared x axis (day numbers) below the bottom panel.
  axis(1, pos = c(-1), at = seq(startDay, lastDay, by = 1), cex = 0.7)
  loc <- par("usr")
  mtext(" Day No.", adj = 0, side = 1, outer = TRUE, cex = 0.7)
  dev.off()
  #sparklines(dailySparks)
}
|
2c707f92c4bc87e32e641bae38cde1ef876a7ed9
|
9e6c6d3ea78d408a6746fcdeca6ff0d3a8a3308c
|
/man/convert_et.Rd
|
14a4683526a9e21a173d6b33c8539d3a6fc20ed5
|
[] |
no_license
|
stineb/rbeni
|
36f28d38f58301d2af24255e9d63fe5ac6809ebe
|
2f9d26d0a286c550cb90ee9d30a1f2b6c3b112f6
|
refs/heads/master
| 2023-02-18T22:18:52.856980
| 2023-02-16T17:29:09
| 2023-02-16T17:29:09
| 167,402,490
| 3
| 6
| null | 2020-09-25T09:35:32
| 2019-01-24T16:49:15
|
R
|
UTF-8
|
R
| false
| true
| 939
|
rd
|
convert_et.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convert_et.R
\name{convert_et}
\alias{convert_et}
\title{Convert evapotranspiration to mm}
\usage{
convert_et(et_e, tc, elv = 0, return_df = FALSE)
}
\arguments{
\item{et_e}{A numeric value or vector specifying evapotranspiration in energy units (W m-2)}
\item{tc}{A numeric value or vector specifying temperature in degrees Celsius}
\item{elv}{A numeric value or vector specifying elevation above sea level (m). Defaults to 0.}
\item{return_df}{A logical specifying whether a data frame (single column for ET) should be returned.}
}
\value{
A numeric value or vector, or, if \code{return_df = TRUE}, a data frame (tibble) with ET
values in mass units (mm).
}
\description{
Converts evapotranspiration (ET) measurements given in energy units (here W m-2)
to mass units (here mm water, corresponding to kg m-2). Adopted from SPLASH (Davis et al., 2017 GMD).
}
|
8a8f630703468b10e18a3110154c98eed47537e1
|
2e627e0abf7f01c48fddc9f7aaf46183574541df
|
/PBStools/man/imputeRate.Rd
|
428657c90f6efee0d049849fb64038f4bf366b03
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
pbs-software/pbs-tools
|
30b245fd4d3fb20d67ba243bc6614dc38bc03af7
|
2110992d3b760a2995aa7ce0c36fcf938a3d2f4e
|
refs/heads/master
| 2023-07-20T04:24:53.315152
| 2023-07-06T17:33:01
| 2023-07-06T17:33:01
| 37,491,664
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,581
|
rd
|
imputeRate.Rd
|
\name{imputeRate}
\alias{imputeRate}
\title{Impute Rate of Return for an Investment}
\description{
Impute the rate of return for an investment that experiences
regular or irregular contributions and/or withdrawals.
}
\usage{
imputeRate(qtName="Ex03_Portfolio", dbName="Examples",
AID=1, pathN=2, hnam=NULL)
}
\arguments{
\item{qtName}{Name of query or table in a Microsoft ACCESS file (\code{.mdb}).}
\item{dbName}{Name of the Microsoft ACCESS file (\code{.mdb}).}
\item{AID}{Numeric specifying account ID.}
\item{pathN}{Numeric specifying path: \code{1} = current working directory,
\code{2} = SQL directory \cr \code{.../library/PBStools/sql}.}
\item{hnam}{Name of a history file.}
}
\details{
This function creates an interactive GUI that can be used
to impute the rate of return for an investment account or
for a simulated investment. The code adopts the formula for
the \dQuote{internal rate of return} used in Microsoft Excel.
The input data must contain the fields: \cr
\code{AID.....}Account ID number; \cr
\code{date....}Date of the account valuation; \cr
\code{value...}Value of the account as of \code{date}; \cr
\code{cont....}Total contributions/withdrawals from the previous date
up to and including the current date. \cr
\bold{The GUI controls:}
\tabular{ll}{
\code{Data} \tab Open the \code{.mdb} database.\cr
\code{R code} \tab View the function \code{imputeRate}.\cr
\code{Window} \tab View the \emph{window description file}.\cr
\code{MDB} \tab Microsoft Access database name (no extension).\cr
\code{>} \tab Displays available \code{.mdb} files on the specified path (below).\cr
\code{Table} \tab Table or query that contains account information.\cr
\code{>} \tab Displays tables and queries in MDB, choose one.\cr
\code{GET} \tab Get the data from the chosen table.\cr
\code{MDB path} \tab Choice of \code{cwd} (current working directory) or \code{sql} (package SQL directory).\cr
\bold{Inputs} \tab \cr
\code{Account #} \tab Account ID number.\cr
\code{parVec} \tab Data frame specifying \code{val}, \code{min}, \code{max}, \code{active} for parameter \code{rate}.\cr
\code{autoD} \tab If \code{TRUE}, get the date limits of the account automatically.\cr
\code{Start date} \tab Starting date of the investment time series to calculate \code{rate}.\cr
\code{End date} \tab Ending date of the investment time series to calculate \code{rate}.\cr
\code{period} \tab Periods in data source corresponding to the date limits specified.\cr
\bold{Estimation} \tab \cr
\code{Method} \tab Choose one of various non-linear estimation routines.\cr
\code{Controls} \tab Various controls used by \code{calcMin}.\cr
\code{Reset} \tab Button resets \code{parVec} and output boxes (below).\cr
\code{ReInit} \tab Button sets \code{parVec}'s \code{val} to the last estimated \code{prate} (below).\cr
\code{RUN} \tab Runs the rate estimation for the investment interval chosen.\cr
\bold{Simulate} \tab \cr
\code{No Yes Again} \tab Choose to simulate investment data using a random pareto distribution.\cr
\code{start} \tab Starting value of the investment.\cr
\code{rate} \tab True rate of return per period.\cr
\code{nper} \tab Number of periods to run the simulation.\cr
\code{up} \tab Proportion of the time that a contribution is made vs. a withdrawal.\cr
\code{k} \tab Pareto distribution parameter (volatility decreases as k increases).\cr
\bold{Outputs} \tab \cr
\code{decimal places} \tab Number of decimal places for display output.\cr
\code{Iters Evals} \tab Number of iterations and evaluations for the estimation.\cr
\code{prate arate} \tab Starting period rate and annualised rate.\cr
\code{Ctime Etime} \tab Computer and evaluation times (seconds).\cr
\code{prate arate} \tab Estimated period rate and annualised rate.\cr
\code{Fmin0 Fmin} \tab Initial function evaluation and the final function value at minimization.\cr
\code{AIC AICc} \tab Akaike Information Criterion (model fit) and corrected AIC.\cr
\code{message} \tab Message box area reporting results of the minimization.\cr
}
}
\seealso{
\code{\link[PBStools]{calcMA}}, \code{\link[PBStools]{glimmer}}, \code{\link[PBStools]{trend}}
}
\keyword{hplot}
\keyword{optimize}
|
c0335214567dd60212865df8b9a54cf15d116faa
|
4307ddbb84c4973aaa728ddebbd4e4b0b1537096
|
/R/data.R
|
e83e310bccf80e5b6e627892048ac5e2034ac3fa
|
[] |
no_license
|
ms609/TreeDistData
|
8157ab67959ea813c39d98176d6a02480c8a9199
|
15d4901e6bbd639c590c1f8875742db08c0c3f15
|
refs/heads/master
| 2021-06-13T19:52:13.475387
| 2021-05-18T09:24:20
| 2021-05-18T09:24:20
| 196,380,775
| 0
| 1
| null | 2021-05-06T10:19:13
| 2019-07-11T11:23:58
|
R
|
UTF-8
|
R
| false
| false
| 13,006
|
r
|
data.R
|
#' Bullseye test results
#'
#' Implementation and results of a 'Bullseye' test, after that proposed by
#' Kuhner and Yamato (2015).
#'
#' @format
#'
#' `bullseyeTrees` is a list with four elements, named `5 leaves`, `10 leaves`,
#' `20 leaves` and `50 leaves`.
#' Each element contains 1\ifelse{html}{ }{,}000 trees with _n_ leaves, randomly sampled
#' (note: *not* from the uniform distribution) using [`ape::rtree()`].
#'
#' The `bullseyeMorph` prefix refers to the 'subsampling' experiment
#' described by Smith (2020); the `bullMoDi` prefix refers to the
#' 'miscoding' experiment.
#'
#' `bull...Inferred` is a list with four elements, named as in `bullseyeTrees`.
#' Each element contains 1\ifelse{html}{ }{,}000 sub-elements.
#' Each sub-element is a list of
#' ten trees, which have been inferred from progressively more degraded datasets,
#' originally simulated from the corresponding tree in `bullseyeTrees`.
#'
#' `bull...Scores` is a list with four elements, named as in `bullseyeTrees`.
#' Each element contains a three dimensional array, in which the first dimension
#' corresponds to the progressive degrees of degradation, labelled according to
#' the number of characters present or the percentage of tokens switched;
#' the second dimension is named with an abbreviation of the tree similarity /
#' distance metric used to score the trees (see 'Methods tested' below),
#' and the third dimension contains 1\ifelse{html}{ }{,}000
#' entries corresponding to the trees in `bullseyeTrees`.
#' Each cell contains the distance between the inferred tree and the generative
#' tree under the stated tree distance metric.
#'
#' @templateVar vignette 07-bullseye
#' @template seeVignette
#' @templateVar nni_t TRUE
#' @template methodsTested
#' @template dataRaw
#' @references
#' \insertRef{Kuhner2015}{TreeDistData}
#' @template methodRefs
#'
#' @encoding UTF-8
#' @name bullseye
#' @rdname bullseye
'bullseyeTrees'
#' @rdname bullseye
'bullMoDiInferred'
#' @rdname bullseye
'bullMoDiScores'
#' @rdname bullseye
'bullseyeMorphInferred'
#' @rdname bullseye
'bullseyeMorphScores'
#' Distances between random pairs of trees
#'
#' `distanceDistribution25(/50)` are two-dimensional matrices listing the
#' normalized distances between random pairs of bifurcating trees with 25 and
#' 50 leaves drawn from the uniform distribution using
#' [`TreeTools::RandomTree()`] (data object [`randomTreePairs25`]`(/50)`).
#' `pectinateDistances11` reports distances between a pectinate 11-leaf tree
#' and 100\ifelse{html}{ }{,}000 random binary trees.
#'
#' @format
#' Objects of class `matrix` (inherits from `array`) with
#' `r dim(distanceDistribution25)[1]` rows, each corresponding
#' to a tree distance method and is named with its abbreviation
#' (listed in 'Methods tested' below), and
#' 10\ifelse{html}{ }{,}000 (`distanceDistribution25/50`)
#' or 100\ifelse{html}{ }{,}000 (`pectinateDistances11`)
#' columns, listing the calculated distances between each pair of trees.
#'
#' @templateVar nni_t TRUE
#' @template allDistMethods
#' @details
#' # Methods tested
#' - `mafi` (`pectinateDistances11` only): information content of the
#' maximum agreement forest (Smith 2020).
#'
#' @template dataRaw
#' @template methodRefs
#'
#' @seealso Tree pairs between which distances were calculated are available
#' in data objects [`randomTreePairs25`] and [`randomTreePairs50`].
#'
#' @name distanceDistributions
#' @encoding UTF-8
NULL
#' @rdname distanceDistributions
"distanceDistribution25"
#' @rdname distanceDistributions
"distanceDistribution50"
#' @rdname distanceDistributions
"pectinateDistances11"
#' Evaluating tree distance metrics by cluster recovery
#'
#' An effective measure of tree distance will recover clusters of similar
#' trees. These datasets contain the results of tests modelled on those
#' in Lin _et al._ (2012).
#'
#' I used three approaches to generate clusters of similar trees, and tested
#' each metric in its ability to recover these clusters (Lin _et al._, 2012).
#'
#' For the first test, I generated 500 datasets of 100 binary trees with
#' _n_ = 40 leaves.
#' Each set of trees was created by randomly selecting two _k_-leaf
#' 'skeleton' trees, where _k_ ranges from 0.3 _n_ to 0.9 _n_.
#' From each skeleton, 50 trees were generated by adding each of the remaining
#' _n_ - _k_ leaves in turn at a uniformly selected point on the tree.
#'
#' For the second and third test, each dataset was constructed by selecting at
#' random two binary 40-leaf trees.
#' From each starting tree, I generated 50 binary trees by conducting _k_
#' leaf-label interchange (LLI) operations (test two) or _k_ subtree prune and
#' regraft (SPR) operations (test three) on the starting tree.
#' An LLI operation swaps the positions of two randomly selected leaves,
#' without affecting tree shape; an SPR operation moves a subtree to a new
#' location within the tree.
#'
#' For each dataset, I calculated the distance between each pair of trees.
#' Trees where then partitioned into clusters using five methods,
#' using the packages \pkg{stats} and \pkg{cluster}.
#' I define the success rate of each distance measure as the proportion of
#' datasets in which every tree generated from the same skeleton was placed
#' in the same cluster.
#'
#' @format A three-dimensional array.
#'
#' Rows correspond to the clustering methods:
#'
#' - `spc`: spectral clustering
#'
#' - `pam`: partitioning around medioids
#'
#' - `h...`: hierarchical clustering using:
#' `h.cmp`, complete;
#' `h.sng`, single; and
#' `h.avg`, average linkage.
#'
#' Columns correspond to distance metrics; see 'Methods tested' below.
#'
#' Slices correspond to values of _k_:
#'
#' - `linTestOneResults`: _k_ = 30, 40, 50, 60, 70
#'
#' - `linTestTwoResults`: _k_ = 10, 20, 30, 40
#'
#' - `linTestSPRResults`: _k_ = 30, 40, 50, 60, 70
#'
#'
#' @templateVar vignette 06-lin-cluster-recovery
#' @template seeVignette
#'
#' @templateVar nni_t FALSE
#' @template methodsTested
#'
#' @template dataRaw
#'
#' @template methodRefs
#' @references \insertRef{Lin2012}{TreeDistData}
#' @name linTests
#' @rdname linTests
'linTestOneResults'
#' @rdname linTests
'linTestTwoResults'
#' @rdname linTests
'linTestSPRResults'
#' Mean distances between random pairs of trees
#'
#' A three-dimensional array listing the distances between
#' 1\ifelse{html}{ }{,}000
#' random pairs of trees drawn from the uniform distribution using
#' `RandomTree(nTip, root = TRUE)`.
#'
#' Distances were calculated using [`AllDists()`]; see the documentation at
#' there for details of methods and their normalization.
#'
#' Rows are named with abbreviations of the tree comparison metrics tested
#' (see 'Methods tested' below).
#'
#' Columns list the summary statistics of calculated tree distances: the
#' minimum (`min`),
#' 1%, 5%, 10%, 25%, 50% (i.e. median), 75%, 90%, 95%, 99% percentiles,
#' maximum (`max`), mean (`mean`) and standard deviation (`sd`).
#'
#' The third dimension lists the number of leaves in the trees compared.
#'
#' @templateVar vignette 09-expected-similarity
#' @template seeVignette
#' @template dataRaw
#' @templateVar nni_t FALSE
#' @template allDistMethods
#' @template methodRefs
#' @encoding UTF-8
"randomTreeDistances"
#' Pairs of random trees
#'
#' Lists of 10\ifelse{html}{ }{,}000 pairs of binary trees
#' drawn from the uniform distribution using [`TreeTools::RandomTree()`].
#'
#' @seealso
#' The distances between these pairs of trees are recorded in
#' the data objects [`distanceDistribution25`] and [`distanceDistribution50`].
#'
#' Correlations of these distances with differences in tree balance,
#' measured using the total cophenetic index (Mir _et al._ 2013),
#' are recorded in [`balance25`] and [`balance50`].
#'
#' @template dataRaw
#' @references
#' \insertRef{Mir2013}{TreeTools}
#' @name randomTreePairs
#' @encoding UTF-8
NULL
#' @rdname randomTreePairs
"randomTreePairs25"
#' @rdname randomTreePairs
"randomTreePairs50"
#' Correlation between tree distances and tree balance
#'
#' The balance of each tree in the lists [`randomTreePairs25`] and
#' [`randomTreePairs50`] was quantified using the total cophenetic
#' index (Mir _et al._ 2013).
#' The difference in balance for each pair of trees was then correlated with
#' the distance between those trees, and the r² value recorded.
#'
#' @template dataRaw
#' @references
#' \insertRef{Mir2013}{TreeTools}
#' @name treeBalance
#' @encoding UTF-8
NULL
#' @rdname treeBalance
"balance25"
#' @rdname treeBalance
"balance50"
#' Distances between unrooted seven-leaf trees
#'
#' Distances between each possible pairing of the 945 unrooted seven-leaf trees
#' (equivalent to rooted 6-leaf trees). Following Kendall and Colijn (2016).
#'
#' Each list entry is named with the abbreviation of the corresponding tree
#' distance method (see 'Methods tested' below).
#'
#' Each item in the list contains a 945×945 matrix reporting the distance
#' between each pair of seven-leaf trees. The first 630 trees are pectinate
#' (tree shape 0), the final 315 are balanced (tree shape 1).
#'
#' @templateVar nni_t TRUE
#' @template allDistMethods
#'
#' @examples
#' library('TreeTools', quietly = TRUE, warn.conflicts = FALSE)
#'
#' # Pectinate unrooted tree shape:
#' plot(UnrootedTreeWithShape(0, 7))
#'
#' # Balanced unrooted tree shape:
#' plot(UnrootedTreeWithShape(1, 7))
#' @template dataRaw
#' @template methodRefs
#'
#' @encoding UTF-8
"sevenTipDistances"
#' Shape effect
#'
#' Results of tests exploring the influence of tree shape on reconstructed
#' tree distances.
#'
#' For each of the four binary unrooted tree shapes on eight leaves, I labelled
#' leaves at random until I had generated 100 distinct trees.
#'
#' I measured the distance from each tree to each of the other 399 trees.
#'
#' @templateVar vignette 05-tree-shape
#' @template seeVignette
#'
#' @format A list of length `r length(shapeEffect)`.
#' Each entry of the list is named according to the abbreviation of the
#' corresponding method (see 'Methods tested' below).
#'
#' Each entry is itself a list of ten elements. Each element contains a numeric
#' vector listing the distances between each pair of trees with shape _x_ and
#' shape _y_, where:
#'
#' `x = 1, 1, 1, 1, 2, 2, 2, 3, 3, 4`
#' and
#' `y = 1, 2, 3, 4, 2, 3, 4, 3, 4, 4`.
#'
#' As trees are not compared with themselves (to avoid zero distances), elements
#' where _x_ = _y_ contain 4\ifelse{html}{ }{,}950 distances,
#' whereas other elements contain 5\ifelse{html}{ }{,}050
#' distances.
#'
#' @templateVar nni_t TRUE
#' @template methodsTested
#' @template methodRefs
#' @template dataRaw
#' @encoding UTF-8
'shapeEffect'
#' Tree distance and SPR moves
#'
#' Datasets testing whether separating trees by increasingly many moves
#' results in a corresponding increase in their distance.
#'
#' I generated a chain of 100 50-leaf trees, starting from a pectinate tree
#' and deriving each tree in turn by performing an SPR operation on the previous
#' tree.
#' A consistent measure of tree similarity should correlate with the number of
#' SPR operations separating a pair of trees in this chain.
#' This said, because one SPR operation may counteract some of the difference
#' introduced by a previous one, perfect correlation is unlikely.
#'
#' @format A list of length 21.
#' Each entry is named according to the corresponding tree distance method; see
#' 'Methods tested' below.
#'
#' Each member of the list is a 100 × 100 matrix listing the distance
#' between each pair of trees in the SPR chain (see 'Details'),
#' numbered from 1 to 100.
#'
#'
#' @templateVar vignette 08-spr-walking
#' @template seeVignette
#' @templateVar nni_t TRUE
#' @template methodsTested
#' @template methodRefs
#' @template dataRaw
#' @encoding UTF-8
'sprDistances'
#' Method parameters
#'
#' Metadata for methods examined in this package.
#'
#' `tdAbbrevs` lists abbreviations for each method, using expressions to allow
#' formatting of text when plotted.
#'
#' `tdPlotSequence` lists the 20 methods discussed in the main article,
#' in the sequence in which they are plotted in figures.
#'
#' `tdMdAbbrevs` uses markdown formatting.
#'
#' `tdBoxAbbrevs` uses line breaks to fit abbreviations in an approximately
#' square bounding box.
#'
#' `tdCol` provides each method with a suitable plotting colour.
#'
#' `TDFunctions` lists for each method a function that will calculate the
#' distance between two trees or lists of trees.
#'
#' `TDPair` lists for each method a function to calculate the distance
#' between one tree (`tr`) and another tree (`ref`).
#'
#' @template dataRaw
#' @name TreeDistMethods
#' @rdname TreeDistMethods
'tdAbbrevs'
#' @rdname TreeDistMethods
'tdPlotSequence'
#' @rdname TreeDistMethods
'tdMdAbbrevs'
#' @rdname TreeDistMethods
'tdBoxAbbrevs'
#' @rdname TreeDistMethods
'tdMethods'
#' @rdname TreeDistMethods
'tdCol'
#' @rdname TreeDistMethods
'TDFunctions'
#' @rdname TreeDistMethods
'TDPair'
|
5c4ea3386dc6cee336eb1f1dde801a069de4b05f
|
67986f8ae2ee39d53fa028c8646505c7f0a7d6c3
|
/run_analysis.R
|
c0efc9e30970766666d0f466942f231a72d9b049
|
[] |
no_license
|
M1chael50/getdata-016_CourceProject
|
54bdc7a52ce4bc1926317665381540cce61a6224
|
355d1d0a75b0582a0b3b56bc0e50cfcc9e914e52
|
refs/heads/master
| 2021-01-13T02:19:35.850916
| 2014-12-21T18:15:00
| 2014-12-21T18:15:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,213
|
r
|
run_analysis.R
|
# run_analysis.R -- an R script to download, prepare and save the UCI HAR
# tidy data set (Getting and Cleaning Data course project).

# Create the data folder if not already present.
if (!file.exists("data")) { dir.create("data") }

# Download the zipped data set only if we don't already have it (the original
# re-downloaded ~60 MB on every run). mode = "wb" keeps the zip intact on
# Windows; the default download method is used instead of hard-coding
# method = "curl", which fails on systems without curl.
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
localZipFile <- "./data/getdata_projectfiles_UCI HAR Dataset.zip"
if (!file.exists(localZipFile)) {
  download.file(fileUrl, destfile = localZipFile, mode = "wb")
}
unzip(localZipFile, exdir = "./data")
dateDownloaded <- date()

# Read in the features and activity_labels lookup files.
features <- read.table("./data/UCI HAR Dataset/features.txt")
activity_labels <- read.table("./data/UCI HAR Dataset/activity_labels.txt")

# Read in the full _test data files; feature names become the column names
# (read.table mangles them into syntactic names via make.names).
X_test <- read.table("./data/UCI HAR Dataset/test/X_test.txt", col.names = features[, 2])
subject_test <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
y_test <- read.table("./data/UCI HAR Dataset/test/y_test.txt")

# Read in the _train data files.
X_train <- read.table("./data/UCI HAR Dataset/train/X_train.txt", col.names = features[, 2])
subject_train <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
y_train <- read.table("./data/UCI HAR Dataset/train/y_train.txt")

#############################################################
# 1 Merge the training and the test sets to create one data set
#############################################################
# Name the id columns, then bind subject + activity + measurements column-wise.
colnames(subject_test) <- "subject"
colnames(y_test) <- "activity"
Combined_test <- cbind(subject_test, y_test, X_test)

colnames(subject_train) <- "subject"
colnames(y_train) <- "activity"
Combined_train <- cbind(subject_train, y_train, X_train)

# Tag each row with its origin before stacking the two sets.
Combined_train$dataType <- "training"
Combined_test$dataType <- "test"
mergedData <- rbind(Combined_train, Combined_test)

###########################################################################################
# 2 Extract only the measurements on the mean and standard deviation for each measurement.
###########################################################################################
library(data.table)
# Convert to data.table so the %like% operator can match column names.
features <- data.table(features)
meanAndStdList <- rbind(features[V2 %like% "mean"], features[V2 %like% "std"])
# make.names() mirrors the name mangling read.table applied via col.names,
# so these names match the columns of mergedData.
meanAndStdNames <- make.names(meanAndStdList[, V2])
meanAndStdCols <- c("subject", "activity", meanAndStdNames)
meanAndStdData <- mergedData[, meanAndStdCols]

##########################################################################
# 3 Uses descriptive activity names to name the activities in the data set
##########################################################################
colnames(activity_labels) <- c("activity", "activityDesc")
activityDescription <- merge(meanAndStdData, activity_labels, by.x = "activity", by.y = "activity")
# Drop the numeric activity code now that the descriptive column is attached.
activityDescription$activity <- NULL

######################################################################
# 4 Appropriately labels the data set with descriptive variable names.
#   (Done above: measurement columns were named from features.txt via
#   col.names, and the id columns were renamed explicitly.)
######################################################################

###############################################################################################################################################
# 5 From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
###############################################################################################################################################
library(reshape2)
meltData <- melt(activityDescription, id.var = c("activityDesc", "subject"))
tidyData <- dcast(meltData, activityDesc + subject ~ variable, mean)

# Write out the tidy data set.
write.table(tidyData, file = "./tidyDataSet.txt", row.name = FALSE)
|
34ffcb48a0dd1ba626466232529710feec658edb
|
8e1e0255ef2796e9ab2b636f16c846a157046457
|
/rgl_texture.R
|
e85ff2ce6fbc17cf1a99953110dec04e8b6fa8b9
|
[] |
no_license
|
r-gris/grisexamples
|
f4f966b87442b99f185ea9eebc49866edae8677f
|
1e8ab7f6f4a9c9094406e0607c9c6fa8cf82b1e5
|
refs/heads/master
| 2020-04-13T23:22:22.981563
| 2016-08-04T14:16:46
| 2016-08-04T14:16:46
| 50,912,125
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,685
|
r
|
rgl_texture.R
|
## quad index template: given a pair of adjacent corner indices `xp` within a
## corner row of width `nc`, return the four vertex indices of the quad they
## span, taken in the order 1,2,4,3 (last two swapped -- presumably the
## perimeter winding rgl expects; see how bgl() fills ob$ib).
p4 <- function(xp, nc) {
  row_shift <- c(0, 0, nc, nc)  # second pair lies one corner-row further on
  corners <- xp + row_shift
  corners[c(1L, 2L, 4L, 3L)]
}
## offset pairs from a vector: row i of the result is c(x[i], x[i + 1]).
prs <- function(x) {
  firsts <- head(x, -1)   # all but the last element
  seconds <- tail(x, -1)  # all but the first element
  # deparse.level = 0 so no column names are attached (same as the original,
  # where the arguments were calls rather than symbols)
  cbind(firsts, seconds, deparse.level = 0)
}
## pixel corners from a raster: extend the grid by one cell in x and y, then
## shift by half a cell so the cell centres of the result coincide with the
## cell corners of the input; return those coordinates.
edgesXY <- function(x) {
  rx <- res(x)[1]
  ry <- res(x)[2]
  grown <- extend(x, extent(xmin(x), xmax(x) + rx, ymin(x), ymax(x) + ry))
  shifted <- shift(grown, x = -rx / 2, y = -ry / 2)
  coordinates(shifted)
}
## Build a quad mesh (rgl mesh3d) from a raster: one quad per raster cell,
## with mesh vertices on the cell corners.
##
##   x     raster object; only the first layer is used
##   z     optional raster sampled (bilinear) at the vertices for heights;
##         if NULL the mesh is flat at z = 0
##   na.rm if TRUE, drop quads whose source cell in x is NA
bgl <- function(x, z = NULL, na.rm = FALSE) {
  x <- x[[1]] ## just the oneth raster for now
  ##exy <- as.matrix(expand.grid(edges(x), edges(x, "y")))
  ## vertex coordinates: every cell corner, an (ncol+1) x (nrow+1) grid
  exy <- edgesXY(x)
  ## quad index template for the first row of cells; each column holds the
  ## four corner indices of one quad (see p4/prs)
  ind <- apply(prs(seq(ncol(x) + 1)), 1, p4, nc = ncol(x) + 1)
  ## all face indexes: replicate the first-row template down every raster
  ## row, offsetting by one corner-row (ncol+1 vertices) per row
  ind0 <- as.vector(ind) +
    rep(seq(0, length = nrow(x), by = ncol(x) + 1), each = 4 * ncol(x))
  ## need to consider normalizing vertices here
  if (na.rm) {
    ## reshape to one column per quad, then keep only quads whose source
    ## cell value is not NA
    ind1 <- matrix(ind0, nrow = 4)
    ind0 <- ind1[,!is.na(values(x))]
  }
  ## dummy object from rgl, reused as a shell for our vertices and indices
  ob <- rgl::oh3d()
  ## vertex heights: bilinear sample of z at each corner, or flat zero
  if (!is.null(z)) z <- extract(z, exy, method = "bilinear") else z <- 0
  ## homogeneous vertex coordinates (x, y, z, 1), one vertex per column
  ob$vb <- t(cbind(exy, z, 1))
  ## quad index matrix: one quad per column, four vertex indices each
  ob$ib <- matrix(ind0, nrow = 4)
  ob
}
## Demo script: drape a Google satellite texture over SRTM elevation near
## the Grand Canyon and render it with rgl.
library(raster)
library(dismo)
library(rgdal)
library(rgl)
## focal point: longitude, latitude
ll <- c(-112.1, 36.1)
## 41 Mb
## download SRTM elevation data (something's wrong with raster::getData)
##srtm <- getData("SRTM", lon = ll[1], lat = ll[2])
f <- "ftp://xftp.jrc.it/pub/srtmV4/tiff/srtm_14_05.zip"
tif <- gsub("zip$", "tif", basename(f))
## download and unzip the SRTM tile only if the GeoTIFF is not already local
if (!file.exists(basename(tif))) {
  if (!file.exists(basename(f))) download.file(f, basename(f), mode = "wb")
  unzip(basename(f))
}
srtm <- raster("srtm_14_05.tif")
## crop to a window around ll; the two length-2 vectors are the x- and
## y-ranges (NOTE(review): confirm extent() reads them as xmin/xmax, ymin/ymax)
srtm <- crop(srtm, extent(ll[1] + c(-1, 1) * 0.5, ll[2] + c(-1, 1) * 0.7))
## build mesh3d object
## we are plotting in long/lat so rescale heights (metres / 30000) to keep
## the z axis comparable to degrees
ro <- bgl(srtm, z = srtm/30000)
# 0.7Mb
## download a google satellite image with dismo
gm <- gmap(x = srtm, type = "satellite", scale = 2)
## 1. Create PNG for texture
# we need RGB expanded (gmap gives a palette)
rgb1 <- col2rgb(gm@legend@colortable)
img <- brick(gm, gm, gm)
## cell values appear to be 0-based palette indices; +1 for R's 1-based lookup
cells <- values(gm) + 1
img <- setValues(img, cbind(rgb1[1, cells], rgb1[2, cells], rgb1[3, cells]))
## finally, create RGB PNG image to act as a texture image
writeGDAL(as(img, "SpatialGridDataFrame"), "gm.png", drivername = "PNG", type = "Byte", mvFlag = 255)
## 2. Remap the image coordinates (Mercator) onto elevation coordinates (longlat), and convert to PNG [0, 1, 0, 1]
## project our mesh to the image and get it in [0,1,0,1] of texture space
tcoords <- xyFromCell(setExtent(gm, extent(0, 1, 0, 1)), cellFromXY(gm, project(t(ro$vb[1:2, ]), projection(gm))))
shade3d(ro, col = "white", texture = "gm.png", texcoords = tcoords[ro$ib, ])
|
241f743a9f49745376ef20c66f086c148018df8f
|
4e3b9d7a25a61763cebc660778f8c673a94ac6ac
|
/man/scores.Rd
|
626a61d8e354baa0a25bb9dccd8fb3f9c6ac1aa8
|
[] |
no_license
|
paupuigdevall/GenomicScores
|
d59afb72181642776bcfc6527e7c148f67c4bd97
|
dbd072fbcdb9aafd6d49c8104ecd474e2bd777fe
|
refs/heads/master
| 2021-01-17T22:07:05.786095
| 2017-03-14T11:14:51
| 2017-03-14T11:14:51
| 84,188,155
| 0
| 0
| null | 2017-03-07T10:48:58
| 2017-03-07T10:48:58
| null |
UTF-8
|
R
| false
| false
| 2,779
|
rd
|
scores.Rd
|
\name{scores}
\alias{scores,GScores,GRanges-method}
\alias{scores}
\alias{availableGScores}
\alias{getGScores}
\title{Accessing genomic scores}
\description{
Functions to access genomic scores through \code{GScores} objects.
}
\usage{
availableGScores()
getGScores(x)
\S4method{scores}{GScores,GRanges}(object, gpos, ...)
}
\arguments{
\item{x}{A \code{character} vector of length 1 specifying the genomic scores resource to
fetch. The function \code{availableGScores()} shows the available genomic scores
resources.}
\item{object}{A \code{GScores} object.}
\item{gpos}{A \code{GRanges} object with positions from where to retrieve
genomic scores.}
\item{...}{In the call to the \code{scores}() method one can additionally
set the following arguments:
\itemize{
\item{summaryFun}{Function to summarize genomic scores when more than one
position is retrieved. By default, this is set to the
arithmetic mean, i.e., the \code{mean()} function.}
\item{coercionFun}{Function to coerce the stored genomic scores, before the
summary function is applied. By default genomic scores are
coerced to real (\code{numeric}-class) values, i.e., to the
\code{as.numeric()} function.}
\item{caching}{Flag setting whether genomic scores per chromosome should be
kept cached in memory (\code{TRUE}, default) or not
(\code{FALSE}). The latter option minimizes the memory footprint
but slows down the performance when the \code{scores()} method
is called multiple times.}
}}
}
\details{
The method \code{scores()} takes as first argument a \code{GScores-class} object
that can be loaded from an annotation package or from an \code{AnnotationHub} resource.
These two possibilities are illustrated in the examples below.
}
\author{R. Castelo}
\seealso{
\code{\link[phastCons100way.UCSC.hg19]{phastCons100way.UCSC.hg19}}
\code{\link[phastCons100way.UCSC.hg38]{phastCons100way.UCSC.hg38}}
}
\examples{
## accessing genomic scores from an annotation package
if (require(phastCons100way.UCSC.hg19)) {
library(GenomicRanges)
gsco <- phastCons100way.UCSC.hg19
gsco
scores(gsco, GRanges(seqnames="chr7", IRanges(start=117232380, width=5)))
}
## accessing genomic scores from AnnotationHub resources
\dontrun{
availableGScores()
gsco <- getGScores("phastCons100way.UCSC.hg19")
scores(gsco, GRanges(seqnames="chr7", IRanges(start=117232380, width=5)))
}
}
\keyword{datasets}
|
8f855769ffd5f99a85a11791ffc3029a410383eb
|
19554efd8681305208e7299d8565318779e10240
|
/server.R
|
0963c909115441a0378ed9f1e67b5cdeae1879e7
|
[] |
no_license
|
JAngstenberger/DevDataProd
|
3a9496fdad2d8a575cd03bce5b9d7fe3705a2301
|
de4998a3b6a536e7fa6e991759a076f5e90db114
|
refs/heads/master
| 2016-08-12T20:54:23.948012
| 2015-09-26T11:36:10
| 2015-09-26T11:36:10
| 43,202,253
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,210
|
r
|
server.R
|
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
# Plotting
library(ggplot2)
library(rCharts)
library(ggvis)
# Data processing libraries
library(data.table)
library(reshape2)
library(dplyr)
# Required by includeMarkdown
library(markdown)
# It has to loaded to plot ggplot maps on shinyapps.io
# library(mapproj)
# library(maps)
# Data table shown by the app: mtcars with the car model as a column,
# sorted by model name, and numeric codes recoded as labelled factors.
dt <- add_rownames(mtcars, var = "cmod")
dt <- arrange(dt, cmod)
# Recode from dt, NOT mtcars: arrange() has reordered the rows by model name,
# so indexing the unsorted mtcars here (as the original did) misaligns the
# gear/am/cyl values with their rows.
dt$gear <- factor(dt$gear, levels = c(3, 4, 5), labels = c("3gears", "4gears", "5gears"))
dt$am <- factor(dt$am, levels = c(0, 1), labels = c("Automatic", "Manual"))
dt$cyl <- factor(dt$cyl, levels = c(4, 6, 8), labels = c("4cyl", "6cyl", "8cyl"))
# Vector of all car model names, used to populate the checkbox control.
cars <- dt$cmod
shinyServer(function(input, output, session) {
  # Reactive values holding the currently selected car models; updated by
  # the clear-all / select-all buttons below.
  values <- reactiveValues()
  values$cars <- cars
  # Checkbox group of car models, re-rendered whenever values$cars changes.
  output$carsControls <- renderUI({
    checkboxGroupInput("cars", "Car Models: ", cars, selected=values$cars)
  })
  # Observers for the clear-all / select-all action buttons. A button's
  # initial value is 0, so bail out until it has been clicked at least once.
  observe({
    if(input$clear_all == 0) return()
    values$cars <- c()
  })
  observe({
    if(input$select_all == 0) return()
    values$cars <- cars
  })
  # Single reactive subset of the data, shared by the table and all plots.
  # (The original defined this reactive but then recomputed the identical
  # subset inline in every renderPlot; reuse it instead.)
  dt.cars <- reactive({
    subset(dt, dt$cmod %in% input$cars)
  })
  # Render the data table of the selected cars.
  output$dt.cars <- renderDataTable(
    {dt.cars()}, options = list(bFilter = FALSE, iDisplayLength = 20))
  # Kernel density plot of mpg, grouped by number of gears (colour fill).
  output$distribGasMilage <- renderPlot({
    s <- qplot(mpg, data=dt.cars(), geom="density", fill=gear, alpha=I(.5),
               xlab="Miles Per Gallon", ylab="Density")
    print(s)
  })
  # Boxplots of mpg by number of gears, with jittered observations overlaid.
  output$BoxPlot <- renderPlot({
    s <- qplot(gear, mpg, data=dt.cars(), geom=c("boxplot", "jitter"),
               fill=gear, xlab="", ylab="Miles per Gallon")
    print(s)
  })
  # Separate mpg-on-weight linear regressions for each cylinder count.
  output$Regression <- renderPlot({
    s <- qplot(wt, mpg, data=dt.cars(), geom=c("point", "smooth"),
               method="lm", formula=y~x, color=cyl,
               xlab="Weight", ylab="Miles per Gallon")
    print(s)
  })
})
|
4df590d48bbad94e3502908bf5f585ba944aa69f
|
597a5c9f177db6f86f7c0e28dcae18052159fc8e
|
/man/grGeneAnnot.Rd
|
81413f966c04efa1f8c5644291a6667fca939516
|
[] |
no_license
|
demuellae/muRtools
|
241e3d1bdc25ada69c54d4b088980433bc7ea15d
|
74db0ac00c56bd39d95a44b52e99bbe03c22d871
|
refs/heads/master
| 2023-06-21T15:07:03.928229
| 2023-06-20T08:49:25
| 2023-06-20T08:49:25
| 18,805,524
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 978
|
rd
|
grGeneAnnot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genomicRegions.R
\name{grGeneAnnot}
\alias{grGeneAnnot}
\title{grGeneAnnot}
\usage{
grGeneAnnot(
gr,
rsdb,
geneSetName = "genes_protein_coding",
geneSetCollection = "Gencode",
maxDist = 1e+05
)
}
\arguments{
\item{gr}{\code{GRanges} object to liftOver}
\item{rsdb}{\code{RegionSetDB} object containing a region set database from which gene annotation can be retrieved}
\item{geneSetName}{Name of the region set containing gene annotation in the \code{RegionSetDB}}
\item{geneSetCollection}{Name of the region set collection containing gene annotation in the \code{RegionSetDB}}
\item{maxDist}{maximum distance for matching to nearest gene}
}
\value{
\code{data.frame} containing information on the nearest gene for each element in \code{gr}
}
\description{
get gene annotation for a \code{GRanges} object using a \code{RegionSetDB} region database object by linking to the nearest gene
}
|
158113ab7085ba6c24f088e2f3f78e6d823125b5
|
30a4a06543abd1183da998acb931e98f40918088
|
/plot4.R
|
81cec57ea9101cbf6b4b896423b13a2d0e9d7c6b
|
[] |
no_license
|
srujanrouthu/ExData_Plotting1
|
a1912d65b1d44b400e7162e12a460e044d952f1e
|
0f339a787dcd37f44f4e0c91ae405698233ccce2
|
refs/heads/master
| 2021-01-18T12:45:59.188424
| 2014-08-10T14:44:11
| 2014-08-10T14:44:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,238
|
r
|
plot4.R
|
# plot4: 2x2 panel of household power-consumption plots for the two days
# 2007-02-01 and 2007-02-02, written to plot4.png.
data <- read.table("./household_power_consumption.txt", sep = ";", header = TRUE);
# Combine the separate Date and Time columns into one POSIXlt timestamp.
DatTim <- paste(data$Date, data$Time);
DT <- strptime(DatTim, "%d/%m/%Y %H:%M:%S");
data <- cbind(DT, data);
# Columns 4-9 may have been read as factors; going through as.character
# converts the printed values rather than the internal factor codes.
for (i in 4:9) data[, i] <- as.numeric(as.character(data[, i]));
# Keep only the two target days (string comparison coerces to the DT type).
subdata <- subset(data, DT >= "2007-02-01 00:00:00" & DT <= "2007-02-02 23:59:59");
png("plot4.png")
par(mfrow = c(2,2))  # 2x2 grid of panels, filled row-wise
# Top-left: global active power over time.
plot(subdata$DT, subdata$Global_active_power, type = "n", xlab = "", ylab = "Global Active Power (kilowatts)");
lines(subdata$DT, subdata$Global_active_power);
# Top-right: voltage.
plot(subdata$DT, subdata$Voltage, type = "n", xlab = "datetime", ylab = "Voltage");
lines(subdata$DT, subdata$Voltage);
# Bottom-left: the three sub-metering series overlaid in one panel.
plot(subdata$DT, subdata$Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering", ylim = c(0, 40));
lines(subdata$DT, subdata$Sub_metering_1);
lines(subdata$DT, subdata$Sub_metering_2, col = "red");
lines(subdata$DT, subdata$Sub_metering_3, col = "blue");
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = c(1, 1, 1), col = c("black", "red", "blue"), bty = "n");
# Bottom-right: global reactive power.
plot(subdata$DT, subdata$Global_reactive_power, type = "n", xlab = "datetime", ylab = "Global_reactive_power");
lines(subdata$DT, subdata$Global_reactive_power);
dev.off();
|
92e29f2056a65bad3e06968da10048669f10ff29
|
dd5a2a40de26efc49bf9daaa0c5a7bf36e17371d
|
/old-R-scripts/test_data.R
|
1b8e36433e9f6049b7a735d9dab2468baf4d1a68
|
[] |
no_license
|
tuh8888/MSPrep
|
fa0e21e7ef6c22b054721e15d8f8ed0ea0155451
|
f93dff1dcd8ffe7cfc98648166348f0deff6a112
|
refs/heads/master
| 2022-01-21T23:47:08.193216
| 2022-01-06T19:21:40
| 2022-01-06T19:21:40
| 152,809,547
| 0
| 0
| null | 2018-10-12T21:29:42
| 2018-10-12T21:29:42
| null |
UTF-8
|
R
| false
| false
| 5,937
|
r
|
test_data.R
|
#' Object exported from readdata() function
#'
#' Object exported from the readdata function. Contains clinical data and
#' summarized data.
#'
#' @docType data
#' @format
#' A list of 3 elements (str() output shown below):
#' The format is:
#' List of 3
#' $ sum_data1: num [1:9, 1:2654] 0 24885 23820 20730 19302 ...
#' ..- attr(*, "dimnames")=List of 2
#' .. ..$ : chr [1:9] "1x_O1" "1x_O2" "1x_O3" "2x_O1" ...
#' .. ..$ : chr [1:2654] "577.0322_0.5910416" "539.0207_0.58933336" ...
#' $ clinical :'data.frame': 9 obs. of 3 variables:
#' ..$ SubjectID: Factor w/ 9 levels "1x_O1","1x_O2",..: 1 2 3 4 5 6 7 8 9
#' ..$ Operator : int [1:9] 1 2 3 1 2 3 1 2 3
#' ..$ Spike : int [1:9] 1 1 1 2 2 2 4 4 4
#' $ medians : chr [1:23, 1:5] "251768" "101761" "79673" "468810" ...
#' @keywords datasets
#' @examples
#' data(test)
#' str(test)
"test"
#' Object exported from the filterft() function.
#'
#' Object exported from the filterft() function. Contains filtered and
#' imputation datasets
#'
#' @docType data
#' @format
#' The format is:
#' List of 4
#' $ minval :'data.frame': 9 obs. of 891 variables:
#' ..$ 577.0322_0.5910416 : num [1:9] 9445 24885 23820 20730 19302 ...
#' ..$ 539.0207_0.58933336 : num [1:9] 8035 22669 20792 19486 16070 ...
#' ..$ 723.9585_0.599889 : num [1:9] 36965 59562 58157 46326 46133 ...
#' ..$ 525.3529_0.6085925 : num [1:9] 100994 146132 148889 118769 114334 ...
#' ..$ 404.2655_0.6037692 : num [1:9] 30093 51270 46124 40558 36139 ...
#' .. [list output truncated]
#' $ count :'data.frame': 2654 obs. of 1 variable:
#' ..$ V1: num [1:2654] 1 1 0 0 4 0 0 0 0 0 ...
#' @keywords datasets
#' @examples
#' data(test2)
#' str(test2)
"test2"
#' Object exported from the normdata() function.
#'
#' Object exported from the normdata() function.
#'
#' @docType data
#' @format
#' The format is:
#' List of 12
#' $ log_data :'data.frame': 9 obs. of 891 variables:
#' ..$ 577.0322_0.5910416 : num [1:9] 14.4 14.6 14.5 14.3 14.2 ...
#' ..$ 539.0207_0.58933336 : num [1:9] 14.2 14.5 14.3 14.3 14 ...
#' ..$ 723.9585_0.599889 : num [1:9] 15.2 15.9 15.8 15.5 15.5 ...
#' ..$ 525.3529_0.6085925 : num [1:9] 16.6 17.2 17.2 16.9 16.8 ...
#' ..$ 404.2655_0.6037692 : num [1:9] 14.9 15.6 15.5 15.3 15.1 ...
#' ..$ 1164.7386_0.6163333 : num [1:9] 14.7 15.4 15.4 14.9 15 ...
#' ..$ 333.722_0.6112592 : num [1:9] 16.2 17 16.9 16.6 16.6 ...
#' .. [list output truncated]
#' $ log_data_combat : num [1:9, 1:891] 14.3 14.5 14.5 14.3 14.2 ...
#' ..- attr(*, "dimnames")=List of 2
#' .. ..$ : chr [1:9] "1x_O1" "1x_O2" "1x_O3" "2x_O1" ...
#' .. ..$ : chr [1:891] "577.0322_0.5910416" "539.0207_0.58933336" ...
#' $ log_quant :'data.frame': 9 obs. of 891 variables:
#' ..$ 577.0322_0.5910416 : num [1:9] 14.3 14.7 14.5 14.4 14 ...
#' ..$ 539.0207_0.58933336 : num [1:9] 14.2 14.6 14.3 14.3 13.6 ...
#' ..$ 723.9585_0.599889 : num [1:9] 15.2 15.9 15.8 15.5 15.4 ...
#' ..$ 525.3529_0.6085925 : num [1:9] 16.6 17.2 17.1 16.9 16.7 ...
#' ..$ 404.2655_0.6037692 : num [1:9] 14.9 15.7 15.5 15.3 15 ...
#' ..$ 1164.7386_0.6163333 : num [1:9] 14.8 15.5 15.4 14.9 14.9 ...
#' .. [list output truncated]
#' $ log_quant_combat: num [1:9, 1:891] 14.2 14.6 14.5 14.4 14.1 ...
#' ..- attr(*, "dimnames")=List of 2
#' .. ..$ : chr [1:9] "1x_O1" "1x_O2" "1x_O3" "2x_O1" ...
#' .. ..$ : chr [1:891] "577.0322_0.5910416" "539.0207_0.58933336" ...
#' $ med_adj :'data.frame': 9 obs. of 881 variables:
#' ..$ 577.0322_0.5910416 : num [1:9] -2.17 -1.94 -2.05 -2.23 -2.39 ...
#' ..$ 539.0207_0.58933336 : num [1:9] -2.35 -2.07 -2.24 -2.32 -2.65 ...
#' ..$ 723.9585_0.599889 : num [1:9] -1.404 -0.677 -0.759 -1.066 -1.131 ...
#' ..$ 525.3529_0.6085925 : num [1:9] 0.0463 0.6174 0.5968 0.2925 0.178 ...
#' ..$ 404.2655_0.6037692 : num [1:9] -1.701 -0.894 -1.094 -1.258 -1.484 ...
#' .. [list output truncated]
#' $ med_combat : num [1:9, 1:881] -2.22 -2.02 -2.07 -2.24 -2.38 ...
#' ..- attr(*, "dimnames")=List of 2
#' .. ..$ : chr [1:9] "1x_O1" "1x_O2" "1x_O3" "2x_O1" ...
#' .. ..$ : chr [1:881] "577.0322_0.5910416" "539.0207_0.58933336" ...
#' $ sva_factors : num [1:9, 1] -0.1567 0.0432 -0.1536 -0.2078 0.5435 ...
#' ..- attr(*, "dimnames")=List of 2
#' .. ..$ : NULL
#' .. ..$ : chr "f1"
#' $ sva_adj : num [1:9, 1:891] 14.4 14.4 14.4 14.5 14.3 ...
#' ..- attr(*, "dimnames")=List of 2
#' .. ..$ : chr [1:9] "1x_O1" "1x_O2" "1x_O3" "2x_O1" ...
#' .. ..$ : chr [1:891] "577.0322_0.5910416" "539.0207_0.58933336" ...
#' $ ruv_factors : num [1:9, 1] 0.1616 -0.7965 -0.1041 0.0635 0.1234 ...
#' ..- attr(*, "dimnames")=List of 2
#' .. ..$ : NULL
#' .. ..$ : chr "f1"
#' $ ruv_adj : num [1:9, 1:891] 14.4 14.5 14.4 14.4 14.4 ...
#' ..- attr(*, "dimnames")=List of 2
#' .. ..$ : chr [1:9] "1x_O1" "1x_O2" "1x_O3" "2x_O1" ...
#' .. ..$ : chr [1:891] "577.0322_0.5910416" "539.0207_0.58933336" ...
#' $ crmn_adj :'data.frame': 9 obs. of 881 variables:
#' ..$ 577.0322_0.5910416 : num [1:9] 20.8 21 21 20.7 20.5 ...
#' ..$ 539.0207_0.58933336 : num [1:9] 20.6 20.8 20.7 20.5 20.2 ...
#' ..$ 723.9585_0.599889 : num [1:9] 22.2 22.6 22.8 22.2 22.4 ...
#' ..$ 525.3529_0.6085925 : num [1:9] 24.2 24.6 24.7 24.2 24.3 ...
#' ..$ 404.2655_0.6037692 : num [1:9] 21.8 22.3 22.3 22 21.9 ...
#' ..$ 1164.7386_0.6163333 : num [1:9] 21.5 22 22.2 21.4 21.7 ...
#' .. [list output truncated]
#' $ controls : chr [1:10, 1:2] "610" "719" "734" "723" ...
#' ..- attr(*, "dimnames")=List of 2
#' .. ..$ : NULL
#' .. ..$ : chr [1:2] "ctl" ""
#' @keywords datasets
#' @examples
#' data(test3)
#' str(test3)
"test3"
|
8cdff991530c951bd008d1c073f12ddfb3903551
|
e8cb3a9a7d6a0df08c625a917a0caeaaf4b1bb79
|
/testing/test-script.R
|
498a215ff59e8229e9cb74f71a8f34f6fdd4cd1f
|
[
"MIT"
] |
permissive
|
ralhei/pyRserve
|
48793970fa22cf3624425a5ab4248a26eb2f27fd
|
c3d0a731bb393e15b2fe8a768389b0aeb53991fc
|
refs/heads/master
| 2023-08-08T11:16:23.987841
| 2023-07-27T19:19:26
| 2023-07-27T19:19:26
| 10,841,606
| 44
| 15
|
NOASSERTION
| 2023-07-27T19:19:27
| 2013-06-21T11:53:36
|
Python
|
UTF-8
|
R
| false
| false
| 520
|
r
|
test-script.R
|
# Test file for Ralph with plot returned as raw file
#
# Author: yanabr
###############################################################################
# NOTE(review): clears the whole workspace and all graphics devices --
# acceptable for a standalone test script, but hostile if sourced from
# other code.
rm(list=ls())
graphics.off()
pid <- Sys.getpid()  # makes the temp plot filename unique per R process
## some dummy data: 100 sorted x values with a noisy linear response
x <- sort(rnorm(100))
y <- 2*x+rnorm(100,0,0.5)
## model: simple linear regression of y on x
model <- lm(y~x)
filename <- paste('plot_',pid,'.png',sep="")
png(width=480, height=480, file=filename)
plot(x,y)
abline(coef(model),col=2,lty=2)  # fitted line, red dashed
dev.off()
# Slurp the rendered PNG back as a raw vector; 999999 is just an upper
# byte bound -- readBin stops at end of file.
im <- readBin(filename,"raw", 999999)
# Final value: data, coefficients and the image bytes, presumably picked
# up by the pyRserve client -- confirm against the calling code.
result_vector <- list(x,y,coef(model),im)
|
7126152a4b62b9818f5133505ff7e20c8ccbc035
|
e50e8103d40f6860c7e7644513e2488d8753993b
|
/R/EvalPass.R
|
f30de0bf22aac34dd37c16e1dc1f426422e0f8c4
|
[] |
no_license
|
JeremyTate/ridership
|
544368d67bbd28f0abf72e3a5b321e53eb3d4ca8
|
43521b765d1452fe1c6a9a20076ee90fdf1719dc
|
refs/heads/master
| 2020-04-28T06:57:33.887203
| 2019-03-14T02:29:41
| 2019-03-14T02:29:41
| 175,076,267
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,045
|
r
|
EvalPass.R
|
#' Evaluate passengers according to cutoff values
#'
#' A function to calculate whether or not a single observation is a passenger
#' based on specified cutoff values
#'
#' @param obs A single observation from a wifi dataset that is to be classified
#' as a passenger or not.
#' @param pass.mod A random forest model to classify "unknown" manufacturers as
#' valid manufacturers or not.
#'
#' @inheritParams PredictLabels
#'
#' @import randomForest
#'
#' @references
#'
#' @return The same observation from a wifi dataset with a "passenger" variable
#' that indicates whether or not it was determined to be a bus passenger
#' (1: passenger, 0: not a passenger). If the observation is determined to be a
#' passenger the first and last stop (first.stop and last.stop respectively) are
#' added as new columns to the observation.
#'
#' @examples
#'
EvalPass <- function(obs, stops.data, phones, dist.tol, speed.tol, time.tol,
                     rate.tol, pass.mod) {
  # Time distance (in the units of the detection timestamps) from the
  # observation's first/last detection to every stop. Vectorised over the
  # rows of stops.data; replaces the original element-wise loop.
  first.dist <- abs(obs$first.detection - stops.data$time)
  last.dist <- abs(obs$last.detection - stops.data$time)
  # Index of the stop nearest in time to the first and last detection.
  first.closest <- which.min(first.dist)
  last.closest <- which.min(last.dist)
  # Passenger heuristic: probe request on channel 0, empty/unknown SSID,
  # low max data rate, phone manufacturer, both detections close to (two
  # different) stops, slow or unknown speed, and detected long enough.
  # `&` (not `&&`) is kept deliberately so the NA semantics of the
  # original condition are preserved.
  if (obs$NetworkType == "probe" &
      obs$Channel == 0 &
      (is.na(obs$SSID) | obs$SSID == "#N/A" | obs$SSID == "") &
      obs$Maxrate <= rate.tol &
      obs$Manufacturer %in% phones &
      first.dist[first.closest] <= dist.tol &
      last.dist[last.closest] <= dist.tol &
      first.closest != last.closest &
      (is.na(obs$Minspeed) | obs$Minspeed <= speed.tol) &
      obs$Detectiontime >= time.tol) {
    if (obs$Manufacturer == "Unknown") {
      # Columns 14, 15, 21 are the features pass.mod was trained on --
      # TODO(review): replace these magic indices with column names.
      if (predict(pass.mod, obs[, c(14, 15, 21)]) == 0) {
        return(obs) # not a predicted phone on bus
      }
    }
    # Classified as a passenger: record boarding/alighting stops.
    obs$passenger <- 1
    obs$first.stop <- stops.data$name[first.closest]
    obs$last.stop <- stops.data$name[last.closest]
  }
  return(obs)
}
|
4e244b33945dee0e39ee5e6807fb45ff7ff94be9
|
8e6a647d5e70419c34434ec78b381949d1dc0964
|
/README.rd
|
7c663a9551140044fb25dca78f1a34e4263b3f07
|
[] |
no_license
|
acme/ruby-data-page
|
3e9f4ee8594efe160d8c9e2575db9a3790bdcc6a
|
4d9c0b0e4c939b2fd0c03dcd1a30b5d7589872ca
|
refs/heads/master
| 2020-05-27T12:58:35.007388
| 2011-08-25T13:13:02
| 2011-08-25T13:13:02
| 2,267,974
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,203
|
rd
|
README.rd
|
=begin
index:Ej
= Data/Page: Help when paging through sets of results
Last Modified: 2005-05-22 00:28:04
--
When searching through large amounts of data, it is often the case
that a result set is returned that is larger than we want to display
on one page. This results in wanting to page through various pages of
data. The maths behind this is unfortunately fiddly, hence this
module.
The main concept is that you pass in the number of total entries, the
number of entries per page, and the current page number. You can then
call methods to find out how many pages of information there are, and
what number the first and last entries on the current page really are.
For example, say we wished to page through the integers from 1 to 100
with 20 entries per page. The first page would consist of 1-20, the
second page from 21-40, the third page from 41-60, the fourth page
from 61-80 and the fifth page from 81-100. This module would help you
work this out.
== Examples
require "datapage"
page = Data::Page.new()
page.total_entries(total_entries)
page.entries_per_page(entries_per_page)
page.current_page(current_page)
puts " First page: #{page.first_page}"
puts " Last page: #{page.last_page}"
puts "First entry on page: #{page.first}"
puts " Last entry on page: #{page.last}"
== API
--- Data::Page#new ()
This is the constructor, which takes no arguments.
page = Data::Page.new()
--- Data::Page#total_entries
This method get or sets the total number of entries:
puts "Entries: #{page.total_entries}"
--- Data::Page#entries_per_page
This method get or sets the total number of entries
per page (which defaults to 10):
puts "Per page: #{page.entries_per_page}"
--- Data::Page#current_page
This method gets or sets the current page number
(which defaults to 1):
puts "Page: #{page.current_page}"
--- Data::Page#entries_on_this_page
This methods returns the number of entries on the current page:
puts "There are #{page.entries_on_this_page} entries displayed"
--- Data::Page#first_page
This method returns the first page. This is put in for reasons of
symmetry with last_page, as it always returns 1:
puts "Pages range from: #{page.first_page}"
--- Data::Page#last_page
This method returns the total number of pages of information:
puts "Pages range to: #{page.last_page}"
--- Data::Page#first
This method returns the number of the first entry on the current page:
puts "Showing entries from: #{page.first}"
--- Data::Page#last
This method returns the number of the last entry on the current page:
puts "Showing entries to: #{page.last}"
--- Data::Page#previous_page
This method returns the previous page number, if one exists. Otherwise
it returns nil:
puts "Previous page number: #{page.previous_page}" if page.previous_page
--- Data::Page#next_page
This method returns the next page number, if one exists. Otherwise
it returns nil:
puts "Next page number: #{page.next_page}" if page.next_page
--- Data::Page#splice( array )
This method takes in an array, and returns only the values which are
on the current page:
visible_holidays = page.splice(holidays);
--- Data::Page#skipped
This method is useful paging through data in a database using SQL
LIMIT clauses. It is simply page.first - 1:
--- Data::Page#change_entries_per_page
This method changes the number of entries per page and the
current page number such that the *first* item on the current
page will be present on the new page:
page.total_entries(50);
page.entries_per_page(20);
page.current_page(3);
puts page.first; # 41
page.change_entries_per_page(30);
puts page.current_page; # 2 - the page that item 41 will show in
== Notes
It has been said before that this code is "too simple" for distribution, but I
must disagree. I have seen people write this kind of code over and
over again and they always get it wrong. Perhaps now they will spend
more time getting the rest of their code right...
--
- ((<Leon Brocard|URL:http://www.astray.com/>)) -
=end
|
4bca44878dc7160c745da7da12e680bc7dc8a515
|
8c02ea3f035ea3988105a71d67b243ac351ad5eb
|
/r_studio/print_integers.R
|
c08da5e358e896c9fb65e52cc80eb3668f2a7c7f
|
[] |
no_license
|
sudharsaanj001/st2195_assignment_1
|
f2090fd27bacb233d382cddc339dbe376a9a3dd1
|
88fd9628bd564f8d2f512c5747364113d6ddc0e0
|
refs/heads/main
| 2023-08-05T11:17:04.458033
| 2021-09-25T12:58:06
| 2021-09-25T12:58:06
| 409,173,419
| 2
| 3
| null | 2021-09-25T12:56:26
| 2021-09-22T11:16:26
|
HTML
|
UTF-8
|
R
| false
| false
| 125
|
r
|
print_integers.R
|
## Print every integer from 1 to 10 (inclusive), one per line, via a for loop.
for (idx in seq_len(10)) {
  print(idx)
}
|
dbe119d26828698f800dd3421bc3aca589649813
|
7dbd125a0a3d3b400c0b1fe0e8db71f270a8e93b
|
/Models/Reproduction/phase.R
|
0e52be744ff00d559fa903e1ae7ce09ac4950154
|
[] |
no_license
|
JusteRaimbault/MediationEcotox
|
6cf343f3bee156a79e163bcbd1b60a5c4ac62c0a
|
bbba2e1e25581821056bdd91b3246a2de16aea07
|
refs/heads/master
| 2021-01-18T22:07:15.638069
| 2019-09-17T09:00:26
| 2019-09-17T09:00:26
| 44,667,567
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,092
|
r
|
phase.R
|
# Phase space / Liapounov phase diagram for Prey Predator model
# Flat analysis script: for every combination of the five model parameters
# it loads the matching trajectory CSV, plots the raw trajectories and an
# averaged speed field, and writes the figures under res/full/.
setwd(paste0(Sys.getenv('CS_HOME'),'/MediationEcotox/Results/PreyPredator'))
library(dplyr)
library(ggplot2)
source('functions.R')
resfiles = list.files(path="split")
# parameter def -- full factorial sweep over the five model parameters
for(sheepGain in c(20,60,100)){for(wolfGain in c(20,60,100)){for(sheepRepro in c(5,10,20)){
for(wolfRepro in c(5,10,20)){for(grassRegrow in c(20,60,100)){
#sheepGain=20;wolfGain=20;
#sheepRepro=5;wolfRepro=5;
withGrass=1;#grassRegrow=100;
# Output file prefix encoding the current parameter combination.
resname=paste0('res/full/withGrass',withGrass,'_grassRegrow',grassRegrow,'_sheepGain',sheepGain,'_wolfGain',wolfGain,'_sheepRepro',sheepRepro,'_wolfRepro',wolfRepro)
show(resname)
# Trajectory file: columns 1-2 are the initial conditions; columns 3-4
# hold whole trajectories encoded as '-'-separated strings.
trajs <- as.tbl(read.csv(paste0('split/',grassRegrow,'.0-',sheepGain,'.0-',sheepRepro,'.0-',withGrass,'.0-',wolfGain,'.0-',wolfRepro,'.0.csv'),header=FALSE,stringsAsFactors = FALSE))
names(trajs)[1:2]=c("x0","y0")
# check initial discrepancy
#plot(trajs[,1],trajs[,2])
# try to plot raw trajectories -- decode the '-'-separated strings into
# one numeric vector per trajectory
trajx=list();trajy=list()
for(i in 1:nrow(trajs)){
trajx[[i]]=as.numeric(strsplit(as.character(trajs[i,3]),'-')[[1]])
trajy[[i]]=as.numeric(strsplit(as.character(trajs[i,4]),'-')[[1]])
}
# filter trajectory within some bounds
# bx=c(-1,1000);by=c(-1,1000)
# rows = which(sapply(trajx,min)>bx[1]&sapply(trajx,max)<bx[2]&sapply(trajy,min)>by[1]&sapply(trajy,max)<by[2])
# otrajx=trajx;otrajy=trajy;trajx=list();trajy=list()
# for(i in 1:length(rows)){trajx[[i]]=otrajx[[rows[i]]];trajy[[i]]=otrajy[[rows[i]]]}
# trajs=trajs[rows,]
# try with ggplot -- build one line segment per consecutive pair of points
trajid = c();trajtimes=c()
trajlength = sapply(trajx,length)-1
for(i in 1:length(trajlength)){trajid=append(trajid,rep(i,trajlength[i]));trajtimes=append(trajtimes,1:trajlength[i])}
# Segment table: start (xs,ys) -> end (xe,ye), plus the originating
# trajectory id, its initial condition and the time step.
d = data.frame(xs=unlist(lapply(trajx,function(l){l[1:(length(l)-1)]})),
ys=unlist(lapply(trajy,function(l){l[1:(length(l)-1)]})),
xe=unlist(lapply(trajx,function(l){l[2:length(l)]})),
ye=unlist(lapply(trajy,function(l){l[2:length(l)]})),
x0=trajs[trajid,1],y0=trajs[trajid,2],
id=trajid,times=trajtimes)
g=ggplot(d)
# NOTE(review): the plot expression below is not printed inside the loop,
# so ggplot2's last_plot() may not be updated before ggsave() -- verify
# the intended figure is actually the one saved.
g+geom_segment(aes(x=xs,y=ys,xend=xe,yend=ye,colour=trajtimes))+ scale_colour_gradient(low="yellow",high="red")
#ylim(c(0,250))+xlim(c(0,250))
ggsave(filename=paste0(resname,'_trajs.png'))
#}}}}}
# try better representation with speed field: average displacement of all
# segments whose start point falls inside each grid cell
step=10;
x=seq(from=step/2,to=1000,by=step)
y=seq(from=step/2,to=400,by=step)
xcors=c();ycors=c();xspeed=c();yspeed=c()
for(xx in x){for(yy in y){
rows = (abs(d[,1]-xx)<step/2)&(abs(d[,2]-yy)<step/2)
if(length(which(rows))>4){ # require at least 5 segments in the cell
xcors=append(xcors,xx);ycors=append(ycors,yy)
xspeed = append(xspeed,mean(d[rows,3]-d[rows,1]))
yspeed = append(yspeed,mean(d[rows,4]-d[rows,2]))
}
}}
g=ggplot(data.frame(x=xcors,y=ycors,xs=xspeed,ys=yspeed),aes(x=x,y=y))
g+geom_segment(aes(xend = x + xs, yend = y+ys,colour=abs(xs)+abs(ys)),
arrow = arrow(length = unit(0.1,"cm")))+ scale_colour_gradient(low="green",high="red")
#+ theme(axis.ticks = element_line(linetype = "dashed"),
# plot.background = element_rect(fill = "white"),
# legend.position = "bottom", legend.direction = "horizontal")
ggsave(filename=paste0(resname,'_speed.png'))
}}}}}
###
# pseudo liapounov : MSE trajs from a given cell -- a rough divergence
# measure: large values mean trajectories starting in the same grid cell
# end up far apart.
step=25
x=seq(from=step/2,to=250,by=step)  # grid-cell centres, x axis
y=seq(from=step/2,to=250,by=step)  # grid-cell centres, y axis
liap = c()                         # per-cell accumulator, filled below
# Squared-difference distance between two trajectories of possibly
# different lengths: the shorter one is padded by repeating its final
# value until the lengths match, then the sum of squared differences
# is returned.
trajdiff <- function(t1, t2) {
  longer <- if (length(t1) > length(t2)) t1 else t2
  shorter <- if (length(t1) > length(t2)) t2 else t1
  padding <- rep(shorter[length(shorter)], length(longer) - length(shorter))
  sum((longer - c(shorter, padding))^2)
}
# For each grid cell, sum pairwise trajectory distances over all
# trajectories whose initial condition lies in that cell.
xcors=c();ycors=c()
for(xx in x){for (yy in y){
rows = (abs(trajs[,1]-xx)<step/2)&(abs(trajs[,2]-yy)<step/2)
mse = 0
for(k1 in which(rows)){# do it dirty -- O(k^2) over pairs, incl. k1==k2
for(k2 in which(rows)){
mse = mse + trajdiff(trajx[[k1]],trajx[[k2]])+trajdiff(trajy[[k1]],trajy[[k2]])
}
}
xcors=append(xcors,xx);ycors=append(ycors,yy);liap=append(liap,mse)
}}
# Heatmap of the per-cell divergence measure.
g=ggplot(data.frame(x=xcors,y=ycors,liap=liap))
g+geom_raster(aes(x=x,y=y,fill=liap))+ scale_fill_gradient(low="yellow",high="red")
|
395a1365aecd6df1d0a7780098998024637396a9
|
31bd220a932ce88bbabc6aa7a2819e48008dde19
|
/man/factorize.Rd
|
f927a5ad71f29df7b700b20d6d62552620c7c27f
|
[] |
no_license
|
cran/DoE.base
|
6fa51fb944ba3f841ad35302c60e0a7ab3da6902
|
ecad38c67623b041c29252cb7608e54035e0567d
|
refs/heads/master
| 2023-05-12T04:24:46.333540
| 2023-05-08T15:20:07
| 2023-05-08T15:20:07
| 17,691,739
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,404
|
rd
|
factorize.Rd
|
\name{factorize}
\alias{factorize.factor}
\alias{factorize.design}
\alias{factorize.data.frame}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Factorize integer numbers and factors
}
\description{
Methods to factorize integer numbers into primes or factors
into pseudo factors with integer numbers of levels
}
\usage{
\method{factorize}{factor}(x, name = deparse(substitute(x)), extension = letters,
drop = FALSE, sep = "", ...)
\method{factorize}{design}(x, extension = letters, sep = ".", long=FALSE, ...)
\method{factorize}{data.frame}(x, extension = letters, sep = ".", long=FALSE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{factor\cr
OR data frame of class \code{design}
OR data frame }
\item{name}{name to use for prefixing the pseudo factors}
\item{extension}{extensions to use for postfixing the pseudo factors}
\item{drop}{TRUE: have a vector only in case of just one pseudo factor }
\item{sep}{separation between name and postfix for pseudo factors}
\item{long}{TRUE: create a complete matrix of pseudofactors;
FALSE: only create the named numbers of levels }
\item{\dots}{ currently not used}
}
\details{
These functions are used for blocking full factorials.
The method for class \code{factors} is a modification of the analogous method
from package \pkg{conf.design}, the other two are convenience versions for designs
and data frames.
}
\value{
All three methods return a matrix of pseudo factors (in case \code{long=TRUE})
or a named numeric vector of numbers of levels of the pseudo factors
(for the default \code{long=FALSE}).
}
\author{
Ulrike Groemping; Bill Venables authored the original of factorize.factor.
}
\note{
There may be conflicts with functions from packages \pkg{conf.design} or \pkg{sfsmisc}.
}
\seealso{
The function \code{\link[conf.design]{factorize}} from package \pkg{conf.design}, \cr
the function \code{factorize} from package \pkg{sfsmisc} (no link provided,
in order to avoid having to include \pkg{sfsmisc} in Suggests).
}
\examples{
factorize(12)
factorize(c(2,2,3,3,6))
factorize(fac.design(nlevels=c(2,2,3,3,6)))
unlist(factorize(c(2,2,3,3,6)))
factorize(undesign(fac.design(nlevels=c(2,2,3,3,6))))
}
\keyword{ array }
\keyword{ design }
|
a87efc6d667bba3ffd607a2b88a8e4c7c798d69c
|
9d0f2ba4463891d59f7e13983e70e20f5eb18bfa
|
/R/heterogeneity_stats.R
|
cc8c8984c24265228bbf674a50d1a6725c2e47bf
|
[
"MIT"
] |
permissive
|
donaldRwilliams/blsmeta
|
9a336f0fb78880c99101cbcc4699e22a0931918f
|
8e9cdcc3c6b66a88b2052c5e9973c66893e65d5b
|
refs/heads/main
| 2023-06-16T04:12:32.857648
| 2021-07-11T17:34:47
| 2021-07-11T17:34:47
| 357,192,752
| 8
| 1
|
NOASSERTION
| 2021-06-13T14:44:33
| 2021-04-12T12:53:50
|
R
|
UTF-8
|
R
| false
| false
| 3,877
|
r
|
heterogeneity_stats.R
|
#' @title Credible Intervals for Heterogeneity Statistics
#'
#' @description Compute credible intervals for between-study variance,
#' between-study standard deviation, I2, and H2. This
#' function mimics that of **metafor**, but for credible
#' and not confidence intervals, as well as for two and
#' three-level models.
#'
#'
#' @param object An object of class \code{blsmeta}.
#'
#' @param cred numeric. credible interval (defaults to \code{0.95}).
#'
#' @param digits numeric. The desired number of digits for the summarized
#' estimates (defaults to \code{3}).
#'
#' @param ... Currently ignored.
#'
#' @return An object of class \code{confint}, including a data.frame
#' with the estimates.
#'
#' @export
#'
#' @note There cannot be a scale model. To get I2, etc., when there
#' is a scale model, use \link[blsmeta]{I2}.
#'
#' @examples
#' library(psymetadata)
#'
#' fit_re <- blsmeta(yi = yi, vi = vi,
#' es_id = es_id,
#' data = gnambs2020)
#'
#' credint(fit_re)
credint.blsmeta <- function(object,
                            cred = 0.95,
                            digits = 3,
                            ...){
  # Validate input: only blsmeta objects with at least one random effect.
  if (!is(object, "blsmeta")) {
    stop("object must be of class 'blsmeta'")
  }
  if (object$model == "fe") {
    stop("fixed-effects models not supported")
  }
  if(object$model == "two_level"){
    # A scale model would make tau observation-specific; refuse and point
    # the user at the dedicated I2/H2/tau2 functions.
    if(object$mods_scale2_f != ~1){
      stop("scale model not permitted. see I2, H2, and tau2 functions.")
    }
    # Sampling-variance summary of the observed effects (via s2_helper).
    s2 <- s2_helper(object$dat_list$v)
    # exp() maps the model's log-scale intercept draws back to tau.
    gammas <- .extract_gamma(object)
    tau <- t(exp(matrix(1) %*% t(gammas)))
    tau2 <- tau^2
    # I^2: share of total variance due to between-study heterogeneity;
    # H^2 = 1 / (1 - I^2): total-to-sampling variance ratio.
    I2 <- (tau2 / (tau2 + s2))
    H2 <- 1 / (1 - I2)
    I2 <- I2 * 100  # reported as a percentage
    returned_object <- list(estimates =
                              round(
                                rbind.data.frame(
                                  "tau^2" = .summary_helper(tau2, cred),
                                  tau = .summary_helper(tau, cred),
                                  "I^2" = .summary_helper(I2, cred),
                                  "H^2" = .summary_helper(H2, cred)
                                ),
                                digits = digits
                              ))
  } else {
    # Three-level model: scale models must be absent at levels 2 and 3.
    if(object$mods_scale2_f != ~1){
      stop("scale model not permitted. see I2, H2, and tau2 functions.")
    }
    if(object$mods_scale3_f != ~1){
      stop("scale model not permitted. see I2, H2, and tau2 functions.")
    }
    s2 <- s2_helper(object$dat_list$v)
    gammas <- .extract_gamma(object)
    etas <- .extract_eta(object)
    # Level-2 (gammas) and level-3 (etas) heterogeneity, on the tau scale.
    tau_2 <- t(exp(matrix(1) %*% t(gammas)))
    tau2_2 <- tau_2^2
    tau_3 <- t(exp(matrix(1) %*% t(etas)))
    tau2_3 <- tau_3^2
    # Per-level variance shares (percent) and overall H^2.
    I2_2 <- (tau2_2 / (tau2_2 + tau2_3 + s2)) * 100
    I2_3 <- (tau2_3 / (tau2_2 + tau2_3 + s2)) * 100
    H2 <- (tau2_2 + tau2_3 + s2) / s2
    level_2 <- rbind.data.frame(
      "tau^2" = .summary_helper(tau2_2, cred),
      tau = .summary_helper(tau_2, cred),
      "I^2" = .summary_helper(I2_2, cred)
    )
    level_3 <- rbind.data.frame(
      "tau^2" = .summary_helper(tau2_3, cred),
      tau = .summary_helper(tau_3, cred),
      "I^2" = .summary_helper(I2_3, cred)
    )
    h2 <- rbind.data.frame("H^2" = .summary_helper(H2, cred))
    returned_object <- list(level_2 = round(level_2, digits = digits),
                            level_3 = round(level_3, digits = digits),
                            h2 = round(h2, digits = digits))
  }
  # Result prints through the package's blsmeta/confint S3 methods.
  class(returned_object) <- c("blsmeta", "confint")
  return(returned_object)
}
#' @title S3 \code{credint} generic
#'
#' @description Generic that dispatches on its first argument to
#'   class-specific methods such as \code{credint.blsmeta}.
#'
#' @param object An object of class \code{blsmeta}
#' @param ... Further arguments passed on to methods (currently ignored)
#' @export
credint <- function(object, ...){
  UseMethod("credint", object)
}
|
5d9925c769f0f9e616e23ec455cc837e630b7b26
|
9440c39a8e9e1cde67c704fa30ea82178421370f
|
/notebooks/190-CNV-PCAs/CNV_PCA.r
|
05a7af6fa6f6b60a998b1bb5b0d29af5b7d69c7c
|
[
"CC-BY-4.0"
] |
permissive
|
alimanfoo/ag1000g-phase2-data-paper
|
238c0f467a7599261f144ad8a2de7364949215cc
|
05f29d2e1c55467be898e5b8848fc7a5e515e649
|
refs/heads/master
| 2020-03-16T22:47:49.937764
| 2019-10-02T21:40:19
| 2019-10-02T21:40:19
| 133,052,828
| 1
| 0
| null | 2018-05-11T14:52:03
| 2018-05-11T14:52:03
| null |
UTF-8
|
R
| false
| false
| 10,614
|
r
|
CNV_PCA.r
|
# First get presence / absence data for the CNVs.
# Read-based presence/absence calls for the Cyp6aap and Cyp9k1 clusters;
# column names are prefixed with the cluster so the tables can be cbind-ed
# without name clashes. TRUE/FALSE is used instead of the reassignable
# shorthands T/F throughout.
cyp6.pa.calls <- read.table('../CNV_stats/tables_for_phase2_paper/cyp6aap.csv', header = TRUE, row.names = 1)
colnames(cyp6.pa.calls) <- paste('Cyp6', colnames(cyp6.pa.calls), sep = '_')
cyp9k1.pa.calls <- read.table('../CNV_stats/tables_for_phase2_paper/cyp9k1.csv', header = TRUE, row.names = 1)
colnames(cyp9k1.pa.calls) <- paste('Cyp9k1', colnames(cyp9k1.pa.calls), sep = '_')
# For the other two regions, we need to build the tables.
# NOTE(review): the absolute /home/eric/... paths make this script
# non-portable; consider relocating the inputs next to the project.
load('/home/eric/Liverpool/CNV_v2/counting_output_v4_3R/phase2/alt_fullgcnorm_nomapq_mapq002_varvar_trans000001/Gste2_analysis_shrunk_data.Rdata')
# The +0 converts the logical values to numeric
gste.pa.calls <- (read.based.gst.duplications >= 1) + 0
load('/home/eric/Liverpool/CNV_v2/counting_output_v4_3R/phase2/alt_fullgcnorm_nomapq_mapq002_varvar_trans000001/CYP6M2-Z1_analysis_shrunk_data.Rdata')
cyp6mz.pa.calls <- (read.based.cyp.duplications >= 1) + 0
# Remove all objects that aren't those tables
rm(list = ls()[!grepl('pa.calls', ls())])
# Join the tables
pa.calls <- do.call(cbind, list(cyp6.pa.calls, cyp6mz.pa.calls, cyp9k1.pa.calls, gste.pa.calls))
# Next lets get a table of coverage calls. Where there is an NA, we replace
# the coverage calls by 1, because we know the dup is at least present.
cyp6.coverage.calls <- read.table('/home/eric/Manuscripts/GSTE_new/Supplementary/cyp6_description_report_190201/Cyp6_coverage_calls.csv', header = TRUE, sep = ',', row.names = 1)
colnames(cyp6.coverage.calls) <- paste('Cyp6', colnames(cyp6.coverage.calls), sep = '_')
cyp6.coverage.calls[is.na(cyp6.coverage.calls)] <- 1
#
cyp6mz.coverage.calls <- read.table('/home/eric/Manuscripts/GSTE_new/Supplementary/cyp6m2-z1_description_report_190201/Cyp6m2-z1_coverage_calls.csv', header = TRUE, sep = ',', row.names = 1)
colnames(cyp6mz.coverage.calls) <- paste('Cyp6mz', colnames(cyp6mz.coverage.calls), sep = '_')
cyp6mz.coverage.calls[is.na(cyp6mz.coverage.calls)] <- 1
#
cyp9k1.coverage.calls <- read.table('/home/eric/Manuscripts/GSTE_new/Supplementary/cyp9k1_description_report_190201/Cyp9k1_coverage_calls.csv', header = TRUE, sep = ',', row.names = 1)
colnames(cyp9k1.coverage.calls) <- paste('Cyp9k1', colnames(cyp9k1.coverage.calls), sep = '_')
cyp9k1.coverage.calls[is.na(cyp9k1.coverage.calls)] <- 1
#
gste.coverage.calls <- read.table('/home/eric/Manuscripts/GSTE_new/Supplementary/GSTE_description_report_190201/Gste_coverage_calls.csv', header = TRUE, sep = ',', row.names = 1)
colnames(gste.coverage.calls) <- paste('Gst', colnames(gste.coverage.calls), sep = '_')
gste.coverage.calls[is.na(gste.coverage.calls)] <- 1
coverage.calls <- do.call(cbind, list(cyp6.coverage.calls, cyp6mz.coverage.calls, cyp9k1.coverage.calls, gste.coverage.calls))
# Strip a leading literal backslash from sample names so they match the
# row names of the other tables.
rownames(coverage.calls) <- sub('\\\\', '', rownames(coverage.calls))
# Next just get the tables of hmm-based CNVs.
load('../CNV_stats/CNV_stats.Rdata')
# Need to turn the list of samples carrying each CNV into a 0/1 table
# (rows = samples, columns = CNV clusters).
filtered.sample.names <- rownames(meta.reduced)
hmm.calls.list <- lapply(duplications.by.cluster.allchrom, function(x) filtered.sample.names %in% rownames(x))
hmm.calls <- do.call(cbind, hmm.calls.list) + 0
rownames(hmm.calls) <- filtered.sample.names
# Get the CNVs that passed population frequency filtering
hmm.nonsingle.calls <- hmm.calls[,rownames(subset(all.CNV.ranges.allchrom, singleton == FALSE))]
hmm.filterpass.calls <- hmm.calls[,rownames(subset(all.CNV.ranges.allchrom, goodfreq))]
# Get the colors for the different populations:
# (named by population code; values are fixed RGB triplets so figures are reproducible)
colourscheme <- unlist(list('AOcol'= rgb(0.69439448188332953, 0.070034602810354785, 0.092318341048324815),
                            'BFcol'= rgb(0.98357554884517895, 0.4127950837799147, 0.28835064675293715),
                            'BFgam'= rgb(0.57960786223411564, 0.77019609212875362, 0.87372549772262575),
                            'CIcol'= rgb(0.98823529481887817, 0.62614381313323975, 0.50849674145380652),
                            'CMgam'= rgb(0.090196083486080159, 0.39294118285179136, 0.67058825492858887),
                            'FRgam'= rgb(0.47320263584454852, 0.43267974257469177, 0.69934642314910889),
                            'GAgam'= rgb(0.21568628648916882, 0.62875819206237793, 0.3333333432674408),
                            'GHcol'= rgb(0.89019608497619629, 0.18562091638644537, 0.15294117977221808),
                            'GHgam'= rgb(0.2909804046154022, 0.59450982809066777, 0.78901962041854856),
                            'GM'= rgb(0.939607846736908, 0.47137255668640132, 0.094901964068412781),
                            'GNcol'= rgb(0.99358708227381987, 0.83234141714432663, 0.76249136363758763),
                            'GNgam'= rgb(0.81411765813827519, 0.8839215755462646, 0.94980392456054685),
                            'GQgam'= rgb(0.7764706015586853, 0.77908498048782349, 0.88235294818878174),
                            'GW'= rgb(0.99607843160629272, 0.73490197658538814, 0.28000001013278963),
                            'KE'= rgb(0.58608230025160546, 0.58608230025160546, 0.58608230025160546),
                            'UGgam'= rgb(0.6810457706451416, 0.871895432472229, 0.65620917081832886)))
# One colour per sample, looked up by that sample's population.
allcolours <- colourscheme[as.character(meta.reduced$population)]
# PCA of each call matrix (rows = samples, columns = CNVs).
pa.pca <- prcomp(pa.calls)
coverage.pca <- prcomp(coverage.calls)
hmm.pca <- prcomp(hmm.calls)
hmm.nonsingle.pca <- prcomp(hmm.nonsingle.calls)
hmm.filterpass.pca <- prcomp(hmm.filterpass.calls)
# Now let's try to do a pca on these.
# Six panels: PC1/PC2 for each of the five call sets, plus a legend panel.
png('PCA_comparison.png', width = 720)
par(mfrow = c(2,3), mar = c(3,3,2.5,1), mgp = c(1.5,0.3,0), tcl = -0.3, cex = 0.9)
plot(pa.pca$x[,1], pa.pca$x[,2], col = allcolours, pch = 19, cex = 0.8, xlab = 'PC 1', ylab = 'PC 2', main = 'CNVs from discordant reads')
plot(coverage.pca$x[,1], coverage.pca$x[,2], col = allcolours, pch = 19, cex = 0.8, xlab = 'PC 1', ylab = 'PC 2', main = 'CNVs from discordant\nreads with copy-number')
plot(hmm.pca$x[,1], hmm.pca$x[,2], col = allcolours, pch = 19, cex = 0.8, xlab = 'PC 1', ylab = 'PC 2', main = 'HMM-based CNVs')
plot(hmm.nonsingle.pca$x[,1], hmm.nonsingle.pca$x[,2], col = allcolours, pch = 19, cex = 0.8, xlab = 'PC 1', ylab = 'PC 2', main = 'HMM-based CNVs non-singletons')
plot(hmm.filterpass.pca$x[,1], hmm.filterpass.pca$x[,2], col = allcolours, pch = 19, cex = 0.8, xlab = 'PC 1', ylab = 'PC 2', main = 'HMM-based CNVs > 5% freq')
# Empty panel used purely as a canvas for the two-column legend.
plot(c(0,1), c(0,1), type = 'n', bty = 'n', xaxt = 'n', yaxt = 'n', xlab = '', ylab = '')
legend(-0.05, 1, names(colourscheme)[1:8], col = colourscheme[1:8], pch = 19, lty = 0, bty = 'n', cex = 1.5)
legend(0.5, 1, names(colourscheme[9:16]), col = colourscheme[9:16], pch = 19, lty = 0, bty = 'n', cex = 1.5)
dev.off()
# Since the HMM-based CNVs are the only ones that look reasonable, let's plot those with more PCs
png('PCA_detailed.png')
par(mfrow = c(2,2), mar = c(3,3,2.5,1), mgp = c(1.5,0.3,0), tcl = -0.3)
plot(hmm.nonsingle.pca$x[,1], hmm.nonsingle.pca$x[,2], col = allcolours, pch = 19, cex = 0.8, xlab = 'PC 1', ylab = 'PC 2', main = 'HMM-based CNVs non-singletons')
plot(hmm.nonsingle.pca$x[,3], hmm.nonsingle.pca$x[,4], col = allcolours, pch = 19, cex = 0.8, xlab = 'PC 3', ylab = 'PC 4', main = 'HMM-based CNVs non-singletons')
plot(hmm.nonsingle.pca$x[,5], hmm.nonsingle.pca$x[,6], col = allcolours, pch = 19, cex = 0.8, xlab = 'PC 5', ylab = 'PC 6', main = 'HMM-based CNVs non-singletons')
plot(c(0,1), c(0,1), type = 'n', bty = 'n', xaxt = 'n', yaxt = 'n', xlab = '', ylab = '')
legend(-0.05, 1, names(colourscheme)[1:8], col = colourscheme[1:8], pch = 19, lty = 0, bty = 'n', cex = 1.5)
legend(0.5, 1, names(colourscheme[9:16]), col = colourscheme[9:16], pch = 19, lty = 0, bty = 'n', cex = 1.5)
dev.off()
# Same layout, but for the >5%-frequency ("filterpass") CNV set.
png('PCA_filterpass_detailed.png')
par(mfrow = c(2,2), mar = c(3,3,2.5,1), mgp = c(1.5,0.3,0), tcl = -0.3)
plot(hmm.filterpass.pca$x[,1], hmm.filterpass.pca$x[,2], col = allcolours, pch = 19, cex = 0.8, xlab = 'PC 1', ylab = 'PC 2', main = 'HMM-based CNVs > 5% freq')
plot(hmm.filterpass.pca$x[,3], hmm.filterpass.pca$x[,4], col = allcolours, pch = 19, cex = 0.8, xlab = 'PC 3', ylab = 'PC 4', main = 'HMM-based CNVs > 5% freq')
plot(hmm.filterpass.pca$x[,5], hmm.filterpass.pca$x[,6], col = allcolours, pch = 19, cex = 0.8, xlab = 'PC 5', ylab = 'PC 6', main = 'HMM-based CNVs > 5% freq')
plot(c(0,1), c(0,1), type = 'n', bty = 'n', xaxt = 'n', yaxt = 'n', xlab = '', ylab = '')
legend(-0.05, 1, names(colourscheme)[1:8], col = colourscheme[1:8], pch = 19, lty = 0, bty = 'n', cex = 1.5)
legend(0.5, 1, names(colourscheme[9:16]), col = colourscheme[9:16], pch = 19, lty = 0, bty = 'n', cex = 1.5)
dev.off()
# We have decided to go with the non-singleton HMM-based CNVs, with more PCs, and with a barplot of
# proportion of variance explained.
# Proportion of variance per PC, from the non-singleton PCA.
pca.variance <- hmm.nonsingle.pca$sdev^2
prop.pca.variance <- pca.variance / sum(pca.variance)
png('PCA_nonsingle_full.png', width = 720)
par(mfrow = c(2,3), mar = c(3,3,1,1), mgp = c(1.5,0.3,0), tcl = -0.3, cex = 0.9)
# PCs 1-8 pairwise. FIX: these panels previously plotted hmm.filterpass.pca,
# which contradicted the file name, the header comment above, and the variance
# barplot (all based on the non-singleton PCA).
for (i in 1:4){
  plot(hmm.nonsingle.pca$x[,i*2-1], hmm.nonsingle.pca$x[,i*2], col = allcolours, pch = 19, cex = 0.8, xlab = paste('PC', i*2-1), ylab = paste('PC', i*2))
}
# Empty panel used as a canvas for the two-column population legend.
plot(c(0,1), c(0,1), type = 'n', bty = 'n', xaxt = 'n', yaxt = 'n', xlab = '', ylab = '')
legend(-0.05, 1, names(colourscheme)[1:8], col = colourscheme[1:8], pch = 19, lty = 0, bty = 'n', cex = 1.5)
legend(0.5, 1, names(colourscheme[9:16]), col = colourscheme[9:16], pch = 19, lty = 0, bty = 'n', cex = 1.5)
barplot(prop.pca.variance[1:10]*100, names.arg = 1:10, border = NA, ylab = 'Variance explained (%)', xlab = 'Principal component', cex.names = 0.94)
dev.off()
# Same figure without the legend panel, so PCs 1-10 fit in five panels.
png('PCA_nonsingle_full_nolegend.png', width = 720)
par(mfrow = c(2,3), mar = c(3,3,1,1), mgp = c(1.5,0.3,0), tcl = -0.3, cex = 0.9)
for (i in 1:5){
  plot(hmm.nonsingle.pca$x[,i*2-1], hmm.nonsingle.pca$x[,i*2], col = allcolours, pch = 19, cex = 0.8, xlab = paste('PC', i*2-1), ylab = paste('PC', i*2))
}
barplot(prop.pca.variance[1:10]*100, names.arg = 1:10, border = NA, ylab = 'Variance explained (%)', xlab = 'Principal component', cex.names = 0.94)
dev.off()
# Now write the different HMM call matrices to file.
# Note: sep = '\t' means these ".csv" files are actually tab-separated;
# col.names = NA writes a blank header cell above the rownames column.
write.table(hmm.calls, 'HMM_calls.csv', sep = '\t', col.names = NA)
write.table(hmm.pca$x, 'HMM_PCA.csv', sep = '\t', col.names = NA)
write.table(hmm.nonsingle.calls, 'HMM_nonsingle_calls.csv', sep = '\t', col.names = NA)
write.table(hmm.nonsingle.pca$x, 'HMM_nonsingle_PCA.csv', sep = '\t', col.names = NA)
write.table(hmm.filterpass.calls, 'HMM_filterpass_calls.csv', sep = '\t', col.names = NA)
write.table(hmm.filterpass.pca$x, 'HMM_filterpass_PCA.csv', sep = '\t', col.names = NA)
# Snapshot the entire workspace for downstream analyses.
save.image('PCA.Rdata')
|
8e60153e05ff0b90df26876776b2b88012d0c41d
|
996b1f638557f3168caf0a8953d9bb17d04cacd9
|
/plot1.R
|
badfccf7b8ac000404561ec6feb94a74ec7c5f2b
|
[] |
no_license
|
rkrsathya/ExData_Plotting1
|
1fed5b1968bb3145f4b7fad91da72c9aad0e15fa
|
6457223ca27ec70502445cd55e2b1a980ea6853e
|
refs/heads/master
| 2020-05-23T08:16:50.775665
| 2016-10-09T00:31:25
| 2016-10-09T00:31:25
| 70,237,351
| 0
| 0
| null | 2016-10-07T10:36:01
| 2016-10-07T10:36:01
| null |
UTF-8
|
R
| false
| false
| 809
|
r
|
plot1.R
|
## Draw a frequency barplot of Global Active Power and save it as plot1.png.
# Read the semicolon-separated household power data (read.csv2 assumes ';' sep
# and ',' decimal mark, so numeric columns arrive as text).
power_data <- read.csv2("household_power_consumption.txt")
power_data$modifiedDate <- as.Date(power_data$Date, "%d/%m/%Y")
# Keep only the first two days of February 2007 (subset() also drops NA dates).
feb_days <- subset(power_data, modifiedDate > "2007-01-31" & modifiedDate < "2007-02-03")
# Convert the text readings to numeric via character (avoids factor-level pitfalls).
feb_days$Global_active_power <- as.numeric(as.character(feb_days$Global_active_power))
# Bin the readings into 0.5 kW intervals and tabulate the counts per bin.
bin_edges <- seq(0, 10, by = 0.5)
power_freq <- table(cut(feb_days$Global_active_power, bin_edges, labels = seq(0, 9.5, by = 0.5)))
# Render the histogram-style barplot to a PNG device.
png("plot1.png")
barplot(power_freq, space = 0, col = "Red",
        xlab = "Global Active Power (kilowatts)", ylab = "Frequency",
        main = "Global Active Power")
dev.off()
|
6b947092bfa5f63430f8eabbce206d185a2eafe3
|
5ac3bb2d932d370ae510ea9383e2b243eb93b6a8
|
/R/R_Day2.R
|
9237ba23634433f6f9573b3787338584f9565ff6
|
[] |
no_license
|
Kingkong92/TIL
|
637fb55fa56ae67ba08f07db7bf2b3676b280841
|
3cf134404d1904445c3a29abbc302a675b702493
|
refs/heads/master
| 2020-09-25T13:15:47.938430
| 2020-06-18T00:19:58
| 2020-06-18T00:19:58
| 226,011,113
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,429
|
r
|
R_Day2.R
|
# Day 2 practice: dplyr summarise/group_by, joins, and row binding.
exam<-read.csv("r데이터분석_Data/Data/csv_exam.csv")
exam
library(dplyr)
# summarise function
exam %>% summarise(mean_math=mean(math))
View(exam)
# Other aggregations: mean(math), sd(), IQR(), max, min, sum
# group_by: enables per-group computations.
exam %>%
  group_by(class) %>%
  summarise(mean_math=mean(math))
exam %>%
  group_by(class) %>%
  summarise(mm=mean(math),
            sm=sum(math),
            md=median(math),
            cnt=n()) # n() counts the members of each group
# Grouping by two or more variables:
library(ggplot2)
mpg
View(mpg)
mpg %>%
  group_by(manufacturer,drv) %>%
  summarise(mc=mean(cty)) #%>% # cty: city fuel economy
  #head(10)
# Group mpg by manufacturer, keep only class "suv",
# add tot = mean of cty and hwy per car.
mpg %>%
  group_by(manufacturer) %>%
  filter(class=="suv") %>%
  mutate(tot=(cty+hwy)/2) %>%
  summarise(mt=mean(tot)) %>%
  arrange(desc(mt)) %>%
  head(5)
# Interpretation: top-5 manufacturers by mean combined fuel economy of their SUVs.
test1<-data.frame(id=c(1,2,3,4,5),
                  midterm=c(60,80,70,90,55))
test2<-data.frame(id=c(1,2,3,4,5),
                  midterm=c(70,80,40,80,75))
test3<-data.frame(id=c(1,2,3,4,5),
                  final=c(70,80,40,80,75))
total<-left_join(test1,test2,by="id")
total
exam
# left_join: merge two tables on a key column.
name<-data.frame(class=c(1,2,3,4,5),
                 teacher=c("kim","lee","park","choi","Go"))
exam_new<-left_join(exam,name,by='class')
exam_new
# Row-wise concatenation: bind_rows (note: test1/test2 share the id column,
# so ids repeat in the result).
test1<-data.frame(id=c(1,2,3,4,5),
                  midterm=c(60,80,70,90,55))
test2<-data.frame(id=c(1,2,3,4,5),
                  final=c(70,80,40,80,75))
ta<-bind_rows(test1,test2)
ta
# cf. column-wise concatenation: bind_cols
exam %>% filter(english>=80)
exam %>% filter(class==1 & math>=50)
exam %>% filter(class %in% c(1,3,5))
exam %>%
  select(id,math)
# Add a "test" column:
# english >= 60 => "Pass", otherwise "fail".
exam %>%
  mutate(test=ifelse(english>=60, 'Pass',"fail")) %>%
  arrange(test)
test1
test3
left_join(test1,test3,by='id')
# Missing values
df<-data.frame(sex=c("M","F",NA,"M","F"),
               score=c(5,4,3,5,NA))
df
# Detecting missing values:
is.na(df)
table(is.na(df))
table(is.na(df$sex))
table(is.na(df$score))
# Mean/sum of score: returns NA when any NA is present.
mean(df$score)
sum(df$score)
# Print only the rows where score is NA.
df %>% filter(is.na(score))
# Remove rows with missing score.
df_nomiss<- df %>% filter(!is.na(score))
df_nomiss
mean(df_nomiss$score)
sum(df_nomiss$score)
# Keep only rows where neither score nor sex is NA.
df_nomiss<-df %>% filter(!is.na(score) &!is.na(sex))
df_nomiss
# Keep only complete rows (using na.omit).
df_nomiss2<-na.omit(df)
df_nomiss2
# na.rm = T excludes NA from the computation (prefer spelling out TRUE).
mean(df$score, na.rm = T)
sum(df$score, na.rm = T)
exam<-read.csv("r데이터분석_Data/Data/csv_exam.csv")
exam
# Practice: inject missing values.
# c() builds the vector of row indices to blank out.
exam[c(3,8,15), "math"]<-NA
exam
# Prints NA because of the injected missing values.
exam %>% summarise(mm=mean(math))
# Exclude the missing values with na.rm=T.
exam %>% summarise(mm=mean(math, na.rm=T),
                   sm=sum(math,na.rm=T),
                   md=median(math,na.rm=T))
#
# Impute: where math is NA, substitute 55.
exam$math<-ifelse(is.na(exam$math),55, exam$math )
exam$math
table(exam$math) # similar to pandas' value_counts()
mean(exam$math) # works now — no NA left
# Outlier detection
df<-data.frame(sex=c(1,2,1,3,2,1),
               score=c(5,4,3,4,2,6))
table(df$sex) # table() makes values other than the valid 1/2 easy to spot
table(df$score)
df$sex<-ifelse(df$sex==3,NA,df$sex)
df$sex
df$score<-ifelse(df$score>5,NA,df$score)
df$score
# Compute after dropping the NAs.
df %>%
  filter(!is.na(sex) & !is.na(score)) %>% # filter out NA rows
  group_by(sex) %>%
  summarise(ms=mean(score))
boxplot(mpg$hwy)
boxplot(mpg$hwy)$stats
mean(mpg$hwy)
median(mpg$hwy)
# Mark hwy values outside the boxplot whiskers [12, 37] as NA
# (this modifies a local copy of ggplot2's mpg, not the package data).
mpg$hwy<-ifelse(mpg$hwy<12 | mpg$hwy>37, NA, mpg$hwy)
mpg$hwy
table(is.na(mpg$hwy))
# Group by drv;
# mean_hwy = mean of hwy, excluding missing values.
mpg %>%
  group_by(drv) %>%
  summarise(mean_hwy=mean(hwy,na.rm=T))
# Draw only the empty plot background/axes first.
ggplot(data=mpg, aes(x=displ, y=hwy))
# Then choose the geom: geom_point draws the points on top.
ggplot(data=mpg, aes(x=displ, y=hwy))+geom_point()
# Per-axis limit settings:
ggplot(data=mpg, aes(x=displ, y=hwy))+
  geom_point()+
  xlim(3,6)+
  ylim(10,30)
table(is.na(df$score))
ggplot(data=mpg, aes(x=displ, y=hwy))+
  geom_col()
economics
ggplot(data=economics,aes(x=date,y=unemploy))+geom_line()
# NOTE: installing inside a script re-runs on every execution; remove once installed.
install.packages("foreign")
library(foreign) # read SPSS files
library(dplyr)   # data wrangling
library(ggplot2) # visualization
library(readxl)  # read Excel files
# Korea Welfare Panel Study (KoWEPS)
# https://www.koweps.re.kr:442/
raw_welfare <- read.spss(file="r데이터분석_Data/Data/Koweps_hpc10_2015_beta1.sav", to.data.frame=T)
welfare <- raw_welfare # work on a copy
# Always inspect the structure first.
str(welfare)
View(welfare)
dim(welfare)
summary(welfare) # descriptive statistics
# Rename the survey codes to readable column names.
welfare <- rename(welfare,
                  sex=h10_g3,
                  birth=h10_g4,
                  marriage=h10_g10,
                  religion=h10_g11,
                  code_job=h10_eco9,
                  income=p1002_8aq1,
                  code_region=h10_reg7) # region code
View(welfare)
class(welfare$sex)
table(welfare$sex)
# Outliers/missing: 9 encodes "unknown" for sex.
welfare$sex <- ifelse(welfare$sex==9, NA, welfare$sex)
# Memorize this recode-then-check pattern for data cleaning.
table(is.na(welfare$sex))
str(welfare)
# Recode 1 => "male", 2 => "female".
welfare$sex <- ifelse(welfare$sex==1, "male", "female")
table(welfare$sex)
qplot(welfare$sex)
class(welfare$income)
summary(welfare$income)
# Mean and median differ by ~50: a few very high earners skew the mean.
# Min = 0 and the very large max also need scrutiny, plus NA handling.
qplot(welfare$income)+xlim(0,1000)
# Outliers/missing: 0 and 9999 are invalid income codes.
# FIX: the recoded vector was previously computed but never assigned back.
welfare$income <- ifelse(welfare$income %in% c(0,9999), NA, welfare$income)
table(is.na(welfare$income))
# Mean income by sex, for households with known income.
sex_income <- welfare %>%
  filter(!is.na(income)) %>%
  group_by(sex) %>%
  summarise(mi=mean(income))
# (These are 2015 survey data.)
ggplot(data=sex_income,
       aes(x=sex, y=mi))+
  geom_col()
summary(welfare$birth)
table(is.na(welfare$birth))
# No NA, but 9999 encodes "unknown" => NA.
welfare$birth <- ifelse(welfare$birth==9999, NA, welfare$birth)
table(welfare$birth)
welfare$age <- 2015 - welfare$birth + 1
# FIX: was summary(welfare$sa) — no such column; 'age' was intended.
summary(welfare$age)
qplot(welfare$age)
# Income by single year of age.
age_income <- welfare %>%
  filter(!is.na(income)) %>%
  group_by(age) %>%
  summarise(mi=mean(income))
head(age_income)
ggplot(data=age_income,
       aes(x=age, y=mi))+
  geom_line()
# Age bands: young (<30), middle (<59), old (the rest).
welfare <- welfare %>%
  mutate(ageg = ifelse(age<30,"young",ifelse(age<59,"middle","old")))
# FIX: was ttable(...) — typo for table().
table(welfare$ageg)
qplot(welfare$ageg)
# Monthly income by age band (young/middle/old), with visualization.
# Mean monthly income per band:
ageg_income <- welfare %>%
  group_by(ageg) %>%
  summarise(am=mean(income, na.rm = T))
welfare %>%
  filter(!is.na(income)) %>%
  group_by(ageg) %>%
  summarise(mi=mean(income))
ageg_income
ggplot(data=ageg_income, aes(x=ageg, y=am))+geom_col()
ggplot(data=ageg_income, aes(x=ageg, y=am))+
  geom_col()+
  scale_x_discrete(limits=c("young","middle","old"))
# How does mean income differ by sex within each age band?
sex_income <- welfare %>%
  filter(!is.na(income)) %>%
  group_by(ageg, sex) %>%
  summarise(mi=mean(income))
sex_income
# FIX: scale_x_discrete() was a dangling statement; it must be chained with '+'.
ggplot(data=sex_income, aes(x=ageg, y=mi, fill=sex))+
  geom_col(position = "dodge")+
  scale_x_discrete(limits=c("young","middle","old"))
# Mean income by sex and single year of age.
sex_age <- welfare %>%
  filter(!is.na(income)) %>%
  group_by(age, sex) %>% # grouping order determines the output ordering
  summarise(mi=mean(income))
head(sex_age)
View(sex_age)
# aes(): use col= for line charts (fill= is for bar charts).
ggplot(data=sex_age, aes(x=age, y=mi, col=sex))+
  geom_line()
# Count respondents per job code.
welfare$code_job
table(welfare$code_job) # table() counts each code
# The job code book lives on the 2nd sheet of the workbook.
library(readxl)
list_job <- read_excel("r데이터분석_Data/Data/Koweps_Codebook.xlsx", sheet=2, col_names=T)
list_job
# FIX: left_join takes 'by', not 'id'; the misspelled argument was silently
# ignored and the join fell back to all common columns.
welfare <- left_join(welfare, list_job, by="code_job")
welfare$job
welfare$code_job
# For rows with a known job code, show code_job and job.
welfare %>%
  filter(!is.na(code_job)) %>%
  select(code_job, job) %>%
  head(20)
# Mean income per job.
job_income <- welfare %>%
  filter(!is.na(job) & !is.na(income)) %>%
  group_by(job) %>%
  summarise(mi=mean(income))
head(job_income)
# Top 10 jobs by mean income.
top10 <- job_income %>%
  arrange(desc(mi)) %>% # arrange() sorts ascending by default; desc() reverses
  head(10) # top 10 jobs
top10
# Visualize; coord_flip() makes the long job labels readable.
ggplot(data=top10, aes(x=job, y=mi))+
  geom_col()+
  coord_flip()
# Reorder the bars so the highest income comes first.
ggplot(data=top10, aes(x=reorder(job,-mi), y=mi))+
  geom_col()+
  coord_flip()
minor10 <- job_income %>%
  arrange(desc(mi)) %>%
  tail(10) # bottom 10 jobs
# Which jobs are most common for each sex (top 10)?
# NOTE(review): this pipeline only groups and sorts; a count step (e.g. n())
# appears to be missing to actually answer the question — confirm intent.
welfare %>%
  filter(!is.na(job)) %>%
  group_by(sex, job) %>%
  arrange(desc(job))
|
82b574ec598ed698eec3bd73a930511d4f3f0c4e
|
5eac4dceb44bff203fc70f2c40dbdf11b0fa84f2
|
/doc/getting_started_rvcetools.R
|
c3a20c96da4749793d45a5ec29404236ca36dc54
|
[] |
no_license
|
pvrqualitasag/rvcetools
|
a50841c74224a4dc4b239cb49c5a97899e857760
|
e3bdc284c604ab643e7429ac6b85e234bd3d24db
|
refs/heads/master
| 2021-12-30T00:27:03.715802
| 2021-12-21T16:26:10
| 2021-12-21T16:26:10
| 211,788,325
| 0
| 1
| null | 2020-04-28T06:38:37
| 2019-09-30T06:15:07
|
R
|
UTF-8
|
R
| false
| false
| 1,998
|
r
|
getting_started_rvcetools.R
|
## Code purled from the rvcetools "getting started" vignette;
## the '## ----' lines mark the original knitr chunk boundaries.
## ---- include = FALSE----------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
## ----setup, eval=TRUE----------------------------------------------------
library(rvcetools)
## ------------------------------------------------------------------------
# Locate the example VCE results file shipped with the package.
(s_input <- system.file("extdata","VCE_results.csv", package = "rvcetools"))
## ----read_vce, message=FALSE---------------------------------------------
(tbl_vce <- read_vce(psInputFile = s_input))
## ------------------------------------------------------------------------
# A small 3x3 variance-covariance matrix for the conversion examples.
(mat_vcov <- matrix(c(104,75,18,75,56,12,18,12,7), nrow = 3, byrow = TRUE))
## ------------------------------------------------------------------------
# Base-R conversion to a correlation matrix ...
cov2cor(mat_vcov)
## ------------------------------------------------------------------------
# ... and the package's equivalent.
(mat_cor <- cov_to_cor(mat_vcov))
## ------------------------------------------------------------------------
# Round-trip back to covariances using the original variances.
cor_to_cov(pmat_cor = mat_cor, pvec_var = diag(mat_vcov))
## ------------------------------------------------------------------------
# A matrix named "npd" (non-positive-definite) used to demonstrate bending;
# its eigenvalues are printed next.
(mat_npd <- matrix(data = c(100, 80, 20, 6, 80, 50, 10, 2, 20, 10, 6, 1, 6, 2, 1, 1), nrow = 4))
## ------------------------------------------------------------------------
eigen(mat_npd, only.values = TRUE)$values
## ------------------------------------------------------------------------
# Bend the matrix towards positive definiteness with makePD2().
(mat_bent1 <- makePD2(A = mat_npd))
## ------------------------------------------------------------------------
eigen(mat_bent1, only.values = TRUE)$values
## ------------------------------------------------------------------------
# Alternative bending that bounds the largest/smallest eigenvalue ratio.
(mat_bent2 <- make_pd_rat_ev(A = mat_npd, pn_max_ratio = 100))
## ------------------------------------------------------------------------
eigen(mat_bent2, only.values = TRUE)$values
## ------------------------------------------------------------------------
# Compare with the Matrix package's nearPD().
Matrix::nearPD(mat_npd)
## ------------------------------------------------------------------------
sessioninfo::session_info()
|
f3a138b186f34562945e6bcfcebbd55155f6c63a
|
5db6bb7d143c6d6e53e85c89c1ad5c207adae3d2
|
/SecondSEM/DataMining/lab3/lab3.R
|
df39186301e921e631793b964887ca1a8be6a43a
|
[] |
no_license
|
alperenkara/smada
|
541c6431b3fe17d1017ef35d9c2fd5912081e5e1
|
2e7f10d25f2b2977b6371ec196498dfb835fc5b1
|
refs/heads/master
| 2021-04-15T14:08:53.009186
| 2019-01-24T22:53:36
| 2019-01-24T22:53:36
| 126,248,443
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 45
|
r
|
lab3.R
|
# Data Mining Lab 3
# 24/10/2018
# Alperen
|
7f399ea58e3a93b7393973011cfbab12e3e6f85b
|
f6aeb9fcaae4dc01c7ebc9504810dc5ccb20630a
|
/behave/DERS_DTS.R
|
3816a090c5a79909f32e403e13f04f9fe49469ad
|
[] |
no_license
|
LabNeuroCogDevel/7TBrainMech_scripts
|
e28468b895e1845c676bb4c3188719f248fd1988
|
1089f64ee73841cabae40d88a9913dacd761ed9e
|
refs/heads/master
| 2023-08-30T17:10:31.737956
| 2023-08-18T17:40:04
| 2023-08-18T17:40:04
| 160,859,221
| 4
| 1
| null | 2019-04-16T15:10:27
| 2018-12-07T18:09:20
|
MATLAB
|
UTF-8
|
R
| false
| false
| 6,518
|
r
|
DERS_DTS.R
|
#!/usr/bin/env Rscript
suppressPackageStartupMessages({library(dplyr); library(tidyr); library(glue)})
# 20220804WF - init
# Difficulties in emotion regulation scale (DERS)
# depends on 000_getQualtrics.R writing selfreport.csv files
# Verbatim DERS item wordings; used to select the matching columns of each
# Qualtrics selfreport export (matched against row 1 by questions_subset below).
DERS_QUESTIONS <- c(
   "I am clear about my feelings.", "I pay attention to how I feel.",
   "I experience my emotions as overwhelming and out of control.",
   "I have no idea how I am feeling.", "I have difficulty making sense out of my feelings.",
   "I am attentive to my feelings.", "I know exactly how I am feeling.",
   "I care about what I am feeling.", "I am confused about how I feel.",
   "When I’m upset, I acknowledge my emotions.", "When I’m upset, I become angry with myself for feeling that way.",
   "When I’m upset, I become embarrassed for feeling that way.",
   "When I’m upset, I have difficulty getting work done.", "When I’m upset, I become out of control.",
   "When I’m upset, I believe that I will remain that way for a long time.",
   "When I’m upset, I believe that I will end up feeling very depressed.",
   "When I’m upset, I believe that my feelings are valid and important.",
   "When I’m upset, I have difficulty focusing on other things.",
   "When I’m upset, I feel out of control.", "When I’m upset, I can still get things done.",
   "When I’m upset, I feel ashamed at myself for feeling that way.",
   "When I’m upset, I know that I can find a way to eventually feel better.",
   "When I’m upset, I feel like I am weak.", "When I’m upset, I feel like I can remain in control of my behaviors.",
   "When I’m upset, I feel guilty for feeling that way.", "When I’m upset, I have difficulty concentrating.",
   "When I’m upset, I have difficulty controlling my behaviors.",
   "When I’m upset, I believe there is nothing I can do to make myself feel better.",
   "When I’m upset, I become irritated at myself for feeling that way.",
   "When I’m upset, I start to feel very bad about myself.", "When I’m upset, I believe that wallowing in it is all I can do.",
   "When I’m upset, I lose control over my behavior.", "When I’m upset, I have difficulty thinking about anything else.",
   "When I’m upset I take time to figure out what I’m really feeling.",
   "When I’m upset, it takes me a long time to feel better.",
   "When I’m upset, my emotions feel overwhelming.")
# Verbatim Distress Tolerance Scale (DTS) item wordings, used the same way.
DTS_QUESTIONS <- c(
   "Feeling distressed or upset is unbearable to me.",
   "When I feel distressed or upset, all I can think about is how bad I feel.",
   "I can't handle feeling distressed or upset.", "My feelings of distress are so intense that they completely take over.",
   "There's nothing worse than feeling distressed or upset.", "I can tolerate being distressed or upset as well as most people.",
   "My feelings of distress or being upset are not acceptable.",
   "I'll do anything to avoid feeling distressed or upset.", "Other people seem to be able to tolerate feeling distressed or upset better than I can.",
   "Being distressed or upset is always a major ordeal for me.",
   "I am ashamed of myself when I feel distressed or upset.", "My feelings of distress or being upset scare me.",
   "I'll do anything to stop feeling distressed or upset.", "When I feel distressed or upset, I must do something about it immediately.",
   "When I feel distressed or upset, I cannot help but concentrate on how bad the distress actually feels.")
# Strip a leading "<anything> - " (lazy match up to the first "- ") or a bare
# leading "-" from Qualtrics question labels; other strings pass through unchanged.
rm_q_prefix <- function(label) {
  gsub('^.*?- |^-', '', label)
}
# Keep only the columns whose first-row question text (with the Qualtrics
# prefix stripped) appears in the reference item list Q.
questions_subset <- function(d, Q=DERS_QUESTIONS) {
  wanted_cols <- which(rm_q_prefix(d[1, ]) %in% Q)
  d[, wanted_cols]
}
# Map a free-text response containing a range like "36-65" onto its 1-5
# ordinal position among the five DERS answer buckets (NA when no range is found).
ders_numeric <- function(resp) {
  bucket <- stringr::str_extract(resp, '\\d+-\\d+')
  bucket_levels <- c("0-10", "11-35", "36-65", "66-90", "91-100")
  as.numeric(factor(bucket, levels = bucket_levels))
}
# Read every per-visit selfreport.csv matching `glob` into a list of
# data.frames, tagging each with its id via LNCDR::ld8from(path) as column ld8.
# Returns the list (possibly empty when nothing matches the glob).
all_surveys <- function(glob="/Volumes/L/bea_res/Data/Temporary Raw Data/7T/1*_2*[0-9]/1*_2*[0-9]_selfreport.csv"){
  files <- Sys.glob(glob)
  # FIX: the result was previously captured in a trailing assignment, which
  # made the function's return value invisible; return the list directly.
  lapply(files, function(f) read.csv(f) %>% mutate(ld8 = LNCDR::ld8from(f)))
}
# Pick the row of a Qualtrics export that holds the actual responses:
# exports with more than 3 rows keep their data in row 2; a 3-row export
# whose 3rd row is mostly blank (>30 empty cells) also falls back to row 2;
# otherwise the last row is the data row.
find_data_row <- function(d) {
  row_i <- nrow(d)
  if (row_i > 3) {
    row_i <- 2
  }
  if (row_i >= 3 && sum(d[row_i, ] == "") > 30) {
    row_i <- 2
  }
  row_i
}
# Stash the original column names in a 'questions' attribute, rename the
# columns to the (prefix-stripped) question text found in row 1, and record
# `msg` as the data.frame's comment() for provenance.
add_metadata <- function(d, msg) {
  attr(d, 'questions') <- names(d)
  first_row_text <- unlist(unname(d[1, ]))
  names(d) <- rm_q_prefix(first_row_text)
  comment(d) <- msg
  d
}
# Extract one participant's DERS responses from a raw survey export,
# recoded to the 1-5 answer buckets, with the participant id re-attached.
read_ders <- function(d) {
  ld8 <- d$ld8[1]
  # Keep only the DERS item columns; record question text + provenance in attrs.
  ders <- questions_subset(d, DERS_QUESTIONS)
  ders <- add_metadata(ders, msg = glue("DERS: subset for {ld8}"))
  # Locate the row that actually holds responses, recode every answer to its
  # numeric bucket, and put the id back (it was dropped by the column subset).
  resp_row <- find_data_row(ders)
  ders[resp_row, ] %>%
    mutate(across(everything(), ders_numeric), ld8 = ld8)
}
# Extract one participant's DTS responses from a raw survey export.
# Unlike read_ders, answers stay as-is (no numeric recoding here).
read_dts <- function(d) {
  ld8 <- d$ld8[1]
  # Keep only the DTS item columns; record question text + provenance in attrs.
  dts <- questions_subset(d, DTS_QUESTIONS)
  dts <- add_metadata(dts, msg = glue("DTS: subset for {ld8}"))
  # Take the response row and re-attach the participant id.
  resp_row <- find_data_row(dts)
  dts[resp_row, ] %>% mutate(ld8 = ld8)
}
# Drop rows that contain no survey data: a row with at most one non-NA value
# (typically just its ld8 id) is removed.
# d: data.frame of responses, one row per participant.
# Returns d restricted to rows with more than one non-NA value.
remove_empty <- function(d) {
  # rowSums over the is.na() matrix avoids the previous apply() call, which
  # coerced the whole data.frame to a single (character) matrix row-by-row.
  has_data <- rowSums(!is.na(d)) > 1
  # drop = FALSE guards against a 1-column input collapsing to a vector.
  d[has_data, , drop = FALSE]
}
# Combine DERS responses across every survey export into one data.frame,
# one row per participant; rows with no DERS data are dropped.
all_ders <- function(s=NULL){
  if (is.null(s)) {
    s <- all_surveys()
  }
  combined <- bind_rows(lapply(s, read_ders))
  combined <- remove_empty(combined)
  # Report ids that were read but contributed no DERS data.
  seen_ids <- sapply(s, function(x) x$ld8[1])
  ld8_missing <- setdiff(seen_ids, combined$ld8)
  if (length(ld8_missing) > 0L) {
    cat("# missing",length(ld8_missing), "DERS data. responses are precoded or missing? ", head(ld8_missing), "\n")
  }
  # The per-survey 'questions' attribute is not carried through bind_rows;
  # it varies by survey battery, so there is no single sensible value to keep.
  combined
}
# Combine DTS responses across survey exports, dropping all-NA rows.
# (20221101: 328/344 rows survived the empty-row filter.)
# Returned invisibly, matching the original trailing-assignment behaviour.
all_dts <- function(s){
  dts_all <- bind_rows(lapply(s, read_dts))
  invisible(remove_empty(dts_all))
}
# if running from command line
# we'll lose 'questions' attribute
# sys.nframe()==0 is TRUE only when this file is executed directly
# (e.g. via Rscript), not when it is source()d from another script.
# NOTE(review): assumes a txt/ directory already exists under the working dir.
if(sys.nframe()==0){
  cat("# collecting all surveys @", Sys.time(),"\n")
  s <- all_surveys()
  cat("# collecting DERS responses @", Sys.time(),"\n")
  ders <- all_ders(s)
  cat("# saving data w/dims",dim(ders)," to txt/ders.csv @", Sys.time(),"\n")
  write.csv(file="txt/ders.csv", ders, row.names=FALSE)
  cat("# collecting DTS responses @", Sys.time(),"\n")
  dts <- all_dts(s)
  cat("# saving data w/dims",dim(dts)," to txt/dts.csv @", Sys.time(),"\n")
  write.csv(file="txt/dts.csv", dts, row.names=FALSE)
}
|
0ef034900cebe17fea21db705bc88d0073e19172
|
dbbf242e80ec855cd8e1c4c122356aa0ba74080d
|
/man/summarize_short.integer.Rd
|
3a17926a8e49f6fb900615442e35d3c9b52e7575
|
[
"MIT"
] |
permissive
|
LenaNoel/visR
|
f736dda7dead7b11b16bbc9fc3b27fb20c7efc1f
|
469f940ebd6c0b1245fa2324d0f8c031fd6b3fce
|
refs/heads/main
| 2023-08-28T17:34:54.927186
| 2021-06-15T15:25:41
| 2021-06-15T15:25:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 497
|
rd
|
summarize_short.integer.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_table.R
\name{summarize_short.integer}
\alias{summarize_short.integer}
\title{Create variable summary for integer variables}
\usage{
\method{summarize_short}{integer}(x)
}
\arguments{
\item{x}{an object of class "integer"}
}
\value{
Short list of summary statistics for the input.
}
\description{
Calculates mean (standard deviation), median (IQR), min-max range and N/\% missing elements
for an integer vector.
}
|
7518ca3ddd452f45c7669b33670a25584fb24474
|
b721796fa801f363e375beec789b096cadb9b10b
|
/man/rmetaSMOTE.Rd
|
4c15ba6f7193d89c5f70ccd604c4ee8f57593303
|
[
"MIT"
] |
permissive
|
rusher321/rmeta
|
fdb0cc0a7eb2f69ae8f08cf99b14fc416734c53e
|
a00a2fe458c88e6b8171b7ad3fbfa931ca118db8
|
refs/heads/master
| 2022-05-13T01:46:44.924444
| 2022-03-13T08:13:17
| 2022-03-13T08:13:17
| 199,239,908
| 6
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 289
|
rd
|
rmetaSMOTE.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modelMeta.R
\name{rmetaSMOTE}
\alias{rmetaSMOTE}
\title{rmetaSMOTE}
\usage{
rmetaSMOTE(form, data, perc.over = 200, k = 5, learner = NULL, ...)
}
\arguments{
\item{...}{}
}
\value{
}
\description{
rmetaSMOTE
}
|
f1827585cf50f87f0397f8ed06cd1cebac7f3751
|
85e2f39e88f95ebb89a9d02af3711c052c91f385
|
/Correlation_covariance(fun).R
|
eb6a15991309d8d244f6c568e5034300bf87baa7
|
[] |
no_license
|
Eustrain/Genetics-correlation
|
d86cf99af045bcc1bd823e71d840a721dd977564
|
12c1dc3e5f8f17e36f8ece1dc67ab62c6f86b3ce
|
refs/heads/master
| 2020-06-05T02:12:26.416290
| 2019-06-25T02:26:48
| 2019-06-25T02:26:48
| 192,277,371
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,299
|
r
|
Correlation_covariance(fun).R
|
library(lme4)

# Estimate genotypic (G) and phenotypic (P) variance-covariance matrices and
# the corresponding correlation matrices for a set of traits, following
# Singh & Chaudhary (1987).
#
# Traits: character vector of trait column names present in `data`.
# Entry:  genotype/entry identifier for each row (e.g. data$Parents).
# Rep:    replicate identifier for each row.
# data:   data.frame holding the trait columns.
#
# Prints the four matrices (rounded to 2 decimals) and returns them
# invisibly as a named list.
co_gp <- function(Traits, Entry, Rep, data) {
  traits <- Traits
  geno <- as.factor(Entry)
  rep <- as.factor(Rep)
  nrep <- length(levels(rep))
  leng_traits <- length(traits) # number of traits
  # G = genotypic, P = phenotypic (co)variance matrices.
  G <- matrix(nrow = leng_traits, ncol = leng_traits)
  P <- matrix(nrow = leng_traits, ncol = leng_traits)
  # Per-trait variance components from a random-effects model.
  for (i in seq_len(leng_traits)) {
    y <- data[, traits[i]]
    fm <- lmer(y ~ (1 | geno) + (1 | rep))
    vc <- VarCorr(fm, comp = "Variance")
    G[i, i] <- vc$geno[1]
    # Phenotypic variance = genotypic variance + residual variance / n reps.
    P[i, i] <- vc$geno[1] + attr(vc, "sc")^2 / nrep
  }
  # Covariances via the variance-of-sums identity:
  #   cov(Xi, Xj) = (var(Xi + Xj) - var(Xi) - var(Xj)) / 2
  # seq_len() also guards the single-trait case (the old 1:(n-1) ran backwards).
  for (i in seq_len(leng_traits - 1)) {
    for (j in (i + 1):leng_traits) {
      y <- data[, traits[i]] + data[, traits[j]]
      fm <- lmer(y ~ (1 | geno) + (1 | rep))
      varcor <- VarCorr(fm)
      G[i, j] <- G[j, i] <- (varcor$geno[1] - G[i, i] - G[j, j]) / 2
      P[i, j] <- P[j, i] <- (varcor$geno[1] + attr(varcor, "sc")^2 / nrep - P[i, i] - P[j, j]) / 2
    }
  }
  # Correlations: D^(-1/2) %*% V %*% D^(-1/2).
  diag_G <- diag(diag(G)^{-0.5}, leng_traits, leng_traits)
  diag_P <- diag(diag(P)^{-0.5}, leng_traits, leng_traits)
  GC <- diag_G %*% G %*% diag_G # genotypic correlation matrix
  PC <- diag_P %*% P %*% diag_P # phenotypic correlation matrix
  # Label every matrix with the trait names.
  row.names(G) <- Traits
  colnames(G) <- Traits
  row.names(P) <- Traits
  colnames(P) <- Traits
  row.names(GC) <- Traits
  colnames(GC) <- Traits
  row.names(PC) <- Traits
  colnames(PC) <- Traits
  G <- round(G, 2)
  P <- round(P, 2)
  GC <- round(GC, 2)
  PC <- round(PC, 2)
  results <- list(Genetic_Cov = G, Pheno_Cov = P, Genetic_Cor = GC, Pheno_Cor = PC)
  print(results)
  # print() already returns its argument invisibly; make that explicit.
  invisible(results)
}

# Example usage. FIX: this call previously appeared BEFORE the function
# definition, which fails when the script is run top-to-bottom.
data <- read.csv("data.csv", head = T)
names(data)
Traits <- c("ears", "len", "weight", "yield")
co_gp(Traits, data$Parents, data$rep, data)

## Singh, R.K. and Chaudhary, B.D. (1987) Biometrical Methods in Quantitative Genetic Analysis. Kalyani Publishers, New Delhi, Ludhiana, India, 318.
|
f2c5143b3d2cff81e2599dbffea006c3ac46a220
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/not/examples/random.intervals.Rd.R
|
64c4c36b88d1791f71e0bf09de3a7cba1c5e6e9d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 225
|
r
|
random.intervals.Rd.R
|
# Example code extracted from the 'not' package help page for random.intervals.
library(not)
### Name: random.intervals
### Title: Generate random intervals
### Aliases: random.intervals
### ** Examples
#*** draw 100 intervals with the endpoints in 1,...,100
# NOTE(review): the comment above and the call below disagree — the call passes
# (50, 100); confirm against the not::random.intervals argument order.
intervals <- random.intervals(50, 100)
|
19995d9a0d98ce15176de6480c85b2556383822d
|
89fe6de0f06778887600d7bd5d369b04cb5bab61
|
/man/int_flip.Rd
|
c2fa4de15f6cc6420963d810891c7d286736bc93
|
[] |
no_license
|
Poissonfish/lubridate
|
b3326e3e3a10384e50db32c5ba46f04c00cf8063
|
398c64ed4ee549feb3e93fa99f705e7d9ad09d80
|
refs/heads/master
| 2020-04-05T23:13:10.787535
| 2016-02-19T08:40:23
| 2016-02-19T08:40:23
| 52,164,196
| 1
| 0
| null | 2016-02-20T16:54:53
| 2016-02-20T16:54:52
| null |
UTF-8
|
R
| false
| true
| 718
|
rd
|
int_flip.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/intervals.r
\name{int_flip}
\alias{int_flip}
\title{Flip the direction of an interval}
\usage{
int_flip(int)
}
\arguments{
\item{int}{An interval object}
}
\value{
An interval object
}
\description{
Reverses the order of the start date and end date in an interval. The
new interval takes place during the same timespan as the original interval,
but has the opposite direction.
}
\examples{
int <- interval(ymd("2001-01-01"), ymd("2002-01-01"))
# 2001-01-01 UTC--2002-01-01 UTC
int_flip(int)
# 2002-01-01 UTC--2001-01-01 UTC
}
\seealso{
\code{\link{int_shift}}, \code{\link{int_start}}, \code{\link{int_end}},
\code{\link{int_length}}
}
|
f72c9126c57c46b5a48efdd35cc72129578f5012
|
ed46dd8d36d63b8c5b691b5501670518499acd1c
|
/Task 3 Dealing with Numbers Operation.R
|
db68f37fbc1f186979c1f82e947aa64bc54b2f91
|
[] |
no_license
|
ThistleAna/learn-R
|
4578a78306b72d3e6d5b75d799adebce61a4d9e0
|
e10a8235ff08735abac8aae58c28d0df1122a980
|
refs/heads/main
| 2023-03-25T18:23:41.886926
| 2021-03-23T17:19:42
| 2021-03-23T17:19:42
| 350,791,158
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 335
|
r
|
Task 3 Dealing with Numbers Operation.R
|
# Exercise: build the integer sequence 20..50, then report the mean of the
# numbers 20..60 and the sum of the numbers 51..91.
seq_20_50 <- 20:50
mean_20_60 <- mean(20:60)
sum_51_91 <- sum(51:91)

print("Print number sequence from 20 to 50.")
print(seq_20_50)
print("Print the mean of numbers from 20 to 60")
print(mean_20_60)
print("Sum of numbers from 51 to 91")
print(sum_51_91)
|
0ca26e8e707613604680316dbc98b16197abbb88
|
f8b1d3258c2927f59a4d59cb19cf62157cc835e1
|
/tests/testthat/apps/MIQ_en_num-items-8/app.R
|
3d0877062a5e360119935fd407df4f84200e5ccc
|
[
"MIT"
] |
permissive
|
ViolaPsch/MIQ
|
71c3f094abb0b8da1f3fae7c56f178f3f7c4bfbf
|
95b15b33422ecd374dc010777d9f2012fac98662
|
refs/heads/master
| 2022-12-25T09:58:59.311073
| 2020-09-23T14:58:18
| 2020-09-23T14:58:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 64
|
r
|
app.R
|
# Standalone launcher for the MIQ test implemented on top of psychTestR.
library(psychTestR)
library(MIQ)
# Run the MIQ test in standalone mode with 8 items (see ?MIQ_standalone).
MIQ_standalone(num_items = 8)
|
60ff7ff013ca5a7fc19c708c37edbb53b9095d5d
|
ec2b9803a923d928751c76bbf1c31227928bffc9
|
/R/samples.sample.R
|
da71ba965dec005304f3eee08d1231ffd0dc6d79
|
[] |
no_license
|
cran/BRugs
|
a5106711a3f8d3fa0adb91465df235e0f27a1b18
|
acafa2035e6ef39e566085026eeabf67cd1361cd
|
refs/heads/master
| 2023-05-27T11:23:45.986896
| 2023-05-15T05:52:29
| 2023-05-15T05:52:29
| 17,677,954
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,103
|
r
|
samples.sample.R
|
"samplesSample" <-
function(node)
# Get stored sample for single component of OpenBUGS name
{
if(samplesGetFirstChain() > samplesGetLastChain())
stop("Number of first chain is larger than last chain!")
if(length(node) != 1)
stop("Exactly one scalar node must be given.")
sM <- samplesMonitors(node)[1]
if(sM == "model must be initialized before monitors used")
stop("model must be initialized / updated / monitored before samplesSample is used")
if(length(grep("^no monitor set for variable", sM)))
stop(sM)
nodeSize <- .OpenBUGS(c("BugsRobjects.SetVariable", "BugsRobjects.GetSize"),
c("CharArray","Integer"),
list(node,NA))[[2]]
if(nodeSize > 1)
stop("Only scalar nodes such as ", node, "[1] are allowed.")
sampleSize <- samplesSize(node)
sample <- .OpenBUGS(c(.SamplesGlobalsCmd(node), "SamplesEmbed.SampleValues"),
c("CmdInterpreter","RealArray"),
list(node,double(sampleSize)))[[2]]
sample
}
|
e235f654e3853d8c54ff739114190df76c4db6ec
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googledrivev3.auto/man/team.delete.Rd
|
bb30808d7a8d198b15e82f13808658797fe5baa4
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 840
|
rd
|
team.delete.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drive_functions.R
\name{team.delete}
\alias{team.delete}
\title{Permanently deletes a Team Drive for which the user is an organizer. The Team Drive cannot contain any untrashed items.}
\usage{
team.delete(teamDriveId)
}
\arguments{
\item{teamDriveId}{The ID of the Team Drive}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/drive
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/drive)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/drive/}{Google Documentation}
}
|
6d6e23f356b9d2f588530ee812284eaa6ebf8073
|
90b1d6a6c3bbf2a83b94b72b4867c94f8bb2aab5
|
/run_analysis.R
|
8bf8ee025c18ca7e3ae11a07ce472315c8f82ac3
|
[] |
no_license
|
jcasaboza/run_analysis.r
|
249cb970da3c4b52c0817ddb4de9981a5486232f
|
033f8f01a6078dbf27f14691bb55d77593acd56e
|
refs/heads/main
| 2022-12-25T12:56:36.077427
| 2020-10-10T01:17:55
| 2020-10-10T01:17:55
| 302,150,933
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,068
|
r
|
run_analysis.R
|
# run_analysis.R — downloads the UCI HAR (smartphone activity) dataset,
# extracts only the mean()/std() measurements, merges test and train sets,
# and writes out (1) the merged tidy data and (2) per-activity/per-subject
# averages.
#Im using library dplyr
library(dplyr)
#now im going to download and prepare the data
currdir <- "./data"
if(!dir.exists("./data")) dir.create("./data")
# NOTE(review): setwd() inside a script makes every later relative path
# depend on where the script was launched from; consider file.path() instead.
setwd(currdir)
downloadurl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
zipfile <- "UCI HAR Dataset.zip"
download.file(downloadurl, zipfile)
if(file.exists(zipfile)) unzip(zipfile)
#The files've been downloaded and the following files exist
basedir <- "UCI HAR Dataset"
featuresfile <- paste(basedir, "features.txt", sep="/")
activitylabelsfile <- paste(basedir, "activity_labels.txt", sep="/")
testvariablesfile <- paste(basedir, "test/X_test.txt", sep="/")
testactivityfile <- paste(basedir, "test/y_test.txt", sep="/")
testsubjectfile <- paste(basedir, "test/subject_test.txt", sep="/")
trainvariablesfile <- paste(basedir, "train/X_train.txt", sep="/")
trainactivityfile <- paste(basedir, "train/y_train.txt", sep="/")
trainsubjectfile <- paste(basedir, "train/subject_train.txt", sep="/")
neededfiles <- c(featuresfile,
activitylabelsfile,
testvariablesfile,
testactivityfile,
testsubjectfile,
trainvariablesfile,
trainactivityfile,
trainsubjectfile
)
# Fail fast if the archive did not unpack the expected files.
# (sapply is used purely for its side effect here — the stop() inside.)
sapply(neededfiles, function(f) if(!file.exists(f)) stop(paste("Needed file ", f, " doesn't exist. Exitting ...", sep="")))
#Reading featuresfiles
features <- read.table(featuresfile, col.names=c("rownumber","variablename"))
# Fix the known "BodyBody" typo present in some feature names of this dataset.
allvariables <-
mutate(features, variablename = gsub("BodyBody", "Body", variablename))
####
## Filter the 66 variables - mean() and std()
####
neededvariables <- filter(allvariables, grepl("mean\\(\\)|std\\(\\)", variablename))
####
## Make the allvariables readable
## Remove special characters, Convert to lower case
####
allvariables <- mutate(allvariables, variablename = gsub("-", "", variablename),
variablename = gsub("\\(", "", variablename),
variablename = gsub("\\)", "", variablename),
variablename = tolower(variablename))
####
## Make the neededvariables readable
## Remove special characters, Convert to lower case
####
neededvariables <- mutate(neededvariables, variablename = gsub("-", "", variablename),
variablename = gsub("\\(", "", variablename),
variablename = gsub("\\)", "", variablename),
variablename = tolower(variablename))
####
## Read activitylabelsfile
activitylabels <- read.table(activitylabelsfile, col.names=c("activity", "activitydescription"))
####
####
## Read in test data stats
####
testvalues <- read.table(testvariablesfile, col.names = allvariables$variablename)
testneededvalues <- testvalues[ , neededvariables$variablename]
####
## Read in test activities
testactivities <- read.table(testactivityfile, col.names=c("activity"))
####
####
## Read in test subjects
testsubjects <- read.table(testsubjectfile, col.names=c("subject"))
####
####
## Add a readable activity description
testactivitieswithdescr <- merge(testactivities, activitylabels)
####
####
## Put the test data together
## Assuming that the data is in the same order and all we need is cbind
## Combining values, activities, subjects
testdata <- cbind(testactivitieswithdescr, testsubjects, testneededvalues)
####
####
## Read in train variables
####
trainvalues <- read.table(trainvariablesfile, col.names = allvariables$variablename)
trainneededvalues <- trainvalues[ , neededvariables$variablename]
####
## Read in train activities
trainactivities <- read.table(trainactivityfile, col.names=c("activity"))
####
####
## Read in train subjects
trainsubjects <- read.table(trainsubjectfile, col.names=c("subject"))
####
####
## Add a readable activity description
trainactivitieswithdescr <- merge(trainactivities, activitylabels)
####
####
## Put the train data together
## Assuming that the data is in the same order and all we need is cbind
## Combining values, activities, subjects
traindata <- cbind(trainactivitieswithdescr, trainsubjects, trainneededvalues)
####
####
## Combine the testdata and traindata
## Additionally make subject a factor
alldata <- rbind(testdata, traindata) %>% select( -activity )
alldata <- mutate(alldata, subject = as.factor(alldata$subject))
####
####
## Write the data out
write.table(alldata, "Mean_And_StdDev_For_Activity_Subject.txt")
####
####
## Create a second, independent tidy data set with the average of each
## variable for each activity and each subject.
## Group the data by activity, subject
allgroupeddata <- group_by(alldata,activitydescription,subject)
## Get the average of each variable
# NOTE(review): summarise_each()/funs() are deprecated in modern dplyr;
# the current equivalent is summarise(across(everything(), mean)).
summariseddata <- summarise_each(allgroupeddata, funs(mean))
## Write the data out
write.table(summariseddata, "Average_Variable_By_Activity_Subject.txt", row.names = FALSE)
|
0484b83e77804fb8f60d210dfb6bf32735cbb705
|
cfc494fb498c1c61870253df3e99d209cbcd821b
|
/Summary Stats, Hyp Testing, Regression R Code - 1.R
|
4c86cd706ac9feabd77f0de6df98474ef6df2e5e
|
[] |
no_license
|
yangzh6598/R
|
3a9cba7421cecee136f3b2e75499ac17396e2d15
|
6c70b2bd3734a794cf2eac97ed5b9bd4a2152d26
|
refs/heads/master
| 2021-01-20T15:39:00.367264
| 2017-05-09T20:17:08
| 2017-05-09T20:17:08
| 90,787,952
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,606
|
r
|
Summary Stats, Hyp Testing, Regression R Code - 1.R
|
# Teaching script: summary statistics, visualization, confidence intervals,
# t-tests, ANOVA and linear regression on an interactively chosen CSV.
# Assumes the chosen file has columns Time, Time2, kms, houses, pieces —
# TODO(review): confirm against the course dataset.
##### Summary statistics and visualization
mydata=read.csv(file=file.choose()) #Brings pop up box to select .csv data file
mydata #Displays all data
# NOTE(review): attach() puts columns on the search path; masking bugs are
# easy to introduce this way — mydata$Time is the safer idiom.
attach(mydata) #Makes columns accessible by name
head(mydata) #Displays column names and first 6 data points
nrow(mydata) #Reports number of observations (data points)
ncol(mydata) #Reports number of variables per observation
summary(mydata) #Reports Minimum, Maximum, Mean, and Q1,Median, and Q3 for each data column
mean(mydata$Time) #Reports mean of Time from dataset
min(Time) #Reports minimum of Time from dataset
max(Time) #Reports maximum of Time from dataset
range(Time)
####### Visualization
boxplot(mydata) #Creates generic boxplot
boxplot(mydata[,1:2],main="Time to deliver package",xlab="Postman") #Creates boxplot with times only, adds title and x label
hist(Time) #Creates naive histogram of times
hist(Time,breaks=16) #Creates a histogram with 16 bins
plot(houses,Time)
cov(houses,Time)
######## Confidence intervals
# Manual (1-alpha) CI for the mean of Time using the t distribution.
alpha=.05
xbar=mean(Time) #mean
s=sd(Time) #Standard deviation
n=length(Time) #number of observations
t=qt(1-alpha/2, df=n-1) #alpha/2 t value with n-1 degrees of freedom
# z is computed for comparison only; it is not used in the bounds below.
z=qnorm(1-alpha/2) #alpha/2 z value
lower = xbar-t*s/sqrt(n) #lower confidence bound
upper = xbar+t*s/sqrt(n) #upper confidence bound
####### T-tests
#One sample t-test
t.test(Time,mu=30) # H0: mu = 30 H1: mu /= 30
t.test(Time,mu=28,alternative="greater") # H0: mu = 28 H1: mu > 28
#Two sample t-test with equal variance
#alternative can be "two.sided","greater",or"less"
#mu is the hypothesized difference between means of populations
#Equal variance True/False
#Confidence level for fixed test
#paired test True/False
t.test(Time,Time2,alternative="two.sided",mu=0,var.equal=F,conf.level=0.95,paired=F)
#For paired t-test, it is often convenient to look at boxplots or histograms of the differences
boxplot(Time-Time2)
hist(Time-Time2)
#Another way to perform paired test
# (alternative="t" partially matches "two.sided" — works, but spelling it
# out is clearer)
t.test(Time-Time2,alternative="t",mu=0,var.equal=F,conf.level=0.95)
##### ANOVA (Analysis of variance)
# One-way ANOVA on a small built-in example: mileage by octane rating.
Mileage = c(354,363,381,382,370,364,370,382,373)
Octane = as.factor(c(87,87,87,89,89,89,93,93,93)) #cast treatment labels as factor
fit = aov(Mileage~Octane)
summary(fit)
datafilename="http://personality-project.org/r/datasets/R.appendix1.data" #read data from URL link
data.ex1=read.table(datafilename,header=T) #read the data into a table
aov.ex1 = aov(Alertness~Dosage,data=data.ex1) #do the analysis of variance
summary(aov.ex1) #show the summary table
print(model.tables(aov.ex1,"means"),digits=3) #report the means and the number of subjects/cell
boxplot(Alertness~Dosage,data=data.ex1) #graphical summary
datafilename="http://personality-project.org/r/datasets/R.appendix2.data"
data.ex2=read.table(datafilename,header=T) #read the data into a table
data.ex2 #show the data
aov.ex2 = aov(Alertness~Gender+Dosage,data=data.ex2) #do the analysis of variance
summary(aov.ex2) #show the summary table
print(model.tables(aov.ex2,"means"),digits=3) #report the means and the number of subjects/cell
boxplot(Alertness~Dosage+Gender,data=data.ex2) #graphical summary of means of the 4 cells
##### Regression
model1=lm(Time~kms) #Performs simple linear regression using kms as predictor and Time as response
model1
summary(model1)
#Plots data and adds linear regression line
plot(kms,Time)
abline(model1)
#Calculates variance of errors, sigma^2
summary(model1)$sigma^2
#Check histogram and QQ plot of residuals
hist(rstandard(model1))
qqnorm(rstandard(model1))
qqline(rstandard(model1))
#Check for correlation of errors
plot(rstandard(model1))
abline(0,0)
#Multivariate linear model
model2=lm(Time~kms+houses+pieces)
summary(model2)
#Checking Fit of Model
#Creates plot of standardized residuals against index number
plot(rstandard(model2))
abline(0,0)
#Plots standardized residuals against predictor variable kms
plot(kms,rstandard(model2))
abline(0,0)
#Plots standardized residuals against predictor variable houses
plot(houses,rstandard(model2))
abline(0,0)
#Plots standardized residuals against predictor variable pieces
plot(pieces,rstandard(model2))
abline(0,0)
#Creates histogram of standardized residuals to check for normality
hist(rstandard(model2))
#Create q-q plot to check for normality
qqnorm(rstandard(model2))
qqline(rstandard(model2))
|
bd1913fec1f7f56d1dafc3c9ce77965c08234210
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/koRpus/R/FOG.R
|
8d0fc730ee3f5dc8d94f684f2274e2afa3ecc983
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,001
|
r
|
FOG.R
|
# Copyright 2010-2014 Meik Michalke <meik.michalke@hhu.de>
#
# This file is part of the R package koRpus.
#
# koRpus is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# koRpus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with koRpus. If not, see <http://www.gnu.org/licenses/>.
#' Readability: Gunning FOG Index
#'
#' This is just a convenient wrapper function for \code{\link[koRpus:readability]{readability}}.
#'
#' Calculates the Gunning FOG index. In contrast to \code{\link[koRpus:readability]{readability}},
#' which by default calculates all possible indices, this function will only calculate the index value.
#'
#' If \code{parameters="PSK"}, the revised parameters by Powers-Sumner-Kearl (1958) are used, and
#' if \code{parameters="NRI"}, the simplified parameters from the Navy Readability Indexes, respectively.
#'
#' @param txt.file Either an object of class \code{\link[koRpus]{kRp.tagged-class}}, a character vector which must be be
#' a valid path to a file containing the text to be analyzed, or a list of text features. If the latter, calculation
#' is done by \code{\link[koRpus:readability.num]{readability.num}}.
#' @param hyphen An object of class kRp.hyphen. If \code{NULL}, the text will be hyphenated automatically.
#' @param parameters A list with named magic numbers and a vector with verb suffixes, defining the relevant parameters for the index,
#' or one of \code{"PSK"} or \code{"NRI"}.
#' @param ... Further valid options for the main function, see \code{\link[koRpus:readability]{readability}} for details.
#' @return An object of class \code{\link[koRpus]{kRp.readability-class}}.
# @author m.eik michalke \email{meik.michalke@@hhu.de}
#' @references
#' DuBay, W.H. (2004). \emph{The Principles of Readability}. Costa Mesa: Impact Information.
#' WWW: \url{http://www.impact-information.com/impactinfo/readability02.pdf}; 22.03.2011.
#'
#' Powers, R.D, Sumner, W.A, & Kearl, B.E. (1958). A recalculation of four adult readability formulas,
#' \emph{Journal of Educational Psychology}, 49(2), 99--105.
#' @keywords readability
#' @export
#' @examples
#' \dontrun{
#' FOG(tagged.text)
#' }
FOG <- function(txt.file, hyphen=NULL, parameters=list(syll=3, const=0.4, suffix=c("es", "ed", "ing")), ...){
  # Dispatch on the input type: a list is treated as pre-computed text
  # features and routed to readability.num(); anything else (tagged text
  # object or a file path) goes through the full readability() pipeline.
  # Either way the FOG parameters are wrapped as list(FOG=...) as the
  # backends expect, and the result is returned directly.
  if(!is.list(txt.file)){
    readability(txt.file=txt.file, hyphen=hyphen, index="FOG", parameters=list(FOG=parameters), ...)
  } else {
    readability.num(txt.features=txt.file, hyphen=hyphen, index="FOG", parameters=list(FOG=parameters), ...)
  }
}
|
2f2ae3262f65b804be7ed5039e53d4fb75cc820c
|
be348ef72c01bd46481b14a9f9df770b46c25f72
|
/Skellam.R
|
16027810689d3afbdcfc221653f278b1d161df75
|
[] |
no_license
|
cardsbettor/OddsEngine
|
b0ad1d16bf02e54da316240ec825ecc48c7ecd58
|
dcda80a365a96bf1602f2ac19119c57f93007ccc
|
refs/heads/main
| 2023-02-10T00:13:42.766742
| 2021-01-01T21:18:05
| 2021-01-01T21:18:05
| 321,608,874
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 229
|
r
|
Skellam.R
|
Skellam <- function(diff, mu1, mu2) {
  # Probability that a Poisson(mu1) variate exceeds a Poisson(mu2) variate
  # by at least `diff`, i.e. the upper tail P(X1 - X2 >= diff) of the
  # Skellam distribution.
  #
  # BUG FIX: the original summed skellam::dskellam(i, ...) for i in diff:10,
  # which (a) truncated the upper tail at 10, undercounting the probability,
  # and (b) silently produced a reversed sequence (e.g. 11:10 == c(11, 10))
  # whenever diff > 10, giving nonsense results. Using the distribution
  # function gives the exact tail mass for any integer diff:
  #   P(X >= diff) = 1 - P(X <= diff - 1)
  1 - skellam::pskellam(diff - 1, mu1, mu2)
}
|
24025ddb7b6964d248eb85d59202b85431e6149e
|
5ab3adef6c2a9e4e13b0b127dfe71c7446269a87
|
/r_walkthrough.r
|
53adeaf9506f1da7da3ed4a78d1bb09e443fd1e3
|
[] |
no_license
|
analyticsPierce/times-series-forecasting
|
e61059fb8cdaadb1cdc17966e24dd0efe210b52e
|
ea5bd73ad240440238a3cec293e69c1e29eaadb4
|
refs/heads/master
| 2021-01-01T17:22:05.240964
| 2013-10-06T20:27:30
| 2013-10-06T20:27:30
| 13,368,602
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,794
|
r
|
r_walkthrough.r
|
# Walkthrough script: load a web-hosted time series, forecast four metrics
# 44 days ahead, plot the combined actual+forecast data, and persist the
# result to MySQL.
# libraries - load these before you get started
library(ggplot2)
library(Hmisc)
library(forecast)
library(RMySQL)
# load data
# this is a basic file, notice it loads from a url
# NOTE(review): this Dropbox URL may no longer resolve — verify before running.
# this will create a dataframe. read about these. I use dataframes almost all the time. these are like a table in a database.
timeSeries <- read.csv("http://dl.dropbox.com/u/23226147/time_series_test.csv")
# here are some handy functions for evaluating the data you imported. select each one after the comment.
# head(timeSeries)
# summary(timeSeries)
# str(timeSeries)
# describe(timeSeries)
# some basic field manipulation assignments
# convert a field to a date format
timeSeries$Date <- as.Date(timeSeries$Date)
# rename a column
# (done as copy-then-delete: Revenue is added here, Bookings removed below)
timeSeries$Revenue <- timeSeries$Bookings
# delete a column
timeSeries$Bookings <-NULL
# lets start by creating a basic scatter plot
# plot orders over time with a regression line and confidence bands
ggplot(timeSeries, aes(x=Date, y=Orders)) + geom_point() + stat_smooth()
# now we need to create some dates that extend into the future so we can forecast them
# Oct 18 - Nov 30 2010 inclusive = 44 days, matching the forecast horizon below.
date.seq <- as.Date("2010-10-18"):as.Date("2010-11-30")
# print(date.seq)
# next we use forecast to create future results
uv.results <- data.frame(forecast(timeSeries$Unique.Visitors,44,level=c(80,95), fan=FALSE))
carts.results <- data.frame(forecast(timeSeries$Carts,44,level=c(80,95), fan=FALSE))
orders.results <- data.frame(forecast(timeSeries$Orders,44,level=c(80,95), fan=FALSE))
revenue.results <- data.frame(forecast(timeSeries$Revenue,44,level=c(80,95), fan=FALSE))
# create basic forecasts for each column and create a new dataframe with all our data
new.dates <- data.frame(days = as.Date(date.seq, origin = "1970-01-01"), uv.results$Point.Forecast, carts.results$Point.Forecast, orders.results$Point.Forecast, revenue.results$Point.Forecast)
# head(new.dates)
# now we have a dataframe of future dates and forecast then add it to your existing dates
# Renaming so rbind() below can match columns by name against timeSeries.
names(new.dates) <- list(a="Date", b="Unique.Visitors", c="Carts", d="Orders", e="Revenue")
results.fc <- rbind(timeSeries, new.dates)
print(results.fc)
str(results.fc)
# create a better visual with our new forecast, notice we are changing the color of the dots to align with revenue
ggplot(results.fc, aes(x = Date, y = Unique.Visitors)) +
geom_point(aes(colour = Revenue)) +
stat_smooth()
# now save your sweet graph
# NOTE(review): "/filepath/" is a placeholder — replace with a real directory.
ggsave("/filepath/visitors_bookings_timeSeries_results.png",width=4,height=2)
# now lets save this great data out to our MySQL database
# create the connection (assuming this database already exists)
my_db <- dbConnect(MySQL(), user="root", dbname="my_db")
# writing the data to a new table called "forecast_data"
dbWriteTable(my_db, "forecast_data", results.fc, append=FALSE)
# disconnect from db
dbDisconnect(my_db)
|
629d91588f161d231971213ab33fa170e7be8445
|
98f1d2f59230bdc06be9be1a7c88de458115ae92
|
/Data.R
|
260053be4c2e631dbc73cd1f887cecb149bcb808
|
[] |
no_license
|
aimod62/DDP-Final-Project
|
f099e2f134a932fc0cc507d2b66fe82103ffc6c3
|
71d20515629cfc42e2cdaf26e1060ba2e304fb45
|
refs/heads/master
| 2021-01-19T00:28:08.567901
| 2017-04-05T02:54:15
| 2017-04-05T02:54:15
| 87,173,037
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,599
|
r
|
Data.R
|
library(dplyr)
library(reshape2)
library(ggplot2)
library("dygraphs")
library(plotly)
library(shiny)
library(maps)
#Extracting and Cleaning the Data.
#Source:World Bank - Data Bank
#Loading the Raw Data
data1<- read.csv("~/DDP_Finalproject/57b46c40-8cd0-44f0-bb12-b47778bb9861_Data.csv", stringsAsFactors = FALSE)
View(data1)
#Manipulating the Data: keep the income-growth series, round the yearly
#columns to 1 dp, and reshape to long format (Country.Code / year / value).
p_1<- data1 %>%
  filter(Series.Name == "Adjusted net national income (annual % growth)") %>%
  select(Country.Code, X2011..YR2011., X2012..YR2012., X2013..YR2013.,X2014..YR2014.) %>%
  mutate("2011" = round(as.numeric(X2011..YR2011.), 1),
         "2012" = round(as.numeric(X2012..YR2012.), 1),
         "2013" = round(as.numeric(X2013..YR2013.), 1),
         # BUG FIX: "2014" previously re-used the 2013 column
         # (X2013..YR2013.) — a copy-paste error; it now reads the
         # X2014 column that is selected above.
         "2014" = round(as.numeric(X2014..YR2014.), 1))%>%
  select(-starts_with("X")) %>%
  melt(id = c("Country.Code"))
#Writing a .csv file
write.csv(p_1, "~/data1.csv")
incData<-read.csv("~/DDP_Finalproject/data1.csv", stringsAsFactors = FALSE)
View(incData)
str(incData)
## Creating Dygraph reactive Object
#Loading the Data
data2 <- read.csv("~/DDP_Finalproject/6d85ede2-232f-4977-93eb-144065932f49_Data.csv", stringsAsFactors = FALSE )
View(data2)
str(data2)
#Manipulating the data: transpose the 1997-2013 year columns (cols 5:21)
#into rows, pairing each year with its CO2 emissions value.
p_2 <-data2[ ,5:21] %>% t() %>% as.data.frame()%>% select(V1) %>% cbind(1997:2013) %>%
  setNames(.,c("CO2 emissions ","Year" ))
rownames(p_2) <- c()
p_3 <- p_2[, c(2,1)]
#Writing a .csv file
write.csv(p_3, "~/data3.csv")
CO2_ELSV<-read.csv("~/DDP_Finalproject/data3.csv", stringsAsFactors = FALSE)
View(CO2_ELSV)
str(CO2_ELSV)
|
ff0119a13324cdb49f26622bfa64efc17ae90e15
|
6c11c5e9d7c83793a48cf2ce0a4e15160aeb6c54
|
/R/c.R
|
9a87d3711e2986f2f6789a7c694f0750b529ef27
|
[] |
no_license
|
earowang/vctrs
|
ff6704de55bb6ca9448471de0ec749ef43c0b7f1
|
671dd82c6af02a2cdc5b3b26795f521dfa199609
|
refs/heads/master
| 2020-05-28T09:41:58.097869
| 2020-05-04T17:11:05
| 2020-05-04T17:11:05
| 188,959,149
| 0
| 0
| null | 2019-05-28T05:21:43
| 2019-05-28T05:21:42
| null |
UTF-8
|
R
| false
| false
| 1,790
|
r
|
c.R
|
#' Combine many vectors into one vector
#'
#' Combine all arguments into a new vector of common type.
#'
#' @section Invariants:
#' * `vec_size(vec_c(x, y)) == vec_size(x) + vec_size(y)`
#' * `vec_ptype(vec_c(x, y)) == vec_ptype_common(x, y)`.
#'
#' @param ... Vectors to coerce.
#' @param .name_repair How to repair names, see `repair` options in [vec_as_names()].
#' @return A vector with class given by `.ptype`, and length equal to the
#'   sum of the `vec_size()` of the contents of `...`.
#'
#'   The vector will have names if the individual components have names
#'   (inner names) or if the arguments are named (outer names). If both
#'   inner and outer names are present, an error is thrown unless a
#'   `.name_spec` is provided.
#' @inheritParams vec_ptype_show
#' @inheritParams name_spec
#' @seealso [vec_cbind()]/[vec_rbind()] for combining data frames by rows
#'   or columns.
#' @export
#' @examples
#' vec_c(FALSE, 1L, 1.5)
#'
#' # Date/times --------------------------
#' c(Sys.Date(), Sys.time())
#' c(Sys.time(), Sys.Date())
#'
#' vec_c(Sys.Date(), Sys.time())
#' vec_c(Sys.time(), Sys.Date())
#'
#' # Factors -----------------------------
#' c(factor("a"), factor("b"))
#' vec_c(factor("a"), factor("b"))
#'
#'
#' # By default, named inputs must be length 1:
#' vec_c(name = 1)
#' try(vec_c(name = 1:3))
#'
#' # Pass a name specification to work around this:
#' vec_c(name = 1:3, .name_spec = "{outer}_{inner}")
#'
#' # See `?name_spec` for more examples of name specifications.
vec_c <- function(...,
                  .ptype = NULL,
                  .name_spec = NULL,
                  .name_repair = c("minimal", "unique", "check_unique", "universal")) {
  # All the work happens in compiled code: `vctrs_c` is a C routine
  # registered elsewhere in the package; the dots are collected there.
  .External2(vctrs_c, .ptype, .name_spec, .name_repair)
}
# NOTE(review): presumably rewrites the `.name_repair` formal so its default
# vector is inlined/normalized — fn_inline_formals is defined elsewhere in
# the package; confirm its exact effect there.
vec_c <- fn_inline_formals(vec_c, ".name_repair")
|
ab665ba138d1afdc4a6e5eca8d9ac31f5629bd36
|
4980082f584caa193954b2268589998718b01a10
|
/app.R
|
2d7a2c8fbfa8a65c80caf5075f1de285056fe503
|
[] |
no_license
|
jingwang24/PowerSS-shiny-app
|
c223bba059b70f9cac7ee43d56f59deae89a2a1d
|
9536d0065fe56e05c221865277091db2ce54df41
|
refs/heads/master
| 2020-03-23T05:39:41.551270
| 2018-07-16T15:38:14
| 2018-07-16T15:38:14
| 141,158,899
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,825
|
r
|
app.R
|
library(shiny)
library(dplyr)
library(tidyr)
library(ggplot2)
library(exact2x2)
library(shinythemes)
library(DT)
# Two-sided Fisher exact test p-value, rounded to 3 decimals, for the
# 2x2 table filled row-wise as: a b / c d.
fet <- function(a, b, c, d) {
  contingency <- matrix(c(a, b, c, d), byrow = TRUE, nrow = 2)
  round(fisher.test(contingency)$p.value, 3)
}
# Pre-compute Fisher exact p-values for every possible pair of outcomes
# when each group has up to 16 subjects: row r / column c holds the p-value
# for (r-1) events in group 1 and (c-1) events in group 2. The character
# matrix is converted to a data frame for use by the P-value Table tab.
n1 <- n2 <- 16
pval_grid <- matrix("NA", nrow = n1 + 1, ncol = n2 + 1)
for (events1 in 0:n1) {
  for (events2 in 0:n2) {
    pval_grid[events1 + 1, events2 + 1] <- fet(events1, n1 - events1, events2, n2 - events2)
  }
}
matrix_out <- as.data.frame(pval_grid)
# Shiny UI: three tabs (Power, SampleSize, P-value Table) sharing a sidebar
# with the expected outcome proportions p1/p2; per-tab inputs are shown via
# conditionalPanel keyed on the selected tab id ("tabselected").
ui <- fluidPage(
  theme = shinytheme("united"),
  titlePanel("Power and Sample Size Calculation",windowTitle = "Power/SampleSize"),
  hr(),
  h4("This is a simplified calculator; for now we will assume 2 groups only.
Also, we will come back to time-to-event"),
  br(),br(),
  h5("If you have a set sample size and want to know the power,
choose 'Power' tab, otherwise choose 'SampleSize' tab"),
  sidebarLayout(
    sidebarPanel(width = 5,
      # Inputs shared by all tabs: expected outcome proportions per group.
      wellPanel(
        h5("Set your expected proportion of outcome (yes; infected; dead)"),
        br(),
        h5("in the Control Group:"),
        numericInput(inputId = "p1",
                     label = "p1",
                     value=0.2,
                     min=0.0001,max=1),
        br(),
        h5("in the Experimental Group:"),
        numericInput(inputId = "p2",
                     label = "p2",
                     value=0.8,
                     min=0.0001,max=1)
      ),
      hr(),
      hr(),
      # Shown only on the Power tab: fixed per-group sample sizes n1/n2.
      conditionalPanel("input.tabselected=='Power'",
        h5("Set sample size in each group (enter an integer between 0 and 16):"),
        br(),
        h5("in the Control Group:"),
        numericInput(inputId = "n1",
                     label = "n1",
                     value=5,
                     min=1,max=16),
        br(),
        h5("in the Experimental Group:"),
        numericInput(inputId = "n2",
                     label = "n2",
                     value=5,
                     min=1,max=16)
      ),
      # Shown only on the SampleSize tab: target power.
      conditionalPanel("input.tabselected=='SampleSize'",
        h5("Set the power (enter a number between 0 and 1):"),
        br(),
        numericInput(inputId = "power",
                     label = "Power",
                     value=0.9,
                     min=0, max=1)
      ),
      # Shown only on the P-value Table tab: sample sizes n1_/n2_ used to
      # subset the pre-computed matrix_out grid.
      conditionalPanel("input.tabselected=='P-value Table'",
        h5("Set sample size in each group (enter an integer between 0 and 16):"),
        br(),
        h5("in the Control Group:"),
        numericInput(inputId = "n1_",
                     label = "n1",
                     value=2,
                     min=1,max=16),
        br(),
        h5("in the Experimental Group:"),
        numericInput(inputId = "n2_",
                     label = "n2",
                     value=2,
                     min=1,max=16)
      )
    ),
    mainPanel( width=7,
      tabsetPanel(
        id = "tabselected",
        tabPanel("Power",
                 br(),
                 br(),
                 textOutput("output_power")),
        tabPanel("SampleSize",
                 br(),
                 textOutput("output_samplesize"),
                 br(),
                 br(),
                 plotOutput("output_hist")),
        tabPanel("P-value Table",
                 DT::dataTableOutput("output_table"),
                 br(),
                 br(),
                 textOutput("des"))
      )
    )
  )
)
# Shiny server: computes power (exact2x2::power2x2), required sample size
# (exact2x2::ss2x2), a simulated p-value distribution, and the Fisher
# p-value lookup table built at startup (matrix_out / fet, defined above).
server <- function(input, output) {
  ### Power tab
  output$output_power<-renderText({
    paste("Power =",round(power2x2(input$p1,input$p2,input$n1,input$n2)$power,3))
  })
  ### SampleSize tab and histogram
  output$output_samplesize<-renderText({
    paste("Total Sample Size per Group =",ss2x2(input$p1,input$p2,power=input$power)$n0)
  })
  # Distribution of attainable p-values: each possible 2x2 outcome's p-value
  # is replicated in proportion to its binomial probability (out of n=10000),
  # via the outer product of the two binomial pmfs. The reactive returns the
  # value of the final assignment (character vector of replicated p-values).
  myTab<-reactive({
    out <- matrix("NA",input$n1+1,input$n2+1)
    for (i in 0:input$n1){
      for (j in 0:input$n2){
        out[i+1,j+1] <- fet(i,input$n1-i,j,input$n2-j)
      }
    }
    n <- 10000
    temp <- rep(out, round(n*t(t(dbinom(0:input$n1,input$n1,input$p1))) %*% dbinom(0:input$n2,input$n2,input$p2)))
  })
  # Histogram of the simulated p-value distribution with the mass on either
  # side of 0.05 annotated in the margin.
  # NOTE(review): "distributino" is a typo inside a runtime string (plot
  # title) — left unchanged here; fix in a code change if desired.
  output$output_hist<-renderPlot({
    hist(as.numeric(myTab()),breaks=20, xlab="Distribution of P-values", probability=T,yaxt="n",
         main="This is a histogram of the distributino of \n p-values you might get \n the line shows p=.05",ylab="")
    abline(v=.05, col="red")
    mtext(paste(round(mean(myTab()<=.05),3)," | ",round(mean(myTab()>.05),3)), side=3,adj=0,col="red")
    mtext("0.05", side=1,at=.05, col="red")
  })
  ### P-value table tab
  # Subset the pre-computed 17x17 p-value grid to the requested sizes and
  # label rows/columns with "events/size (binomial probability)".
  output$output_table<-DT::renderDataTable({
    matrix_out_subset<-matrix_out[1:(input$n1_+1),1:(input$n2_+1)]
    colnames(matrix_out_subset) <- paste(0:input$n2_,"/",input$n2_," (",round(dbinom(0:input$n2_,input$n2_,input$p2),3),")",sep="")
    rownames(matrix_out_subset) <- paste(0:input$n1_,"/",input$n1_," (",round(dbinom(0:input$n1_,input$n1_,input$p1),3),")",sep="")
    DT::datatable(data=matrix_out_subset,options=list(pageLength =20))
  })
  # Caption text for the table (rendered below it despite saying "below";
  # also note "comlumn" typo — both are runtime strings, left as-is here).
  output$des<-renderText({
    paste("The table below shows the possible outcomes for group 1 as the rows (n1=",input$n1_,"),and for group 2 as the columns (n2=",input$n2_,". For each row/comlumn,
the probability of that outcome based on the probabilities that were specified
are also given in the row/column headings. The data in the table represent
the p-values that you would get for that combination of outcomes given by the row and column total.
")
  })
}
shinyApp(ui = ui, server = server)
|
550d90bb09147a3942753b1c6f2e215de2ac471c
|
c59d908f1c76f552f18eb074ca4e03bb189a50de
|
/Models/NLP_TextMining/NLP.R
|
228f8e801b43ed20154dcd246196ff2d58e0d88b
|
[] |
no_license
|
ZellW/MachineLearning
|
c64a20d9a53699d78b71da08c9f96ede7b8cabcf
|
d5c751c1d23f425db86e2410c4f9be7577bf3cf3
|
refs/heads/master
| 2021-04-26T23:12:35.957795
| 2020-12-14T13:40:38
| 2020-12-14T13:40:38
| 123,948,478
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,257
|
r
|
NLP.R
|
# --- Setup: cleanNLP with a Python (spaCy) backend via reticulate ---------
library(cleanNLP)
library(sotu)       # State of the Union texts + metadata
library(dplyr)
library(tokenizers)
library(reticulate)
# Point reticulate at a dedicated conda environment (created below).
reticulate::use_python("../anaconda3/envs/NLP2/")
data(sotu_text)
data(sotu_meta)
# Small example corpus, written to a temp file for file-based annotation.
txt <- c("You're invited to join Yext on Tuesday, April 23rd, in a private suite",
"The Charlotte Knights take on the Toledo Mud Hens",
"Minor League Baseball at BB&T Ballpark!",
"We will be enjoying the game from the 3rd Base Suite which has fantastic views of the field as well as the Uptown Charlotte skyline.",
"Plus we will have plenty of food and drinks to go around!")
writeLines(txt, tf <- tempfile())
# Pure-R tokenizer backend (no Python needed for this one).
cnlp_init_tokenizers()
######################
#https://rstudio.github.io/reticulate/articles/python_packages.html
py_config()
# One-time environment creation; safe to skip once "NLP2" exists.
conda_create("NLP2")
# Solving environment: ...working... done
#
#
# ==> WARNING: A newer version of conda exists. <==
# current version: 4.5.11
# latest version: 4.6.8
#
# Please update conda by running
#
# $ conda update -n base -c defaults conda
#
#
# ## Package Plan ##
#
# environment location: C:\Users\czbs7d\DOCUME~1\ANACON~1\envs\NLP2
#
# added / updated specs:
# - python
#
#
# The following packages will be downloaded:
#
# package | build
# ---------------------------|-----------------
# setuptools-40.8.0 | py37_0 663 KB
# pip-19.0.3 | py37_0 1.8 MB
# certifi-2019.3.9 | py37_0 155 KB
# openssl-1.1.1b | he774522_1 5.7 MB
# python-3.7.2 | h8c8aaf0_10 17.7 MB
# ca-certificates-2019.1.23 | 0 158 KB
# wheel-0.33.1 | py37_0 57 KB
# sqlite-3.27.2 | he774522_0 941 KB
# ------------------------------------------------------------
# Total: 27.2 MB
#
# The following NEW packages will be INSTALLED:
#
# ca-certificates: 2019.1.23-0
# certifi: 2019.3.9-py37_0
# openssl: 1.1.1b-he774522_1
# pip: 19.0.3-py37_0
# python: 3.7.2-h8c8aaf0_10
# setuptools: 40.8.0-py37_0
# sqlite: 3.27.2-he774522_0
# vc: 14.1-h0510ff6_4
# vs2015_runtime: 14.15.26706-h3a45250_0
# wheel: 0.33.1-py37_0
# wincertstore: 0.2-py37_0
#
#
# Downloading and Extracting Packages
# setuptools-40.8.0 | 663 KB | ########## | 100%
# pip-19.0.3 | 1.8 MB | ########## | 100%
# certifi-2019.3.9 | 155 KB | ########## | 100%
# openssl-1.1.1b | 5.7 MB | ########## | 100%
# python-3.7.2 | 17.7 MB | ########## | 100%
# ca-certificates-2019 | 158 KB | ########## | 100%
# wheel-0.33.1 | 57 KB | ########## | 100%
# sqlite-3.27.2 | 941 KB | ########## | 100%
# Preparing transaction: ...working... done
# Verifying transaction: ...working... done
# Executing transaction: ...working... done
# #
# # To activate this environment, use:
# # > activate NLP2
# #
# # To deactivate an active environment, use:
# # > deactivate
# #
# # * for power-users using bash, you must source
# #
conda_install("NLP2", "spacy")
# Solving environment: ...working... done
#
#
# ==> WARNING: A newer version of conda exists. <==
# current version: 4.5.11
# latest version: 4.6.8
#
# Please update conda by running
#
# $ conda update -n base -c defaults conda
#
#
#
# ## Package Plan ##
#
# environment location: C:\Users\czbs7d\DOCUME~1\ANACON~1\envs\NLP2
#
# added / updated specs:
# - spacy
#
#
# The following packages will be downloaded:
#
# package | build
# ---------------------------|-----------------
# cffi-1.12.2 | py37hb32ad35_1 218 KB conda-forge
# spacy-2.1.3 | py37he980bc4_0 54.6 MB conda-forge
# intel-openmp-2019.3 | 203 1.7 MB
# six-1.12.0 | py37_1000 21 KB conda-forge
# pycparser-2.19 | py37_1 171 KB conda-forge
# preshed-2.0.1 | py37h33f27b4_0 70 KB
# pyopenssl-19.0.0 | py37_0 81 KB conda-forge
# pyrsistent-0.14.11 | py37hfa6e2cd_0 89 KB conda-forge
# asn1crypto-0.24.0 | py37_1003 154 KB conda-forge
# certifi-2019.3.9 | py37_0 149 KB conda-forge
# wasabi-0.2.0 | py_0 18 KB conda-forge
# cython-blis-0.2.4 | py37hfa6e2cd_0 2.6 MB conda-forge
# thinc-7.0.4 | py37he980bc4_0 1.3 MB conda-forge
# jsonschema-3.0.0a3 | py37_1000 98 KB conda-forge
# tqdm-4.31.1 | py_0 40 KB conda-forge
# attrs-19.1.0 | py_0 32 KB conda-forge
# plac-0.9.6 | py_1 18 KB conda-forge
# murmurhash-1.0.0 | py37h6538335_0 17 KB conda-forge
# idna-2.8 | py37_1000 100 KB conda-forge
# openssl-1.1.1b | hfa6e2cd_2 4.8 MB conda-forge
# requests-2.21.0 | py37_1000 84 KB conda-forge
# numpy-1.16.2 | py37h8078771_1 4.0 MB conda-forge
# cymem-2.0.2 | py37h74a9793_0 35 KB
# ca-certificates-2019.3.9 | hecc5488_0 184 KB conda-forge
# srsly-0.0.5 | py37h6538335_0 183 KB conda-forge
# win_inet_pton-1.1.0 | py37_0 7 KB conda-forge
# cryptography-2.6.1 | py37h7a1dbc1_0 561 KB
# libcblas-3.8.0 | 4_mkl 3.5 MB conda-forge
# libblas-3.8.0 | 4_mkl 3.5 MB conda-forge
# liblapack-3.8.0 | 4_mkl 3.5 MB conda-forge
# mkl-2019.1 | 144 158.3 MB
# pysocks-1.6.8 | py37_1002 22 KB conda-forge
# urllib3-1.24.1 | py37_1000 148 KB conda-forge
# chardet-3.0.4 | py37_1003 184 KB conda-forge
# ------------------------------------------------------------
# Total: 240.4 MB
#
# The following NEW packages will be INSTALLED:
#
# asn1crypto: 0.24.0-py37_1003 conda-forge
# attrs: 19.1.0-py_0 conda-forge
# cffi: 1.12.2-py37hb32ad35_1 conda-forge
# chardet: 3.0.4-py37_1003 conda-forge
# cryptography: 2.6.1-py37h7a1dbc1_0
# cymem: 2.0.2-py37h74a9793_0
# cython-blis: 0.2.4-py37hfa6e2cd_0 conda-forge
# idna: 2.8-py37_1000 conda-forge
# intel-openmp: 2019.3-203
# jsonschema: 3.0.0a3-py37_1000 conda-forge
# libblas: 3.8.0-4_mkl conda-forge
# libcblas: 3.8.0-4_mkl conda-forge
# liblapack: 3.8.0-4_mkl conda-forge
# mkl: 2019.1-144
# murmurhash: 1.0.0-py37h6538335_0 conda-forge
# numpy: 1.16.2-py37h8078771_1 conda-forge
# plac: 0.9.6-py_1 conda-forge
# preshed: 2.0.1-py37h33f27b4_0
# pycparser: 2.19-py37_1 conda-forge
# pyopenssl: 19.0.0-py37_0 conda-forge
# pyrsistent: 0.14.11-py37hfa6e2cd_0 conda-forge
# pysocks: 1.6.8-py37_1002 conda-forge
# requests: 2.21.0-py37_1000 conda-forge
# six: 1.12.0-py37_1000 conda-forge
# spacy: 2.1.3-py37he980bc4_0 conda-forge
# srsly: 0.0.5-py37h6538335_0 conda-forge
# thinc: 7.0.4-py37he980bc4_0 conda-forge
# tqdm: 4.31.1-py_0 conda-forge
# urllib3: 1.24.1-py37_1000 conda-forge
# wasabi: 0.2.0-py_0 conda-forge
# win_inet_pton: 1.1.0-py37_0 conda-forge
#
# The following packages will be UPDATED:
#
# ca-certificates: 2019.1.23-0 --> 2019.3.9-hecc5488_0 conda-forge
# certifi: 2019.3.9-py37_0 --> 2019.3.9-py37_0 conda-forge
# openssl: 1.1.1b-he774522_1 --> 1.1.1b-hfa6e2cd_2 conda-forge
#
#
# Downloading and Extracting Packages
# cffi-1.12.2 | 218 KB | ########## | 100%
# spacy-2.1.3 | 54.6 MB | ########## | 100%
# intel-openmp-2019.3 | 1.7 MB | ########## | 100%
# six-1.12.0 | 21 KB | ########## | 100%
# pycparser-2.19 | 171 KB | ########## | 100%
# preshed-2.0.1 | 70 KB | ########## | 100%
# pyopenssl-19.0.0 | 81 KB | ########## | 100%
# pyrsistent-0.14.11 | 89 KB | ########## | 100%
# asn1crypto-0.24.0 | 154 KB | ########## | 100%
# certifi-2019.3.9 | 149 KB | ########## | 100%
# wasabi-0.2.0 | 18 KB | ########## | 100%
# cython-blis-0.2.4 | 2.6 MB | ########## | 100%
# thinc-7.0.4 | 1.3 MB | ########## | 100%
# jsonschema-3.0.0a3 | 98 KB | ########## | 100%
# tqdm-4.31.1 | 40 KB | ########## | 100%
# attrs-19.1.0 | 32 KB | ########## | 100%
# plac-0.9.6 | 18 KB | ########## | 100%
# murmurhash-1.0.0 | 17 KB | ########## | 100%
# idna-2.8 | 100 KB | ########## | 100%
# openssl-1.1.1b | 4.8 MB | ########## | 100%
# requests-2.21.0 | 84 KB | ########## | 100%
# numpy-1.16.2 | 4.0 MB | ########## | 100%
# cymem-2.0.2 | 35 KB | ########## | 100%
# ca-certificates-2019 | 184 KB | ########## | 100%
# srsly-0.0.5 | 183 KB | ########## | 100%
# win_inet_pton-1.1.0 | 7 KB | ########## | 100%
# cryptography-2.6.1 | 561 KB | ########## | 100%
# libcblas-3.8.0 | 3.5 MB | ########## | 100%
# libblas-3.8.0 | 3.5 MB | ########## | 100%
# liblapack-3.8.0 | 3.5 MB | ########## | 100%
# mkl-2019.1 | 158.3 MB | ########## | 100%
# pysocks-1.6.8 | 22 KB | ########## | 100%
# urllib3-1.24.1 | 148 KB | ########## | 100%
# chardet-3.0.4 | 184 KB | ########## | 100%
# Preparing transaction: ...working... done
# Verifying transaction: ...working... done
# Executing transaction: ...working... done
# Use the conda env created above and initialise the spaCy backend.
reticulate::use_python("../anaconda3/envs/NLP2/")
py_config()
cnlp_init_spacy()
# https://github.com/statsmaths/cleanNLP/issues/10
reticulate::import("spacy")
# https://github.com/explosion/spaCy/issues/1761
# (the URL above was a bare, uncommented line in the original -- a syntax
# error that prevented the whole script from parsing)
#####################
# Annotate the small example corpus written to `tf` earlier in the script.
anno <- cnlp_annotate(tf)
names(anno)
cnlp_get_token(anno)
# Annotate the State of the Union corpus.
sotu <- cleanNLP::cnlp_annotate(sotu_text, as_strings = TRUE, meta = sotu_meta)
# Sentence-length deciles. FIXES vs. the original:
#  * the closing parenthesis of quantile() was missing and a stray `txt`
#    token was appended, so the expression did not parse;
#  * `%$%` requires magrittr to be attached (only dplyr's `%>%` is loaded
#    here), so the exposition step is replaced by explicit extraction.
# NOTE(review): cnlp_get_token() is called on the raw text vector
# `sotu_text`; it probably should receive the annotation object `sotu` --
# confirm against the cleanNLP API.
sent_lengths <- cnlp_get_token(sotu_text) %>%
  group_by(id, sid) %>%
  summarize(sent_len = n())
quantile(sent_lengths$sent_len, seq(0, 1, 0.1))
|
332065cd113d449ae8902d7de6c14ea7017365cc
|
1888b813183a0924e0cce3f466f0b16b268bc090
|
/stats/chisq.test.R
|
317fe1ad7aa7771c1ab745c09cd8aa7c90d0a66c
|
[] |
no_license
|
abreschi/utils
|
ea61091e53fa67ee504675c54025d4dc1efda201
|
30c8497eef35967315e75c15d573093ad79c1df0
|
refs/heads/master
| 2021-01-22T20:19:36.973179
| 2019-04-09T20:21:18
| 2019-04-09T20:21:18
| 85,313,317
| 0
| 4
| null | 2017-06-23T12:51:20
| 2017-03-17T13:15:41
|
Python
|
UTF-8
|
R
| false
| false
| 431
|
r
|
chisq.test.R
|
#!/usr/bin/env Rscript
# Chi-squared test of independence on a 2x2 contingency table derived from
# four set counts supplied on the command line.
cat("USAGE: script.R setAB setA setB total\n")
args <- as.list(as.double(commandArgs(TRUE)))
# Given the contingency matrix
#       _
#   | A | A
# --|---------
# B | a | b
# _ |---------
# B | c | d
#
a <- args[[1]]                 # |A intersect B|
b <- args[[2]] - a             # remainder of set A (arg 2)
cell_c <- args[[3]] - a        # remainder of set B (arg 3)
                               # (renamed from `c` to avoid masking base::c)
d <- args[[4]] - a - b - cell_c
M <- matrix(c(a, b, cell_c, d), byrow = TRUE, nrow = 2)  # TRUE, not T
print(M)
# Compute the test once (the original re-ran chisq.test() for each field).
res <- chisq.test(M)
print(res$expected)
cat("\np.value=", res$p.value, "\n")
|
810b23e9b652b95175af389b39cc89016e5e766d
|
22d395b282768b359dff330261add3fbb29961c8
|
/man/smhiAPI.Rd
|
c63a19a79747eb031ac5f8ff1c7e975b2a410750
|
[] |
no_license
|
Antpe404/SMHI_API
|
40c9cfa39c2b71b2c947a03b84089e66a725eb74
|
f64f4eb5f0297dfd6513177dc90621801c0d1966
|
refs/heads/master
| 2020-05-29T08:46:22.218753
| 2016-09-30T14:08:43
| 2016-09-30T14:08:43
| 69,466,113
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 359
|
rd
|
smhiAPI.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/smhiAPI.R
\docType{package}
\name{smhiAPI}
\alias{smhiAPI}
\alias{smhiAPI-package}
\title{Access the SMHI API via R}
\description{
This package uses XML and JSON to access the SMHI API.
The package includes four functions, which will help you
fetch and plot your weather data.
}
|
41556746264216778048f5b611cec611d43888d6
|
22f761644fa84c4fe0086e3a013fd1f636e2ae0c
|
/inst/doc/Financial_and_non_financial.R
|
6f0c054fa96d751a0bae4ed763f481c53437ae03
|
[] |
no_license
|
cran/bizdays
|
a8fe606fd516f02231f0e6e42377f32747a76892
|
fc0512ebbae7cbb9d8b26829ca35004fc3cf9f3d
|
refs/heads/master
| 2023-01-25T04:01:52.148067
| 2023-01-20T16:40:06
| 2023-01-20T16:40:06
| 17,694,808
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,362
|
r
|
Financial_and_non_financial.R
|
# knitr-extracted (purl) code from the bizdays "Financial and non financial
# calendars" vignette: a toy one-week calendar with Wednesday 2017-01-25 as
# a holiday, built three ways to contrast counting conventions.
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
## ----message=FALSE, warning=FALSE---------------------------------------------
library(bizdays)
# Non-financial calendar: both endpoints count (actual-days convention).
create.calendar(name = "example1", weekdays = c("saturday", "sunday"), start.date = "2017-01-24", end.date = "2017-01-30", holidays = "2017-01-25", financial = FALSE)
calendars()[["example1"]] # view the calendar
## -----------------------------------------------------------------------------
bizdays("2017-01-24", "2017-01-26", "example1")
## -----------------------------------------------------------------------------
bizdays("2017-01-24", "2017-01-25", "example1")
bizdays("2017-01-25", "2017-01-26", "example1")
## -----------------------------------------------------------------------------
offset("2017-01-25", c(-1, 1), "example1")
## -----------------------------------------------------------------------------
bizdays("2017-01-25", "2017-01-28", "example1")
bizdays("2017-01-25", "2017-01-29", "example1")
## ----message=FALSE, warning=FALSE---------------------------------------------
# Financial calendar: day counts exclude one endpoint.
create.calendar(name = "example2", weekdays = c("saturday", "sunday"), start.date = "2017-01-24", end.date = "2017-01-30", holidays = "2017-01-25", financial = TRUE)
calendars()[["example2"]] # view the calendar
## -----------------------------------------------------------------------------
bizdays("2017-01-25", "2017-01-26", "example2")
## -----------------------------------------------------------------------------
offset("2017-01-25", 1, "example2")
## -----------------------------------------------------------------------------
# Adjust the (holiday) start date back to the previous business day first.
prev_date = preceding("2017-01-25", "example2")
prev_date
bizdays(prev_date, "2017-01-26", "example2")
offset(prev_date, 1, "example2")
## ----message=FALSE, warning=FALSE---------------------------------------------
# Financial calendar with automatic endpoint adjustment rules.
create.calendar(name = "example3", weekdays = c("saturday", "sunday"), start.date = "2017-01-24", end.date = "2017-01-30", holidays = "2017-01-25", financial = TRUE, adjust.from = preceding, adjust.to = following)
calendars()[["example3"]] # view the calendar
## -----------------------------------------------------------------------------
bizdays("2017-01-25", "2017-01-26", "example3")
offset("2017-01-25", 1, "example3")
|
8c7315c6696a469a64a24dea18a55f75e1ac44bd
|
7e7bb7bfdf62c24b7fecf78f5247d28839728710
|
/Leading Educators/Leading Educator Rand.R
|
96bdc9ef1371baae5750c79ef782c517344c9868
|
[] |
no_license
|
kippchicago/Data_Analysis
|
1ad042d24c7a1e11e364f39c694692f5829363a4
|
8854db83e5c60bc7941654d22cbe6b9c63613a7f
|
refs/heads/master
| 2022-04-09T10:10:43.355762
| 2020-02-20T18:03:40
| 2020-02-20T18:03:40
| 5,903,341
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,100
|
r
|
Leading Educator Rand.R
|
# --- Setup: KIPP Chicago data pulls for the RAND / Leading Educators study --
library(silounloadr)   # warehouse accessors (get_powerschool, get_nwea_map, ...)
library(tidyverse)
library(lubridate)
# School id -> name/abbreviation lookup.
# NOTE(review): data_frame() is deprecated in favor of tibble(); kept here
# because this is a documentation-only pass.
schools <- data_frame(schoolid = c(78102, 7810, 400146, 4001462, 400163, 4001802, 400180, 4001632),
schoolname = c("Ascend Primary", "Ascend Middle", "Academy Chicago","Academy Chicago Primary", "Bloom", "One Primary", "One Academy", "Bloom Primary"),
schoolabbr =c("KAP", "KAMS", "KAC", "KACP", "KBCP", "KOP", "KOA", "KBP"))
# The three spring MAP test terms of interest.
terms <- c("Spring 2016-2017", "Spring 2017-2018", "Spring 2018-2019")
# Pull Reading/Mathematics MAP results for one test term.
#
# @param term A term name string, e.g. "Spring 2016-2017".
# @return A local tibble (collect() materialises the lazy DB query) with one
#   row per student/subject test event.
# Note the filter runs server-side before collect(); the parameter is named
# `term` (not `term_name`), so data masking resolves the RHS correctly.
get_by_terms <- function(term){
get_nwea_map(table_name = "cdf_combined_kipp_cps") %>%
select(student_id,
student_first_name,
student_last_name,
student_ethnic_group,
term_name,
school_name,
measurement_scale,
test_start_date,
test_ritscore) %>%
filter(term_name == term,
measurement_scale %in% c("Reading", "Mathematics")) %>%
collect()
}
# Fetch all three terms and stack them into one data frame.
# NOTE(review): the name `map` shadows purrr::map in this session -- works
# here, but a rename (e.g. map_raw) would be safer.
map <-
purrr::map_df(terms,
~ get_by_terms(.x)
)
# Restrict to the KIPP Chicago campuses in scope for the study.
map_all <- map %>%
filter(school_name %in% c("KIPP Create College Prep", "KIPP Bloom College Prep", "KIPP Ascend Middle School",
"KIPP One Academy", "KIPP Academy Chicago", "KIPP One Primary", "KIPP Ascend Primary", "KIPP Bloom Primary", "KIPP Ascend Primary School"))
# Reshape one term's MAP results to one row per student, with the Reading
# and Mathematics score/date pairs pivoted into columns.
#
# @param term_name A term name string, e.g. "Spring 2016-2017".
#
# BUG FIX: the original `filter(term_name == term_name)` compared the
# `term_name` COLUMN to itself under dplyr's data masking (the argument of
# the same name is shadowed), so every term's rows were kept regardless of
# the argument. Capturing the argument in a local with a non-column name
# makes the comparison use the function argument, as intended (contrast
# with get_by_terms(), whose parameter is named `term` and works).
map_year <- function(term_name) {
  requested_term <- term_name
  map_all %>%
    filter(term_name == requested_term) %>%
    select(-c(student_first_name,
              # student_ethnic_group
              student_last_name)) %>%
    rename(student_number = student_id) %>%
    group_by(student_number,
             # student_ethnic_group,
             term_name,
             school_name) %>%
    pivot_wider(names_from = measurement_scale, values_from = c(test_start_date, test_ritscore))
}
map_1617 <- map_year("Spring 2016-2017")
map_1718 <- map_year("Spring 2017-2018")
map_1819 <- map_year("Spring 2018-2019")
students <- get_powerschool("students") %>%
select(student_number,
# schoolid,
dob,
ethnicity,
gender,
# grade_level,
entrydate,
exitdate,
first_name,
last_name,
id) %>%
collect()
all_enroll <- get_powerschool("ps_enrollment_all") %>%
select(id = studentid,
schoolid,
sy_entrydate = entrydate,
sy_exitdate = exitdate,
grade_level,
yearid) %>%
filter(yearid %in% c(26, 27, 28)) %>%
# filter(grade_level > 3) %>%
collect()
all_enroll_students <- all_enroll %>%
left_join(students, by = "id") %>%
select(-c(entrydate,
exitdate))
#### Attendance ####
# get attendance
attendance <- get_powerschool("attendance") %>%
filter(att_date >= lubridate::ymd("2016-08-20")) %>%
filter(att_date <= lubridate::ymd("2019-06-23")) %>%
filter(att_mode_code == "ATT_ModeDaily") %>%
collect()
membership <- silounloadr::get_powerschool("ps_membership_reg") %>%
filter(yearid %in% c(26, 27, 28)) %>%
select(studentid,
schoolid,
date = calendardate,
enrolled = studentmembership,
grade_level,
attendance = ATT_CalcCntPresentAbsent) %>%
collect()
# get attendance code table
attendance_code <- get_powerschool("attendance_code") %>%
mutate(att_code = if_else(att_code == "true", "T", att_code)) %>% #
collect()
attendance_complete <- attendance %>%
right_join(attendance_code %>%
select(attendance_codeid = id,
att_code),
by = "attendance_codeid")
# combine membership with attendance complete table
member_att <- membership %>%
left_join(attendance_complete %>%
select(studentid,
att_date,
att_code
#presence_status_cd
),
by =c("studentid",
"date" = "att_date"))
# Identify whether each att_code is enrolled, present, absent, or tardy for each student for each day
attend_student <- member_att %>%
filter(date >= lubridate::ymd("2016-08-20")) %>%
filter(date <= lubridate::ymd("2019-06-23")) %>%
mutate(enrolled0 = 1,
enrolled = if_else(att_code == "D" & !is.na(att_code), 0, enrolled0),
present0 = ifelse(is.na(att_code) | att_code == "", 1, 0),
present1 = ifelse(att_code %in% c("A", "S"), 0, present0),
present2 = ifelse(att_code == "H", 0.5, present1),
present3 = ifelse(att_code %in% c("T", "E", "L", "I"), 1, present2),
present = ifelse(is.na(present2), 1, present3),
absent = (1 - present)*enrolled,
tardy = ifelse(att_code %in% "T", 1, 0)) %>%
left_join(students %>%
select(studentid = id,
student_number,
first_name,
last_name),
by="studentid") %>%
inner_join(schools, by=c("schoolid")) %>%
filter(schoolabbr %in% c("KAMS", "KAC", "KBCP", "KOA", "KOP", "KBP", "KAP")) %>%
mutate(school_year = case_when(
ymd(date) >= ymd("2016-08-20") & ymd(date) <= ymd("2017-06-23") ~ "2016-2017",
ymd(date) >= ymd("2017-08-20") & ymd(date) <= ymd("2018-06-23") ~ "2017-2018",
TRUE ~ "2018-2019"
)) %>%
select(studentid,
student_number,
first_name,
last_name,
grade_level,
school_year,
schoolid,
schoolname,
schoolabbr,
date,
att_code,
enrolled,
present,
absent,
tardy)
# summarize for every student separated by school year
attend_school_grade_student <- attend_student %>%
group_by(#schoolabbr,
student_number, first_name, last_name, school_year) %>%
summarize(enrolled = sum(enrolled),
present = sum(present),
absent = sum(absent),
tardy = sum(tardy)) %>%
ungroup() %>%
select(student_number,
school_year,
enrolled,
present,
absent,
tardy)
attend_school_grade_student_absent <- attend_school_grade_student %>%
#ungroup() %>%
select(student_number,
school_year,
absent) %>%
# tardy) %>%
group_by(student_number, school_year) %>%
spread(key = school_year, value = absent) %>%
rename("absences_1617" = '2016-2017',
"absences_1718" = '2017-2018',
"absences_1819" = '2018-2019')
attend_school_grade_student_enrolled <- attend_school_grade_student %>%
select(student_number,
school_year,
enrolled) %>%
group_by(student_number, school_year) %>%
spread(key = school_year, value = enrolled) %>%
rename("enrolled_1617" = '2016-2017',
"enrolled_1718" = '2017-2018',
"enrolled_1819" = '2018-2019')
attend_student <- attend_school_grade_student_absent %>%
left_join(attend_school_grade_student_enrolled, by = "student_number")
#
# pivot_wider(id_cols = student_number, names_from = school_year, values_from = c(enrolled, absent))
# could ideally spread enrolled and Absences at once, didn't get it to work
#### Courses ####
cc <- get_powerschool("cc") %>%
select(course_number,
studentid,
teacherid,
termid,
schoolid) %>%
filter(termid >= 2700,
schoolid %in% c(7810, 400146, 400163, 400180)) %>%
collect()
schoolstaff <- get_powerschool("schoolstaff") %>%
select(dcid,
id,
users_dcid) %>%
collect()
cc_schoolstaff <- cc %>%
left_join(schoolstaff %>%
rename(teacherid = id), by = "teacherid")
users <- get_powerschool("users") %>%
select(dcid,
email_addr,
first_name,
last_name,
homeschoolid,
teachernumber) %>%
collect()
cc_schoolstaff_users <- cc_schoolstaff %>%
left_join(users %>%
rename(users_dcid = dcid), by = "users_dcid")
teachers_course <- cc_schoolstaff_users %>%
group_by(teachernumber, course_number, termid, schoolid, email_addr, first_name, last_name, homeschoolid) %>%
distinct(teachernumber) %>%
filter(!grepl("att", course_number))
write_csv(teachers_course, "~/Downloads/teachers_course.csv")
#### Displinary - deans list suspensions ####
suspen_raw <- get_deanslist("suspensions") %>%
select(suspension_id,
student_number = student_school_id,
student_first,
student_last,
school_name,
actions,
penalties,
reported_details,
admin_summary,
category,
grade_level_short,
infraction,
issue_ts) %>%
# filter(issue_ts_date >= "2016-08-01 00:00") %>%
collect(n = Inf) %>%
janitor::clean_names("old_janitor")
issue_date_ts <- suspen_raw %>%
pull(issue_ts) %>%
map_df(jsonlite::fromJSON) %>%
pull(date) %>%
ymd_hms(tz = "America/Chicago")
suspen <- suspen_raw %>%
mutate(date = issue_date_ts)
penalties <- suspen$penalties %>%
purrr::map_df(~jsonlite::fromJSON(.x)) %>%
janitor::clean_names("old_janitor") %>%
select(suspensionid,
startdate,
enddate,
numdays,
penaltyname
) %>%
mutate(startdate = ymd(startdate),
enddate = ymd(enddate),
diff_days = enddate - startdate,
numdays = as.integer(numdays)) %>%
arrange(startdate) %>%
#filter(!is.na(startdate)) %>%
mutate(suspensionid = as.integer(suspensionid))
oss <- suspen %>%
inner_join(penalties %>%
filter(str_detect(penaltyname, "Out of School Suspension")),
by = c("suspension_id" = "suspensionid"))
suspen_oss <- oss %>%
mutate(create_date = as_date(date)) %>%
filter(create_date < '2019-08-01') %>%
mutate(create_my = format(create_date, "%Y-%m")) %>%
mutate(create_sy = case_when(
create_my <= '2016-12' | create_my <= '2017-06' ~ "SY-2016-2017",
create_my <= '2017-12' | create_my <= '2018-06' ~ "SY-2017-2018",
create_my <= '2018-12' | create_my <= '2019-06' ~ "SY-2018-2019"
)) %>%
filter(!is.na(create_sy))
suspen_oss_count <- suspen_oss %>%
group_by(#grade_level_short,
student_number,
create_sy) %>%
summarize(suspensions_year = n()) %>%
spread(key = "create_sy", value = "suspensions_year") %>%
#replace(., is.na(.), "0") %>%
rename("suspen_1617" = "SY-2016-2017",
"suspen_1718" = "SY-2017-2018",
"suspen_1819" = "SY-2018-2019")
# Race Codes
races <- data_frame(ethnicity = c(10, 4, 2, 5, 6, 9, 1),
ethnicity_term = c("American Indian", "Asian", "Black", "Latino/Hispanic", "Native Hawaiian or other Pacific Islander",
"Two or more races", "White"))
#### Final Tables ####
# 16-17 SY with attendance, suspensions, and demographic info
all_1617 <- attend_student %>%
select(-c(absences_1718,
absences_1819,
enrolled_1718,
enrolled_1819)) %>%
left_join(suspen_oss_count %>%
select(-c(suspen_1718,
suspen_1819)),
by = "student_number") %>%
left_join(all_enroll_students,
by = "student_number") %>%
filter(yearid == 26,
!is.na(enrolled_1617)) %>%
left_join(map_1617, by = "student_number") %>%
left_join(races, by = "ethnicity")
all_1617$suspen_1617[is.na(all_1617$suspen_1617)] <- 0
all_1617 <- all_1617 %>%
select(student_number,
schoolid,
school_name,
dob,
ethnicity_term,
# student_ethnic_group,
gender,
grade_level,
enrolled_1617,
absences_1617,
suspen_1617,
test_start_date_Mathematics,
test_ritscore_Mathematics,
test_start_date_Reading,
test_ritscore_Reading,
sy_entrydate,
sy_exitdate)
# 17-18 SY with attendance, suspensions, and demographic info
all_1718 <- attend_student %>%
select(-c(absences_1617,
absences_1819,
enrolled_1617,
enrolled_1819)) %>%
left_join(suspen_oss_count %>%
select(-c(suspen_1617,
suspen_1819)),
by = "student_number") %>%
left_join(all_enroll_students,
by = "student_number") %>%
filter(yearid == 27,
!is.na(enrolled_1718)) %>%
left_join(map_1718, by = "student_number") %>%
left_join(races, by = "ethnicity")
all_1718$suspen_1718[is.na(all_1718$suspen_1718)] <- 0
all_1718 <- all_1718 %>%
select(student_number,
schoolid,
school_name,
dob,
ethnicity_term,
# student_ethnic_group,
gender,
grade_level,
enrolled_1718,
absences_1718,
suspen_1718,
test_start_date_Mathematics,
test_ritscore_Mathematics,
test_start_date_Reading,
test_ritscore_Reading,
sy_entrydate,
sy_exitdate)
# 18-19 SY with attendance, suspensions, and demographic info
all_1819 <- attend_student %>%
select(-c(absences_1617,
absences_1718,
enrolled_1617,
enrolled_1718)) %>%
left_join(suspen_oss_count %>%
select(-c(suspen_1617,
suspen_1718)),
by = "student_number") %>%
left_join(all_enroll_students,
by = "student_number") %>%
filter(yearid == 28,
!is.na(enrolled_1819)) %>%
left_join(map_1819, by = "student_number") %>%
left_join(races, by = "ethnicity")
all_1819$suspen_1819[is.na(all_1819$suspen_1819)] <- 0
all_1819 <- all_1819 %>%
select(student_number,
schoolid,
school_name,
dob,
ethnicity_term,
# student_ethnic_group,
gender,
grade_level,
enrolled_1819,
absences_1819,
suspen_1819,
test_start_date_Mathematics,
test_ritscore_Mathematics,
test_start_date_Reading,
test_ritscore_Reading,
sy_entrydate,
sy_exitdate)
write_csv(all_1617, "~/Downloads/RAND_Students_SY16-17.csv")
write_csv(all_1718, "~/Downloads/RAND_Students_SY17-18.csv")
write_csv(all_1819, "~/Downloads/RAND_Students_SY18-19.csv")
|
261fa7e1a07f832672c10952441414bf546f1432
|
2601a446ea97ca9b54438d1bbb9197289a142f68
|
/plot3.R
|
b0700ae2780b71770138387f120ae5617079f0dc
|
[] |
no_license
|
srinivasksh/Exploratory-Data-Analysis_1
|
1bc4d98616bd288360540a692d10fb6708c54cf2
|
489051b14cf1b63c3432455858db2d52ae7f7d82
|
refs/heads/master
| 2021-01-10T03:42:34.906160
| 2016-01-10T11:14:29
| 2016-01-10T11:14:29
| 49,363,391
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,416
|
r
|
plot3.R
|
## plot3.R -- draw the three energy sub-metering series for 2007-02-01 and
## 2007-02-02 into plot3.png (behaviorally identical restyling).

## Load the semicolon-delimited household power data set.
consumption_all <- read.table("household_power_consumption.txt", sep = ";", header = TRUE)

## Parse the Date column, then keep only the two target days.
consumption_all$Date <- as.Date(consumption_all$Date, "%d/%m/%Y")
power <- subset(consumption_all, Date >= "2007-02-01" & Date <= "2007-02-02")

## Numeric copies of the measurement columns (read in as factors/strings),
## plus a POSIXct timestamp built from Date + Time.
power$GAP <- as.numeric(as.character(power$Global_active_power))
power$ts  <- as.POSIXct(paste(power$Date, power$Time), format = "%Y-%m-%d %H:%M:%S")
power$sm1 <- as.numeric(as.character(power$Sub_metering_1))
power$sm2 <- as.numeric(as.character(power$Sub_metering_2))
power$sm3 <- as.numeric(as.character(power$Sub_metering_3))

## Render all three sub-metering series into a 480x480 pixel PNG, with a
## legend distinguishing them by color.
png("plot3.png", width = 480, height = 480, units = "px")
plot(power$ts, power$sm1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(power$ts, power$sm2, col = "red")
lines(power$ts, power$sm3, col = "blue")
legend("topright", col = c("black", "red", "blue"),
       c("Sub_metering_1 ", "Sub_metering_2 ", "Sub_metering_3 "),
       lty = c(1, 1), lwd = c(1, 1))
dev.off()
|
50572438a3641ec5090d6309711f7fc792f1e3a9
|
5dd8887301eb53cfb5ffab58a6f339349ca20919
|
/plot3.r
|
133f77ae446ae34e1b9c6f1cadbcda614163c6c3
|
[] |
no_license
|
andresalvarez/ExData_Plotting1
|
5dd3634308ca37d2dcfe40083cf6de6ca0436e2f
|
fa0bd408721b4a0074499fb0ab166d314f0c026d
|
refs/heads/master
| 2020-12-11T07:44:23.823682
| 2016-05-15T19:59:19
| 2016-05-15T19:59:26
| 58,814,170
| 0
| 0
| null | 2016-05-14T15:08:08
| 2016-05-14T15:08:08
| null |
UTF-8
|
R
| false
| false
| 1,296
|
r
|
plot3.r
|
# plot3.r -- overlay the three energy sub-metering series for
# 2007-02-01..2007-02-02 and save the chart as plot3.png.
library(data.table)
# NOTE(review): hard-coded setwd() makes this machine-specific; kept as-is
# to preserve behavior.
setwd("E:\\DataScience Specialization\\course4\\A1\\ExData_Plotting1")
# Read only the needed columns; "?" marks missing values in this data set.
DT <- fread("./household_power_consumption.txt",
            select = c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3"),
            colClasses = list(date = c("Date"),
                              time = c("Time"),
                              numeric = c("Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
            ),
            dec=".", na.strings=c("?") )
# Keep only the two target days.
ds <- subset(DT, as.Date(DT$Date,"%d/%m/%Y") >=as.Date("01/02/2007","%d/%m/%Y") & as.Date(DT$Date,"%d/%m/%Y") <=as.Date("02/02/2007","%d/%m/%Y"))
png("plot3.png", width = 480, height = 480, bg = "white")
# Combined timestamp column for the x axis.
ds[,DateTime:=(as.POSIXct(paste(ds$Date, ds$Time), format="%d/%m/%Y %H:%M:%S"))]
plot(ds$Sub_metering_1~ds$DateTime, type="n", main="", ylab="Energy sub metering",xlab ="")
lines(ds$Sub_metering_1~ds$DateTime)
lines(ds$Sub_metering_2~ds$DateTime, col="red")
lines(ds$Sub_metering_3~ds$DateTime, col="blue")
# BUG FIX: the legend colors were c("black","blue","red"), labeling
# Sub metering 2 as blue and 3 as red, while the lines above draw series 2
# in red and series 3 in blue. Reordered so the legend matches the plot.
legend("topright",legend= c("Sub metering 1","Sub metering 2","Sub metering 3"), lty=c(1,1,1), lwd=c(2.5,2.5,2.5),col=c("black","red","blue"))
dev.off()
|
e6c437bf53a3d66557bf4ec4aa1276475d695642
|
6d9c67637ffc0876311953250e2de397beaddccf
|
/Licence_agreement/I_accept/PCModel1350/PCModel/3.00/Models/PCDitch/2.13.16/PCShell/scripts/R_system/3161/PCShell.r
|
1b09fe4eeb082674bc8beb1354184bc7fbb8195c
|
[] |
no_license
|
RedTent/PCModel
|
7ed7aa95503bdd2b531929d05c44ec082d8b2562
|
f98f62e15f1975f80c835fb616b36223b33c5d00
|
refs/heads/master
| 2023-04-18T10:14:26.302989
| 2020-08-28T09:52:50
| 2021-05-06T08:20:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,026
|
r
|
PCShell.r
|
# **************************************************************************
# --------------------------------------------------------------------------
# PCSHELL (compiling cpp modelcode) (Main Program)
# author: Luuk van Gerven (april 2012)
# used libraries: ggplot2, deSolve, RODBC
# --------------------------------------------------------------------------
# **************************************************************************
# -----------------------------------------------------------------------------------------------------------------------------------------------------------------------
# user defined settings
# -----------------------------------------------------------------------------------------------------------------------------------------------------------------------
# FIX: use TRUE rather than the reassignable shorthand T.
InclInterface <- TRUE # include PCShell interface; yes (TRUE) or no (FALSE)
if (!(InclInterface)) {
# set location of PCShell and name of work case
dir_SCHIL <- "D:/Luuk/Models/PCDitch/R/PCShell/" # location of PCShell
work_case <- "benchmark_default" # name of work case
} else {
# get name of working directory and path of PCShell from the argument
# provided by the batch file (a single "dir1/dir2/.../work_case" path).
path_work_dir <- commandArgs(trailingOnly = TRUE)
tmp <- unlist(strsplit(path_work_dir,"/"))
# last path component = work case; everything before it = PCShell location.
# NOTE(review): assumes the path has at least two components -- with one,
# tmp[1:(length(tmp)-1)] would misbehave (1:0). Confirm the batch file
# always passes a full path.
work_case <- tmp[length(tmp)]
dir_SCHIL <- paste(paste(tmp[1:(length(tmp)-1)],collapse="/"),"/",sep="")
}
# set these things when running this script stand alone (not from PCShell macro) and including the PCShell interface
#dir_SCHIL <- "D:/Luuk/Models/PCDitch/R/PCShell/" # location of PCShell
#work_case <- "PCLake_default" # name of work case
# -----------------------------------------------------------------------------------------------------------------------------------------------------------------------
# PCShell computation
# -----------------------------------------------------------------------------------------------------------------------------------------------------------------------
#begTime <- Sys.time()
source(paste(dir_SCHIL,"scripts/R/functions.R",sep="")) # Define functions
source(paste(dir_SCHIL,"scripts/R/initialisation.R",sep="")) # Initialisation (read user defined input + convert cpp files of model + compile model)
source(paste(dir_SCHIL,"scripts/R/single_run.R",sep="")) # Run the model (with reference settings)
#runTime <- Sys.time()-begTime
# run_type is expected to be defined by one of the sourced scripts above.
if (InclInterface) if (run_type %in% c(1,2)) source(paste(dir_SCHIL,"scripts/R/sensitivity_analysis.R",sep="")) # Perform sensitivity analysis (run the model with adjusted parameter settings)
source(paste(dir_SCHIL,"scripts/R/produce_output.R",sep="")) # Create output in graphs + files (of time series of state variables and auxiliaries)
|
90450aed5f701d5134461411129ca77205817cb3
|
9d13550ab15bee71e95326a1513d4b62c62197b0
|
/R/annotation.R
|
1edc25318d24268f3c16289a3504261ee3bdae63
|
[] |
no_license
|
cran/genoPlotR
|
8612232d2992605511e71b74bced5378841f66d0
|
3887f91ed718b7df935c11d1a84e9276f4f6b01c
|
refs/heads/master
| 2021-07-21T15:01:58.200076
| 2021-01-07T14:00:02
| 2021-01-07T14:00:02
| 17,696,331
| 4
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,855
|
r
|
annotation.R
|
################################
# Annotation class and methods
################################
# annotation is a set of text and one or two positions for each
# if one, other must be NA
# Construct an "annotation" object: a set of text labels, each anchored at a
# single position (x1, with x2 = NA) or at a range (x1..x2).
#
# Args:
#   x1:   numeric vector of (start) positions. Required.
#   x2:   numeric vector of end positions, or NA (default) for point labels.
#   text: character vector of label texts. Required.
#   rot:  text rotation, applied to all rows.
#   col:  text colour, applied to all rows.
# Returns an object of class c("annotation", "data.frame").
annotation <- function(x1, x2=NA, text, rot=0, col="black"){
  # These are scalar conditions: use short-circuiting `||` rather than the
  # vectorized `|` of the original.
  if (missing(x1) || missing(text)) stop("Args x1 and text must be provided")
  if (!is.numeric(x1)) stop("x1 must be numeric")
  if (!(all(is.na(x2)) || is.numeric(x2))) stop("x2 must be numeric or NA")
  if (!is.character(text)) stop("text must be character")
  as.annotation(data.frame(x1=x1, x2=x2, text=text, stringsAsFactors=FALSE),
                rot=rot, col=col)
}
# Coerce a data frame to an "annotation" object.  The data frame must carry
# at least the columns x1 and text; x2, color and rot columns are filled in
# from the arguments when absent.  An object that is already an annotation
# is returned untouched.
as.annotation <- function(df, x2=NA, rot=0, col="black"){
  # Nothing to do for an existing annotation.
  if (is.annotation(df)) return(df)
  if (!all(c("x1", "text") %in% names(df))) {
    stop("Data frame should have at least a x1 and text column")
  }
  # Per-row defaults for any attribute column the caller did not supply.
  if (is.null(df$x2))    df$x2    <- x2
  if (is.null(df$color)) df$color <- col
  if (is.null(df$rot))   df$rot   <- rot
  class(df) <- c("annotation", "data.frame")
  df
}
# Test whether an object is an annotation, i.e. carries the S3 class
# "annotation" (as produced by annotation()/as.annotation()).
is.annotation <- function(annotation){
  inherits(annotation, "annotation")
}
# S3 range() method for annotation objects: the coordinate span covered by
# the labels, taking both the x1 and x2 columns into account.  NA entries
# (the x2 of point labels) are ignored.
range.annotation <- function(x, ...){
  range(x$x1, x$x2, na.rm = TRUE)
}
# S3 trim() method for annotation objects: keep only the labels that fall
# within xlim.  A row is kept when x1 >= xlim[1] and, for point labels
# (x2 = NA), x1 <= xlim[2]; for ranged labels, x2 <= xlim[2].
#
# Args:
#   x:    an annotation object.
#   xlim: numeric limits of length 2, or NULL to return x unchanged.
trim.annotation <- function(x, xlim=NULL, ...){
  annotation <- x
  # BUG FIX: the original coerced with as.numeric(xlim) *before* the NULL
  # test; as.numeric(NULL) is numeric(0), so is.null() never triggered and
  # the default xlim = NULL stopped with "xlim must be length 2" instead of
  # returning the annotation unchanged.  Coerce inside the guard instead.
  if (!is.null(xlim)){
    xlim <- as.numeric(xlim)
    if (!is.numeric(xlim)) stop("xlim must be numeric")
    if (length(xlim) != 2) stop("xlim must be length 2")
    # to be accepted, x1 > xlim1 and, if x2=NA, x1 also < xlim2 or,
    # x2 < xlim2
    annotation <- annotation[annotation$x1 >= xlim[1] &
                             ((is.na(annotation$x2) &
                               annotation$x1 <= xlim[2]) |
                              (!is.na(annotation$x2) &
                               annotation$x2 <= xlim[2])),]
  }
  annotation
}
|
9618b0bf5e22ba6c65fa6c2adcc1e4918e500ec9
|
cd49ddae495f695493c2ff1b692cf69a52a4cb38
|
/sucesohi_tipomuhi_vic_boxplot.R
|
ca3ddf94f4635eeb2a1128df2363eb89c70142a1
|
[
"CC0-1.0"
] |
permissive
|
AdrianGonzalezDS/bookdown-demo-master
|
9ef1f28c5d1812d1998f9c9e4661a46292b015ac
|
1303ecb471c9a95696515c05ba6347b9257e425b
|
refs/heads/main
| 2023-08-31T19:34:48.435682
| 2021-10-14T18:26:59
| 2021-10-14T18:26:59
| 413,385,635
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 3,423
|
r
|
sucesohi_tipomuhi_vic_boxplot.R
|
library(devtools)
library(ggplot2)
library(tidyverse) #requerido para la funcion gather
library(bbplot) #requerido para bbc style
library(plyr) #requerido para hacer gr?ficos de pir?mide
library(dplyr) #requerido para usar la funcion mutate
library(tidyr) #requerido para usar la funcion gather
library(stringr)#requerida para usar str_replace_all
# Set the time locale so month names in the caption render in Spanish.
Sys.setlocale("LC_TIME","Spanish_Spain.1252")
# Reporting window shown in the plot caption.
startdate <- as.Date(c("2021-01-01"))
enddate <- as.Date(c("2021-06-30"))
# Prepare data
# NOTE(review): `sucesos` is expected to be a data frame already present in
# the session (it is not created in this script) -- confirm the upstream
# loading step.
# Full set of intentional-homicide records (used for the caption counts).
sucesoshi_tipomuhi_vic <- sucesos %>%
  select(infodelito2, prensa,tipo_delito,tipo_muerte,numero_victimas_2) %>%
  filter(infodelito2 == "Si" &
           tipo_delito == "Homicidio intencional")
sucesoshi_tipomuhi_vic
# Code 98 appears to stand for "10 or more victims"; recode to 10 so it fits
# on the 0-10 axis (NOTE(review): confirm against the codebook).
sucesoshi_tipomuhi_vic$numero_victimas_2[sucesoshi_tipomuhi_vic$numero_victimas_2 == 98] <- 10
sucesoshi_tipomuhi_vic
# Analysis subset: additionally drop records with missing death type or
# missing victim count (99 / NA).
sucesoshi_tipomuhi_vic_sel <- sucesos %>%
  select(infodelito2, prensa, tipo_delito,tipo_muerte,numero_victimas_2) %>%
  filter(infodelito2 == "Si" &
           tipo_delito == "Homicidio intencional"&
           !tipo_muerte %in% c(NA, "NA")&
           !numero_victimas_2 %in% c(NA, "NA",99))
sucesoshi_tipomuhi_vic_sel
sucesoshi_tipomuhi_vic_sel$numero_victimas_2[sucesoshi_tipomuhi_vic_sel$numero_victimas_2 == 98] <- 10
sucesoshi_tipomuhi_vic_sel
# Number of distinct press outlets consulted (reported in the caption).
prensa_sucesoshi_tipomuhi_vic_sel <- length(unique(sucesoshi_tipomuhi_vic_sel[["prensa"]]))
prensa_sucesoshi_tipomuhi_vic_sel
sucesoshi_tipomuhi_vic_sel$prensa<- NULL
# Legend labels: strip non-alphanumeric bytes first, then rewrite the two
# affected labels with their proper accented spelling.
sucesoshi_tipomuhi_vic_sel$tipo_muerte <- str_replace_all(sucesoshi_tipomuhi_vic_sel$tipo_muerte,
                                                          "[^[:alnum:]]"," ")
sucesoshi_tipomuhi_vic_sel$tipo_muerte <- str_replace_all(sucesoshi_tipomuhi_vic_sel$tipo_muerte,
                                                          "Agresi n grave mortal","Agresión grave mortal")
sucesoshi_tipomuhi_vic_sel$tipo_muerte <- str_replace_all(sucesoshi_tipomuhi_vic_sel$tipo_muerte,
                                                          "Ejecuci n extrajudicial","Ejecución extrajudicial")
# Horizontal box plot of victims per incident by death type: boxes with
# whisker error bars, jittered raw points, and a star (shape 8) at the mean.
sucesoshi_tipomuhi_vic_boxplot <- ggplot(sucesoshi_tipomuhi_vic_sel,
                                         aes(x = reorder(tipo_muerte, numero_victimas_2),
                                             y = numero_victimas_2))+
  geom_boxplot(outlier.colour = "blue", outlier.shape = 1)+
  stat_boxplot(geom ='errorbar', width = 0.6)+
  geom_point(alpha=0.4, color="tomato", position = "jitter")+
  stat_summary(fun= mean, geom="point", shape=8, size=2)+
  theme_classic()+
  scale_y_continuous( limits=c(0, 10),
                      breaks=seq(0,10,1),labels = c("0", "1", "2","3", "4",
                                                    "5","6", "7", "8", "9", "\u2265 10")) +
  xlab("") +
  ylab("Número de víctimas por suceso")+
  expand_limits(y = 0)+
  coord_flip()+
  labs(caption = stringr::str_glue("Fuente: Observatorio de prensa OVV \nn = {nrow(sucesoshi_tipomuhi_vic)} ({sum(is.na(sucesoshi_tipomuhi_vic$numero_victimas_2)| sucesoshi_tipomuhi_vic$numero_victimas_2 == 'NA'| sucesoshi_tipomuhi_vic$numero_victimas_2 == 99)} casos perdidos por información faltante) en {prensa_sucesoshi_tipomuhi_vic_sel} medios de prensa consultados \nPeríodo de recolección de información: {format(startdate, '%d %b')}-{format(enddate, '%d %b %Y')}"))
sucesoshi_tipomuhi_vic_boxplot
|
ced107ec35344d5e76c51fccd4ce7d3aa2f506da
|
75db022357f0aaff30d419c13eafb9dddfce885a
|
/R/getFccir.r
|
0635bc6e3448ecb886022c4c1a4c2bbca5a6c588
|
[] |
no_license
|
LobsterScience/bio.lobster
|
d4c553f0f55f561bb9f9cd4fac52c585e9cd16f8
|
b2af955291cb70c2d994e58fd99d68c6d7907181
|
refs/heads/master
| 2023-09-01T00:12:23.064363
| 2023-08-23T16:34:12
| 2023-08-23T16:34:12
| 60,636,005
| 11
| 5
| null | 2017-01-20T14:35:09
| 2016-06-07T18:18:28
|
R
|
UTF-8
|
R
| false
| false
| 2,079
|
r
|
getFccir.r
|
#' Instantaneous fishing mortality (F) for a Lobster Fishing Area, derived
#' from the mean CCIR exploitation rate spread over the area's fishing
#' season expressed as a fraction of a year.
#'
#' @param p list with element \code{Area}: a short LFA code ("27N", "27S",
#'   "29", ..., "38").
#' @export
getFccir <- function(p){
  # Each load() brings `ouBin` (compiled binomial model summaries) into the
  # local environment; keep only LFA label, year and exploitation rate.
  load(file.path(project.datadirectory("bio.lobster"),"data","exploitationccir.rdata"))
  load(file.path(project.datadirectory('bio.lobster'),'outputs','ccir','summary','compiledBinomialModels33.rdata'))
  r33 <- ouBin[, c("LFA","Yr","ERfm")]
  load(file.path(project.datadirectory('bio.lobster'),'outputs','ccir','summary','compiledBinomialModels34.rdata'))
  r34 <- ouBin[, c("LFA","Yr","ERfm")]
  load(file.path(project.datadirectory('bio.lobster'),'outputs','ccir','summary','compiledBinomialModels2732.rdata'))
  r2732 <- ouBin[, c("LFA","Yr","ERfm")]
  ccir <- rbind(r2732, r33, r34)

  # Translate the long-form LFA labels to the short codes used in p$Area.
  short_codes <- c("LFA 27 South" = "27S",
                   "LFA 27 North" = "27N",
                   "LFA 29"       = "29",
                   "LFA 30"       = "30",
                   "LFA 31A"      = "31A",
                   "LFA 31B"      = "31B",
                   "LFA 32"       = "32",
                   "LFA 33 East"  = "33E",
                   "LFA 33 West"  = "33W")
  known <- ccir$LFA %in% names(short_codes)
  ccir$LFA[known] <- short_codes[ccir$LFA[known]]

  # Season limits per area (dummy years: only the day span matters below).
  # NOTE(review): for the split seasons (35, 36) only the first date pair is
  # used in the duration, exactly as in the original -- confirm intended.
  if (p$Area %in% c("27N","27S")) p$season <- c("2000-05-16","2000-07-15") # 27
  if (p$Area == "29")             p$season <- c("2000-05-01","2000-06-30") # 29
  if (p$Area == "30")             p$season <- c("2000-05-20","2000-07-20") # 30
  if (p$Area == "31A")            p$season <- c("2000-04-30","2000-06-30") # 31A
  if (p$Area %in% c("31B","32"))  p$season <- c("2000-04-20","2000-06-20") # 31B & 32
  if (p$Area %in% c("33E","33W")) p$season <- c("1999-11-28","2000-05-31") # 33
  if (p$Area == "34")             p$season <- c("1999-11-28","2000-05-31") # 34
  if (p$Area == "35")             p$season <- c("1999-10-15","1999-12-31","2000-03-01","2000-07-31") # 35
  if (p$Area == "36")             p$season <- c("1999-11-12","2000-01-15","2000-04-01","2000-06-29") # 36
  if (p$Area == "38")             p$season <- c("1999-11-12","2000-06-25") # 38

  # Mean exploitation rate -> instantaneous rate over the season fraction.
  exploit <- mean(subset(ccir, LFA == p$Area)$ERfm)
  season_frac <- as.numeric(as.Date(p$season[2]) - as.Date(p$season[1])) / 365
  fmort <- -log(1 - exploit) / season_frac
  return(fmort)
}
|
7fa4a2561d1f11447408ea2fce4c57fcbf56b768
|
ce6d5c499e05126f0184d466549eba617ef5dd20
|
/man/call_fn.Rd
|
6fc4e7081d9278832713bcb90c0a177a16bee943
|
[
"BSD-2-Clause",
"MIT"
] |
permissive
|
cderv/rlang
|
e929db50350600be716cb6243a99109bca7d4019
|
aa5cdf174cc977faa98a0b8582472a1468d9f98d
|
refs/heads/master
| 2023-08-01T00:44:14.020173
| 2021-09-13T15:16:21
| 2021-09-13T15:16:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 788
|
rd
|
call_fn.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/call.R
\name{call_fn}
\alias{call_fn}
\title{Extract function from a call}
\usage{
call_fn(call, env = caller_env())
}
\arguments{
\item{call}{Can be a call or a quosure that wraps a call.}
\item{env}{The environment where to find the definition of the
function quoted in \code{call} in case \code{call} is not wrapped in a
quosure.}
}
\description{
If a frame or formula, the function will be retrieved from the
associated environment. Otherwise, it is looked up in the calling
frame.
}
\examples{
# Extract from a quoted call:
call_fn(quote(matrix()))
call_fn(quo(matrix()))
# Extract the calling function
test <- function() call_fn(call_frame())
test()
}
\seealso{
\code{\link[=call_name]{call_name()}}
}
|
11060d94fe28d87b964fde2f31a3989ea617d43c
|
ca5f11d0358ab203d9468659c1306d1b186eb206
|
/unused/regression.test.R
|
628cf8b60737da781a1bf4e1c1883f1e40915d87
|
[] |
no_license
|
deepankardatta/blandr
|
75b3a30b2d961fd3c7b12824ab035943f8c01208
|
4d5b1a43536cd1fd9021ff5b1736a7534bc14072
|
refs/heads/v.0.5.3-development
| 2021-12-14T12:45:38.472889
| 2020-03-28T07:15:04
| 2020-03-28T07:15:04
| 95,990,424
| 15
| 9
| null | 2021-12-06T01:33:16
| 2017-07-01T22:25:47
|
R
|
UTF-8
|
R
| false
| false
| 640
|
r
|
regression.test.R
|
# Bland-Altman regression check using the blandr package and its bundled
# `giavarina` method-comparison dataset (Method.A vs Method.B).
statistics.results <- blandr.statistics( giavarina$Method.A , giavarina$Method.B )
# Pass the statistics to blandr.plot.limits to derive axis limits for the plot.
plot.limits <- blandr.plot.limits( statistics.results )
# Pass data to the blandr.ggplot function to use ggplot2 graphics system
ba.plot <- blandr.ggplot( statistics.results , plot.limits )
# Overlay a linear fit of differences vs means (proportional-bias check).
ba.plot <- ba.plot + ggplot2::geom_smooth(method='lm',formula=y~x)
ba.plot
# Prints out the regression equation (intercept a, slope b, 2 significant
# figures) for the differences ~ means regression shown above.
m <- lm( statistics.results$differences ~ statistics.results$means )
a <- signif(coef(m)[1], digits = 2)
b <- signif(coef(m)[2], digits = 2)
textlab <- paste("y = ",b,"x + ",a, sep="")
print(textlab)
|
591c54ff21d5d1d7e43f11dccc6b667c509056b6
|
2d6b65aa4308ee002e27992fcb97f1945052d85b
|
/contact-pred-master/step1.R
|
9be9e1897954dc61ef0685323cc59fe5179afdea
|
[] |
no_license
|
red333/ccm_int
|
aec607c75769008351e66d783cbb425df0211967
|
4d0eeaf4ba2ee8b214bd4a7c572cc5eaf7677108
|
refs/heads/master
| 2021-04-27T14:47:18.997046
| 2018-04-04T21:16:24
| 2018-04-04T21:16:24
| 122,458,596
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,121
|
r
|
step1.R
|
#!/usr/bin/Rscript
# step1: annotate a multiple sequence alignment (MSA) with two per-position
# binary masks, returned together with the raw alignment matrix:
#   * gap mask       -- "1" where a residue is present, "0" at a gap ("-")
#   * interface mask -- "1" where the residue's sequence index appears in
#                       the InterEvol interface-residue tables, "0" where it
#                       does not (rows with no interface data keep the raw
#                       residue characters)
#
# Sequence names are parsed as "<5-char protein id><chain>.../<start>-<end>".
# NOTE(review): this format is inferred from the substr()/gsub() parsing
# below -- confirm against the actual fasta headers.
#
# Example input: myfasta_Align <- "PYRB_1ekxA.alignment.fasta"
step1 <- function(myfasta_Align){
  # vi.table: lookup of interface pair identifiers, keyed by protein+chain.
  interfacelib = read.table("vi.table")
  # Read the alignment (bio3d::read.fasta): duplicates removed, upper-cased,
  # gap characters normalised to "-".
  a = read.fasta(myfasta_Align,rm.dup = T, to.upper = T, to.dash = T)
  id = a$id
  ali = a$ali
  alignmatrix = as.data.frame(ali, stringsAsFactors = F)
  # Gap mask: residue -> "1", gap -> "0".
  gapbinary <- alignmatrix
  gapbinary[ gapbinary != "-"] <- "1"
  gapbinary[ gapbinary == "-"] <- "0"
  interfacebinary <- alignmatrix
  # Pass 1: flag interface residues where the sequence is the FIRST partner
  # of its interface pair (molecule_1 tables; match on pair label chars 1-6).
  # Any error (missing file, unparsable name) is silently swallowed by
  # tryCatch, leaving that row untouched -- NOTE(review): consider logging
  # which rows were skipped.
  for(j in 1:nrow(interfacebinary)) {
    tryCatch ({
      x <- rownames(interfacebinary[j,])
      protein <- substr(x,1,5)
      chain <- substr(x,6,6)
      index <- gsub(".*/","",x)
      startindex <- gsub("-.*","",index)
      # NOTE(review): endindex is never used below, and the pattern "*.-"
      # looks like a typo for ".*-".
      endindex <- gsub("*.-","",index)
      #interfacepair <- "TBA"
      interfacepair <- interfacelib[which(substr(interfacelib[,1],1,6) == paste0(protein,chain)),1]
      if(length(interfacepair)!=0){
        interfacetable <- read.table(paste0("/home/lidaphd/lidaphd/InterEvolLib/superpdbs/out_" ,interfacepair[1], ".pdb.interfaceinfo/molecule_1.txt"))
        #interfacetable <- read.table("molecule_1.txt")
        # second column holds the interface residue indices
        interfacetable <- interfacetable[,2]
        interfacetable <- unique(interfacetable)
        # Walk the row left to right, counting real residues from
        # startindex; flag each one by interface membership.
        indextrack = as.numeric(startindex)
        for(i in 1:ncol(interfacebinary)){
          if(interfacebinary[j,i] != "-"){
            if(indextrack %in% interfacetable){
              interfacebinary[j,i] <- "1"
            }else{
              interfacebinary[j,i] <- "0"
            }
            indextrack <- indextrack + 1
          }
        }
      }
    }, error = function(e) {})
  }
  # Pass 2: same walk for sequences that are the SECOND partner of a pair
  # (molecule_2 tables; match on pair label chars 1-5 plus char 7).  A "1"
  # already set in pass 1 is preserved by the `| == "1"` condition.
  for(j in 1:nrow(interfacebinary)){
    tryCatch({
      x <- rownames(interfacebinary[j,])
      protein <- substr(x,1,5)
      chain <- substr(x,6,6)
      index <- gsub(".*/","",x)
      startindex <- gsub("-.*","",index)
      endindex <- gsub("*.-","",index)
      #interfacepair <- "TBA"
      interfacepair <- interfacelib[which(paste0(substr(interfacelib[,1],1,5),substr(interfacelib[,1],7,7)) == paste0(protein,chain)),1]
      if(length(interfacepair)!=0){
        interfacetable <- read.table(paste0("/home/lidaphd/lidaphd/InterEvolLib/superpdbs/out_" ,interfacepair[1], ".pdb.interfaceinfo/molecule_2.txt"))
        #interfacetable <- read.table("molecule_1.txt")
        interfacetable <- interfacetable[,2]
        interfacetable <- unique(interfacetable)
        indextrack = as.numeric(startindex)
        for(i in 1:ncol(interfacebinary)){
          if(interfacebinary[j,i] != "-"){
            if(indextrack %in% interfacetable | interfacebinary[j,i] == "1"){
              interfacebinary[j,i] <- "1"
            }else{
              interfacebinary[j,i] <- "0"
            }
            indextrack <- indextrack + 1
          }
        }
      }
    }, error = function(e) {})
  }
  # (A long commented-out duplicate of the two passes above, hard-wired to a
  #  second alignment "PYRI_4fyyB.alignment.fasta" and using *_2 variables,
  #  stood here; it was dead code and has been dropped from the comments.)
  #interfacebinary_gapbinary <- cbind.data.frame(interfacebinary,gapbinary,deparse.level = 0, stringsAsFactors = F)
  return (list(alignmatrix,interfacebinary, gapbinary))
}
|
f3ae23e7cbf03544e13b76294e4b05acac8c4c35
|
49ac865d0a7739438342dff7fc6a2bcf18112994
|
/R/fepsfrontieR.R
|
964eedc3dd3969182ef2c13c9f6ec93df9c8eca1
|
[] |
no_license
|
oliverdippel/fepsfrontieR
|
20163f3c4346a05b2722646f7ba2a75a1898b3b3
|
df789877103bb8d40f2ee686a4e7abaedae689cb
|
refs/heads/master
| 2020-03-25T06:41:57.293032
| 2018-08-04T08:30:04
| 2018-08-04T08:30:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 742
|
r
|
fepsfrontieR.R
|
#' fepsfrontieR: A package for the estimation of fixed-effect panel
#' stochastic frontier models by within-model transformation, as proposed by
#' Wang & Ho. Maximum likelihood estimation is carried out with nlminb().
#' Confidence intervals are obtained either from the Hessian matrix or from
#' (time intensive) bootstrapping of the standard errors. In addition, a
#' Gibbs sampler can be chosen as an alternative to MLE.
#'
#' The fepsfrontieR package provides three important functions:
#' sfmfep, SFM.within and SFM.generate
#'
#' @section sfmfep:
#' The sfmfep function ... (placeholder -- documentation to be completed)
#'
#' @section SFM.within:
#' The SFM.within function ... (placeholder -- documentation to be completed)
#'
#' @section SFM.generate:
#' The SFM.generate function ... (placeholder -- documentation to be completed)
#'
#' @docType package
#' @name fepsfrontieR
NULL
|
b1b52de297f7be5e6589d9ee1b0d7f08ea15795e
|
ba14c315f4ed435384c5b48185a5707dcf1ce093
|
/ui.R
|
518b7097ef1281b0562dbc889d03be10790a39f7
|
[] |
no_license
|
antgers/Project_AquaMiner_Periodic
|
0e318e381f1e244ba6858407f22d8900a78d7f6f
|
7e81781d607e83833e1bd2fd60f93bd5995b8497
|
refs/heads/master
| 2021-01-17T19:20:10.620858
| 2016-10-23T20:57:37
| 2016-10-23T20:57:37
| 71,663,263
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,428
|
r
|
ui.R
|
### Version Periodic (Sampling To Sampling) Datasets
#
# Created: 30/08/2016
# Last Modified: 23/10/2016
#
# Author: Gerasimos Antzoulatos (i2s)
#
# Shiny UI for the AquaMiner "Periodic" dashboard.  The navbar exposes six
# pages: Descriptive Statistics (filterable plots and summaries), a Pivot
# Table, Machine Learning model building/evaluation/prediction, KPI table
# estimation, a Methodological Approach figure and an About page.
# NOTE(review): `sidebarUni` and other helpers come from the two sourced
# files below and are not defined in this file.
source("helpers.R")
source("SidebarUi.R")
#----------------------------------------------------
#
shinyUI(
  navbarPage( theme = "bootstrap.css",
    img(src="Aquamanager-logo.png", class = "img-responsiveLogo"), #align = 'top',
    # "AquaSmart",
    #---------------------------------------------------------- First MenuPage -- Descriptive Statistics
    tabPanel(" Descriptive Statistics ", id="MenuPage_1",
      fluidPage( theme = "bootstrap.css",
        tags$head(tags$script(src="script2.js")),
        # Link that toggles visibility of the filter sidebar (handled by
        # script2.js).
        actionLink(inputId = "showHideButtonForMenu", label="", icon = icon("filter", lib = "glyphicon")),
        sidebarLayout(
          sidebarUni,
          mainPanel(
            tabsetPanel(
              tabPanel("Dataset",
                h4("Initial dataset:"),
                DT::dataTableOutput("dataset"),
                hr(),
                h4("Final dataset:"),
                DT::dataTableOutput("preproc.dataset") #,
                # downloadButton('downloadData', 'Download')
              ), # end tabPanel
              tabPanel("Line Plots",
                fluidRow(
                  column(3, uiOutput("line.DimX")),
                  column(3, uiOutput("line.group.Batch")),
                  column(3, uiOutput("line.group.Unit")),
                  br(),
                  column(3, actionButton(inputId = 'View.Lines', label = 'View...'))
                ), #end fluidRow
                br(),
                plotlyOutput("line_plots")
              ),
              tabPanel("Histogram",
                fluidRow(
                  column(3,
                    sliderInput('numbins', 'Number of Bins:',
                                min=1, max=50, value=10, step=1) #,
                    # checkboxInput('houtlier', 'Remove outliers')
                  ),
                  column(3,
                    uiOutput("hist.group"),
                    conditionalPanel(
                      condition = "input.hgroup != 'None'",
                      checkboxInput('chkb.hfacet', 'Facets'))
                  ),
                  column(3,
                    uiOutput("hist.measures")
                  ),
                  column(3,
                    checkboxInput('saveHPlot', "Check to save"),
                    br(),
                    actionButton(inputId = 'View.Hist', label = 'View...'),
                    br()
                  )
                ), # end fluidRow
                br(),
                uiOutput("hist_plots")
                #plotOutput("hist_plots", height = 800, width = 800)
              ), # end tabPanel
              tabPanel("Density Plots",
                fluidRow(
                  column(4,
                    uiOutput("dens.group"),
                    conditionalPanel(
                      condition = "input.dgroup != 'None'",
                      checkboxInput('chkb.dfacet', 'Facets')) #,
                    # checkboxInput('doutlier', 'Remove outliers')
                  ),
                  column(4,
                    uiOutput("dens.measures")
                  ),
                  column(4,
                    checkboxInput('saveDPlot', "Check to save"),
                    actionButton(inputId = 'View.Dens', label = 'View...')
                  )
                ), # end fluidRow
                br(),
                #plotOutput("dens_plots", height = 800, width = 800)
                uiOutput("dens_plots")
              ), # end tabPanel
              tabPanel("Box Plots",
                fluidRow(
                  column(4,
                    uiOutput("boxplot.group"),
                    conditionalPanel(
                      condition = "input.bxgroup != 'None'",
                      checkboxInput('chkb.bxfacet', 'Facets'))
                  ),
                  column(4,
                    uiOutput("boxplot.measures"),
                    checkboxInput('chkb.bxnotch', 'Notches') #,
                    # checkboxInput('bxoutlier', 'Remove outliers')
                  ),
                  column(4,
                    checkboxInput('saveBoxPlot', "Check to save"),
                    actionButton(inputId = 'View.Boxplot', label = 'View...')
                  )
                ), # end fluidRow
                br(),
                #plotOutput("box_plots", height = 800, width = 800)
                uiOutput("box_plots")
              ), # end tabPanel
              tabPanel("Bar Plots",
                fluidRow(
                  column(3,
                    uiOutput("bar.dim"),
                    br()
                  ),
                  column(3,
                    uiOutput("bar.meas"),
                    conditionalPanel(
                      condition = "input.barMeas != 'None'",
                      radioButtons(inputId = 'radio.sd.se', label = 'Type of error bar',
                                   choices = c('St.Dev', 'SEM'),
                                   selected = 'St.Dev', inline = TRUE) #,
                      #checkboxInput('baroutlier', 'Remove outliers')
                    ),
                    br()
                  ),
                  column(3,
                    uiOutput("bar.group")
                  ),
                  column(3,
                    checkboxInput('saveBarPlot', "Check to save"),
                    actionButton(inputId = 'View.Barplots', label = 'View...')
                  ),
                  br()
                ), # end fluidRow
                #plotOutput("bar_plots", height = 800, width = 800)
                uiOutput("bar_plots")
              ), # end tabPanel
              tabPanel("Scatter Plots",
                fluidRow(
                  column(3,
                    uiOutput("sc.dimX"),
                    radioButtons(inputId = 'method.regress', label = 'Regression line',
                                 choices = c('None', 'lm', 'loess'),
                                 selected = 'None', inline = TRUE ),
                    br()
                  ),
                  column(3,
                    uiOutput("sc.dimY"),
                    br()
                  ),
                  column(3,
                    uiOutput("sc.size"),
                    checkboxInput('saveScatterPlot', "Check to save"),
                    br()
                  ),
                  column(3,
                    uiOutput("sc.group"),
                    actionButton(inputId = 'View.Scatterplots', label = 'View...'),
                    br()
                  ),
                  br()
                ), # end fluidRow
                plotlyOutput("scatter_plots", height = 600, width = 900)
              ), # end tabPanel
              tabPanel("Summary",
                fluidRow(
                  column(4,
                    uiOutput("sum.group")
                  ),
                  column(4,
                    uiOutput("sum.meas")
                  ),
                  column(4,
                    checkboxInput('saveStats', "Save to file"),
                    actionButton(inputId = 'View.Stats', label = 'View...')
                  ),
                  br()
                ), # end fluidRow
                tableOutput("summary_stats")
              ) # end tabPanel
            ) # end tabsetPanel
          ) # end mainPanel
        ) # end sidebarLayout
      ) # end fluidPage
    ), # end tabPanel Descriptive Statistics
    #---------------------------------------------------------- Second MenuPage --- OLAP Cubes
    tabPanel("Pivot Table",
      # rpivotTable::rpivotTableOutput('pivTable', height = "800px")
      rpivotTableOutput("pivotTable", height = "800px")
    ),
    #---------------------------------------------------------- Third MenuPage --- Machine Learning
    tabPanel(" Machine Learning Models ", id="MenuPage_2",
      fluidPage(
        # tags$head(tags$script(src="script2.js")),
        actionLink(inputId = "showHideButtonForMachine", label="", icon = icon("filter", lib = "glyphicon")),
        sidebarLayout(
          sidebarPanel(
            img(src="feedingfish1.png",class = "img-responsive", align = 'middle'),
            hr(),
            uiOutput("targs.ML.Variables"),
            hr(),
            uiOutput("preds.ML.Variables"),
            hr(),
            selectInput(inputId='radioML.model', label=h3("Choose ML model..."),
                        choices=list("Support Vector Machine (RBF Kernel)"=1,
                                     "Generalized Linear Models (Boosted GLM)"=2,
                                     "Generalized Additive Models (GAMs)"=3,
                                     "Random Forest Regression"=4,
                                     "Multivariate Adaptive Regression Spline (MARS)"=5),
                        selected=1, multiple=FALSE),
            sliderInput("folds", "Folds:",min = 1, max = 20, value = 10),
            hr(),
            actionButton(inputId = 'goAnalysis', label = 'Start Analysis')
          ), # end sidebarPanel
          mainPanel(tabsetPanel(
            tabPanel("Build the Model...",
              br(),
              h4('Formula:'),
              fluidRow(column(12, verbatimTextOutput("fmla.model"))),
              hr(),
              # fluidRow(column(12, verbatimTextOutput("summary_model"))),
              # hr(),
              fluidRow(column(6, h4('RMSE:')),
                       column(6, h4('R-Squared:'))
              ),
              fluidRow(column(6, infoBoxOutput("approvalBox.RMSE")),
                       column(6, infoBoxOutput("approvalBox.Rsquare"))
              ),
              hr(),
              #------ relative importance
              hr(),
              h4('Variable Importance:'),
              fluidRow(column(12, plotlyOutput("plot_ML_Var_Impo",
                                               height = 600, width = 600))),
              fluidRow(column(12, verbatimTextOutput("ML.Var.Impo")))
            ), # end tabPanel Training
            tabPanel("Evaluate the Training",
              fluidRow(column(4,
                sliderInput("perc", "Percentage of population for testing:",
                            min = 0, max = 100, value = 10, step=0.5)
              ), # end column
              column(4,
                sliderInput("thresh.RE", "Threshold of Relative Error:",
                            min = 0, max = 100, value = c(0,10), step=0.5)
              ), # end column
              column(4,
                checkboxInput('saveTesting', "Save to file"),
                actionButton(inputId = 'ViewTesting', label = 'View...')
              ) # end column
              ), # end fluidRow
              plotlyOutput("plot_Testing",height = 800, width = 800),
              br(),
              h4('Evaluate the training ML model:'),
              fluidRow(column(12, verbatimTextOutput("evaluate_model")))
            ), # end tabPanel Testing
            tabPanel("Predict with it...",
              # predict response value using user-defined values for each predictor
              fluidRow(column(6,
                h3("Set values to Predictors:"),
                uiOutput("dyn_input.ML")
              ),
              hr(),
              column(6,
                actionButton(inputId = 'goPrediction', label = 'Start prediction'),
                hr(),
                h3("Prediction with Machine Learning model..."),
                fluidRow(column(12, verbatimTextOutput("prediction.value.ML")))
              ) # end column
              ) # end fluidRow
            ) # end tabPanel Predict
          ) # end tabsetPanel
          ) # end mainPanel
        ) # end sidebarLayout
      ) # end fluidPage
    ), # end tabPanel ML Models
    #---------------------------------------------------------- Forth MenuPage --- Business Cases
    tabPanel(" KPIs Table Estimation ", id="MenuPage_3",
      fluidPage(
        sidebarPanel(
          img(src="feedingfish1.png",class = "img-responsive", align = 'middle'),
          hr(),
          numericInput('temp.step', 'Step of Temperature:', 1,
                       min = 1, max = 5, step = 0.5),
          hr(),
          numericInput('weight.step', 'Step of Weight Categories:', 50,
                       min = 0, max = 800, step = 10),
          hr(),
          selectInput(inputId='radioKPI', label=h3("Choose the KPI..."),
                      choices=list("Biological FCR"=1,
                                   "Economical FCR"=2,
                                   "SFR"=3,
                                   "SGR"=4,
                                   "Mortality %"=5
                      ),
                      selected=1, multiple=FALSE)
        ), # end sidebarPanel
        mainPanel(tabsetPanel(
          tabPanel("Cross-Tabular",
            fluidRow(column(10, h4(" KPIs Table estimation by Machine Learning modeling:")),
                     column(2, actionButton(inputId = 'ViewKPITable', label = 'View KPI Table'))
            ),
            tableOutput("KPI_Table")
          ), # end tabPanel Cross-Tabular
          tabPanel("2D",
            fluidRow(column(6, uiOutput("CatAvWt")),
                     column(6, actionButton(inputId = 'View2D', label = 'View 2D'))
            ),
            plotlyOutput("plot_2D_Table")
          ), # end tabPanel 2D
          tabPanel("3D",
            fluidRow(column(3, actionButton(inputId = 'View3D', label = 'View 3D'))
            ),
            plotlyOutput("plot_3D_Table",height = 800, width = 800)
          ) # end tabPanel 3D
        ) # end tabSetPanel
        ) # end mainPanel
      ) # end fluidPage
    ), # end tabPanel Business Cases
    #---------------------------------------------------------- Fifth MenuPage --- Business Cases
    tabPanel(" Methodological Approach ", id="MenuPage_4",
      fluidPage(
        wellPanel(
          # h2(" Knowledge Discovery & Data Mining Process: "),
          img(src="KDD_DM_process.png", class = "img-responsive", align = 'middle', width = 800)
        ) # end wellPanel
      ) # end fluidPage
    ), # end tabPanel Business Cases
    #---------------------------------------------------------- Sixth MenuPage --- Business Cases
    tabPanel(" About ", id="MenuPage_5",
      fluidPage(
        plotOutput("plot.buzzWords")
      ) # end fluidPage
    ) # end tabPanel Business Cases
  ) # end navbarPage
) # end shinyUI
|
9872823c8e4049bdbda9061c37c2f6c21cb30d0a
|
a6e8109fee1b8cae226b8d84faaf5a97772dc8e7
|
/R/shinyApps/googleMapQuestion1/ui.R
|
bce2565ff974b92f84a9eb9a0398ccf000435d01
|
[
"MIT"
] |
permissive
|
ati-ozgur/stackoverflowQuestions
|
2f92be880e975d7c96b7e216dc1a211d836de7a2
|
647a79f6f55bb8e04d79721082017ca381cc70f3
|
refs/heads/master
| 2020-05-30T18:15:04.873739
| 2017-12-22T12:06:50
| 2017-12-22T12:06:50
| 27,180,389
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 167
|
r
|
ui.R
|
# Minimal Shiny UI: a page with a header, an (empty) sidebar and a main
# panel whose content -- a server-rendered Google map (see plotGoogleMaps)
# -- is injected via uiOutput('mymap').
library(plotGoogleMaps)
library(shiny)
shinyUI(fluidPage(
  pageWithSidebar(
    headerPanel('Map'),
    sidebarPanel(""),
    mainPanel(uiOutput('mymap'))
  )
))
|
3c88126e13edb6ad6758da287a4856ea07b40e6a
|
4bb7250f73d464865f5bbcd59767d3fa3daac269
|
/R/linmod.default.R
|
d3e1081d31ddad7f91a1219eb3cfd7cac39baf98
|
[] |
no_license
|
rcastaneda2/sfunction3
|
640981979a18048e443add7f841e8cbf58f305d4
|
1f9b386a0de6bc777ae29f3f0ca44e82509dd0db
|
refs/heads/master
| 2021-01-10T05:01:19.326827
| 2016-02-08T23:18:33
| 2016-02-08T23:18:33
| 49,517,555
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 244
|
r
|
linmod.default.R
|
# Default S3 method for fitting a linear model via the linmodEst()
# workhorse.  Coerces inputs to matrices, augments the estimate with fitted
# values and residuals, records the call, and classes the result "linmod".
linmod.default <-
  function(x, y,...){
    design   <- as.matrix(x)
    response <- as.matrix(y)
    fit <- linmodEst(design, response)
    # fitted values from the design matrix and estimated coefficients
    fit$fitted.values <- as.vector(design %*% fit$coefficients)
    fit$residuals <- response - fit$fitted.values
    fit$call <- match.call()
    structure(fit, class = "linmod")
  }
|
bdce48d828e03bfdaf7fab642040a98780a1388b
|
8e8a1c5373df9b08f91f72dfea837955b32d4304
|
/man/dim-GGobiData-ok.rd
|
d626daab227fcb03898d849c076a7cb04f8decdb
|
[] |
no_license
|
cran/rggobi
|
0e7b0a9c4e81e52863eef3dcfe525a6f6d5955f8
|
b0a379b06ceef0903b97b0c364192c7df441fe24
|
refs/heads/master
| 2021-01-10T19:43:46.669608
| 2018-07-07T15:20:03
| 2018-07-07T15:20:03
| 17,699,210
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 347
|
rd
|
dim-GGobiData-ok.rd
|
\name{dim.GGobiData}
\alias{dim.GGobiData}
\title{GGobiData dimensions}
\author{Hadley Wickham <h.wickham@gmail.com>}
\description{
Retrieve the dimension of a GGobiData
}
\usage{\S3method{dim}{GGobiData}(x)}
\arguments{
\item{x}{dataset}
}
\examples{
if (interactive()) {
g <- ggobi(mtcars)
dim(g[1])}}
\keyword{attribute}
\keyword{internal}
|
603f132ce613b4f52c2d639d980173b46b11ef0e
|
01eb2bcd3640a9ead4c4bde834814cff000f4ecb
|
/dPCR_plot2.R
|
a695c15625968b1c5da8bedf2ecd1fff4c55b763
|
[] |
no_license
|
devSJR/dpcReport_USER
|
d96178144580aaeb15d2eef2d10363d794192f35
|
a443e68ec9d6968a77394393e133eed2aa96ff07
|
refs/heads/master
| 2020-12-14T07:27:39.445438
| 2017-06-27T12:23:11
| 2017-06-27T12:23:11
| 95,555,484
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,295
|
r
|
dPCR_plot2.R
|
library(dplyr)
library(ggplot2)

# Scan a PubMed-style text export for abstracts about digital PCR and pull
# the publication year of each matching record.
all_lines <- readLines("./data/digital_PCR.txt")

# Candidate record headers look like "12. Title..." (id, dot, space, capital).
all_abstr <- all_lines[grep("^[0-9]+\\. [A-Z]", all_lines)]
potential_abstr <- grep("^[0-9]+\\. [A-Z]", all_lines)
abstr_id <- as.numeric(sapply(strsplit(all_lines[potential_abstr], ". ", fixed = TRUE), first))

# A header is treated as genuine when its id is consecutive with the
# previous or next header's id; the first header is always accepted.
# BUG FIX: the original iterated over 2L:length(abstr_id - 1) -- but
# `abstr_id - 1` subtracts 1 from every element without changing the length,
# so the loop also ran for the last index and abstr_id[i + 1] read past the
# end of the vector, producing NA in the logical index.  The last header is
# now checked against its predecessor only.
n_ids <- length(abstr_id)
abstr_status <- c(TRUE, vapply((1L + seq_len(n_ids - 1L)), function(i) {
  prev_ok <- abstr_id[i] - abstr_id[i - 1] == 1
  next_ok <- if (i < n_ids) abstr_id[i + 1] - abstr_id[i] == 1 else FALSE
  isTRUE(prev_ok) | isTRUE(next_ok)
}, logical(1)))
# (The original computed abstr_status twice and probed `good_abstr` before
#  it was defined -- leftover exploratory lines that stopped the script with
#  "object 'good_abstr' not found"; both removed.)
good_abstr <- c(potential_abstr[abstr_status], length(all_lines))

# Keep records whose full text mentions digital PCR (directly, as "dPCR",
# or as "digital" together with "nucleic").
real <- vapply(seq_len(length(good_abstr) - 1), function(ith_id) {
  abstr_lines <- paste0(all_lines[good_abstr[ith_id]:(good_abstr[ith_id + 1] - 1)], collapse = "")
  grepl("digital PCR", abstr_lines) | grepl("dPCR", abstr_lines) |
    grepl("digital", abstr_lines) & grepl("nucleic", abstr_lines)
}, logical(1))

# For each matching header line, take the first numeric token after the id
# as the publication year (as.numeric warnings on non-numeric tokens are
# expected).
lapply(strsplit(all_lines[good_abstr[-length(good_abstr)][real]], " "), function(i) {
  potential_years <- as.numeric(i[-1])
  potential_years[!is.na(potential_years)][1]
})
|
540f042311e27c9b81e2a2bd092b96f6a95a4937
|
0fbc58702c39addfa7949391d92533922dcf9d49
|
/inst/examples/lattice-iris-hist.R
|
d5766142b5d250377648c48d8094b5a1978972a0
|
[] |
no_license
|
yihui/MSG
|
d3d353514464f962a0d987efd8cf32ed50ac901a
|
8693859ef41139a43e32aeec33ab2af700037f82
|
refs/heads/master
| 2021-11-29T08:12:02.820072
| 2021-08-15T17:14:36
| 2021-08-15T17:14:36
| 1,333,662
| 30
| 12
| null | 2021-08-15T17:14:37
| 2011-02-06T05:42:53
|
R
|
UTF-8
|
R
| false
| false
| 198
|
r
|
lattice-iris-hist.R
|
# Histograms of sepal length for each of the three iris species,
# one panel per species, laid out side by side (3 columns x 1 row).
# (Axis labels are intentionally in Chinese: xlab = "sepal length",
# ylab = "percent".)
library(lattice)
print(
  histogram(~ Sepal.Length | Species, layout = c(3, 1), data = iris,
            xlab = "花萼长度", ylab = "百分数")
)
|
012516a5d8f4e2418213e99eed5974338eda5274
|
1f634cc9a938438cd575ffdc308ff3e94beae27e
|
/bankr/R/Account.R
|
dcc40cf92ca067b3a75625b75fe208666996a68a
|
[
"MIT"
] |
permissive
|
fort-w2021/r6-ex-muskuloes
|
9a28079120f7152407697a5cf207ec2471aabc0d
|
c511a553daad3377ba9e37ec766019fadfdf2147
|
refs/heads/main
| 2023-03-07T14:06:35.251600
| 2021-02-22T18:05:05
| 2021-02-22T18:05:05
| 326,735,725
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,339
|
r
|
Account.R
|
# Adding a field TransactionLog would require that the
# $clone method does a deep copy of the class to prevent errors
# as $clone itself isn't recursive by default.
#' @title Account Class
#'
#' @description
#' This base class for account objects.
#'
#' @importFrom R6 R6Class
#' @importFrom checkmate assert_number
#'
#' @details
#' An Account has a balance.
#' One can deposit or withdraw some money from an account.
#' Note that `balance` is a public field, so it can also be read and
#' modified directly from outside the class.
#'
#' @family Account
#' @export
Account <- R6Class("Account",
  public = list(
    #' @field balance Account balance (starts at 0).
    balance = 0,
    #' @description
    #' deposit a certain amount (>0) into the account
    #'
    #' @param amount (`integer()` >0) amount be deposited.
    #'
    #' @return
    #' Returns the object itself, but modified **by reference**
    deposit = function(amount = 0) {
      # reject negative (and non-numeric) amounts before mutating state
      checkmate::assert_number(amount, lower = 0)
      self$balance <- self$balance + amount
      invisible(self)
    },
    #' @description
    #' withdraw a certain amount (>0) from the account.
    #' NOTE(review): overdrafts are allowed -- the balance may go negative.
    #'
    #' @param amount (`integer()` >0) amount be withdrawn.
    #'
    #' @return
    #' Returns the object itself, but modified **by reference**
    withdraw = function(amount = 0) {
      checkmate::assert_number(amount, lower = 0)
      self$balance <- self$balance - amount
      invisible(self)
    }
  )
)
|
705cb1c837a969da99e40a03fea7fa299db530d4
|
1ff3a51b463c951aa02ef40a89c5a884c94f9516
|
/man/overlaidKernelDensityPlot.Rd
|
1a29569d3bb13c93e147ef81809a054ba1f62d6d
|
[] |
no_license
|
cran/fit.models
|
3a250a89603637cfd2296b4cf25f6bcc8e38eda6
|
2548545703702dbc11c8a2b9ceda8da77777386e
|
refs/heads/master
| 2021-01-10T01:00:23.547075
| 2020-08-02T13:30:02
| 2020-08-02T13:30:02
| 17,696,066
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 594
|
rd
|
overlaidKernelDensityPlot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/overlaidKernelDensityPlot.R
\name{overlaidKernelDensityPlot}
\alias{overlaidKernelDensityPlot}
\title{Overlaid Kernel Density Estimate Plot}
\usage{
overlaidKernelDensityPlot(x, fun, ...)
}
\arguments{
\item{x}{a \code{fit.models} object.}
\item{fun}{a function to extract the desired quantity from \code{x}.}
\item{\dots}{additional arguments are passed to
\code{densityplot}.}
}
\value{
the \code{trellis} object is invisibly returned.
}
\description{
Produces an overlaid kernel density plot.
}
\keyword{hplot}
|
75e55f7ce4f44f4cd058d97609126f2005bf5102
|
5263483e619575a5238491ff0227577a319a5a57
|
/O2_P7_DiagnosticCode_PR_M_MainDashDashCheck.R
|
6affa35aeeed1eba7db838f720ea0b3d27f52a64
|
[] |
no_license
|
abulhassansheikh/O2_P7_DiagnosticCode
|
62ad42ac00d68511f80fac42d608a5d63d6ad6ff
|
1852094b8b43695cb1475fa18decef5825a3b48a
|
refs/heads/master
| 2020-06-12T14:03:10.620098
| 2020-02-21T16:31:14
| 2020-02-21T16:31:14
| 194,322,540
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 875
|
r
|
O2_P7_DiagnosticCode_PR_M_MainDashDashCheck.R
|
################################################
#Load the list of Main Sheet folder Names
#Loaded prefix file
# Scan each brand's "Completed Magento Uploads" folder and report any
# brand whose folder contains more than one "main--*.csv" sheet.
#
# The brand list is read from the prefix reference CSV; for every brand
# named in its first column, the matching upload sub-folder is globbed
# for "main--*.csv" files and an ERROR message is emitted when more than
# one match is found. The function runs purely for its messages and
# returns NULL invisibly.
#
# Args:
#   prefix_file: path to the Brands_Prefix.csv reference file
#     (first column = brand folder names). Defaults to the original
#     hard-coded network share, so existing zero-argument calls behave
#     exactly as before.
#   upload_root: root directory containing one sub-folder per brand.
MainDashDashCheck <- function(
    prefix_file = "//192.168.2.32/Group/Data Team/Brand_Update_Location/5_R_Brand_Reference_Files/Brands_Prefix.csv",
    upload_root = "//192.168.2.32/GoogleDrive/Completed Magento Uploads (v 1.0)") {
  REF.Prefix <- read.csv(prefix_file, header = TRUE, row.names = NULL)
  # seq_len() is safe when the reference file has zero rows
  # (1:nrow() would iterate over c(1, 0)).
  for (i in seq_len(nrow(REF.Prefix))) {
    brand <- as.character(REF.Prefix[i, 1])
    # Glob with a full path instead of setwd(): the original changed the
    # caller's working directory on every iteration and never restored it.
    matches <- Sys.glob(file.path(upload_root, brand, "main--*.csv"))
    if (length(matches) > 1) {
      # basename() keeps the message identical to the old cwd-relative
      # output: only the file names, not the full paths.
      message("ERROR: ", brand, " : ", basename(matches))
    }
  }
  message("")
  message("Done")
  message("")
  message("Version: 1.0")
  message("Last Updated: January 23rd 2019")
  message("Author: Abul Hassan Sheikh")
  invisible(NULL)
}
###########
#MainDashDashCheck()
|
8ac8c7f0c1713478cddb814b4c9035e4e1dcd191
|
f2a03d4bc7e2ff7a86d8b35a4e833e6d23df86c3
|
/test_indicator_functions/ReadIndicatorParms.R
|
bcde8af2d198c7545a6160264645741dca322cb7
|
[
"MIT"
] |
permissive
|
NIVA-Denmark/ekostat
|
51d639d2b70ea19f48ecd4428eb9f6fa775a2707
|
2254d7cb9ec52c96a2e26afad1db0cb6c00f6d6a
|
refs/heads/master
| 2021-07-02T20:31:11.389288
| 2019-04-29T12:23:29
| 2019-04-29T12:23:29
| 170,341,805
| 0
| 0
|
MIT
| 2019-04-29T12:23:30
| 2019-02-12T15:28:55
|
R
|
UTF-8
|
R
| false
| false
| 310
|
r
|
ReadIndicatorParms.R
|
# Load the chlorophyll-a indicator parameter estimates.
#
# Reads the covariance-parameter and parameter-estimate tables from the
# bundled SAS data sets (via haven::read_sas) and returns them together
# in a named list with elements `covparams_CumCover` and
# `parmest_CumCover`.
ReadParms_chla <- function() {
  list(
    covparams_CumCover = haven::read_sas("data/covparms_chla_test.sas7bdat"),
    parmest_CumCover = haven::read_sas("data/parmest_chla_test.sas7bdat")
  )
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.