blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
68e703a2a617d143a2fa272bb5b4d0a1e7a157af
|
34a8fb816af4e9c1d1872708e7da2bee2655cbea
|
/man/N_model.Rd
|
26da738a6ca039b179fb35a32b2199b86d140328
|
[] |
no_license
|
AgronomiaR/seedreg
|
1352db5486133703d041298edc2d2d4f75daed5e
|
7b80724cf113252d9551ef4ef5501ef1de439ae2
|
refs/heads/main
| 2023-05-05T00:07:10.674164
| 2021-05-18T01:35:43
| 2021-05-18T01:35:43
| 339,866,638
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,215
|
rd
|
N_model.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/N_model.R
\name{N_model}
\alias{N_model}
\title{Analysis: graph for not significant trend}
\usage{
N_model(
trat,
resp,
ylab = "Germination (\%)",
error = "SE",
xlab = expression("Temperature ("^"o" * "C)"),
theme = theme_classic(),
legend.position = "top"
)
}
\arguments{
\item{trat}{Numerical vector with treatments (Declare as numeric)}
\item{resp}{Numerical vector containing the response of the experiment.}
\item{ylab}{Dependent variable name (Accepts the \emph{expression}() function)}
\item{error}{Error bar (It can be SE - \emph{default}, SD or FALSE)}
\item{xlab}{Independent variable name (Accepts the \emph{expression}() function)}
\item{theme}{ggplot2 theme (\emph{default} is theme_classic())}
\item{legend.position}{legend position (\emph{default} is "top")}
}
\value{
The function returns an exploratory graph of segments
}
\description{
Graph for a non-significant trend. Can be used within the multicurve command
}
\examples{
library(seedreg)
data("aristolochia")
attach(aristolochia)
N_model(trat,resp)
}
\author{
Gabriel Danilo Shimizu
Leandro Simoes Azeredo Goncalves
}
\keyword{non-significant}
|
c63a5ca14f6f71e5715e42f6bf2e52aa53c2ca4a
|
753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed
|
/service/paws.cloudsearchdomain/man/search.Rd
|
89f573ee59a0960c4f3137616f024bead32dd003
|
[
"Apache-2.0"
] |
permissive
|
CR-Mercado/paws
|
9b3902370f752fe84d818c1cda9f4344d9e06a48
|
cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983
|
refs/heads/master
| 2020-04-24T06:52:44.839393
| 2019-02-17T18:18:20
| 2019-02-17T18:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 15,768
|
rd
|
search.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.cloudsearchdomain_operations.R
\name{search}
\alias{search}
\title{Retrieves a list of documents that match the specified search criteria}
\usage{
search(cursor = NULL, expr = NULL, facet = NULL,
filterQuery = NULL, highlight = NULL, partial = NULL, query,
queryOptions = NULL, queryParser = NULL, return = NULL,
size = NULL, sort = NULL, start = NULL, stats = NULL)
}
\arguments{
\item{cursor}{Retrieves a cursor value you can use to page through large result sets. Use the \code{size} parameter to control the number of hits to include in each response. You can specify either the \code{cursor} or \code{start} parameter in a request; they are mutually exclusive. To get the first cursor, set the cursor value to \code{initial}. In subsequent requests, specify the cursor value returned in the hits section of the response.
For more information, see \href{http://docs.aws.amazon.com/cloudsearch/latest/developerguide/paginating-results.html}{Paginating Results} in the \emph{Amazon CloudSearch Developer Guide}.}
\item{expr}{Defines one or more numeric expressions that can be used to sort results or specify search or filter criteria. You can also specify expressions as return fields.
You specify the expressions in JSON using the form \code{{"EXPRESSIONNAME":"EXPRESSION"}}. You can define and use multiple expressions in a search request. For example:
\code{{"expression1":"_score*rating", "expression2":"(1/rank)*year"}}
For information about the variables, operators, and functions you can use in expressions, see \href{http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-expressions.html#writing-expressions}{Writing Expressions} in the \emph{Amazon CloudSearch Developer Guide}.}
\item{filterQuery}{Specifies a structured query that filters the results of a search without affecting how the results are scored and sorted. You use \code{filterQuery} in conjunction with the \code{query} parameter to filter the documents that match the constraints specified in the \code{query} parameter. Specifying a filter controls only which matching documents are included in the results, it has no effect on how they are scored and sorted. The \code{filterQuery} parameter supports the full structured query syntax.
For more information about using filters, see \href{http://docs.aws.amazon.com/cloudsearch/latest/developerguide/filtering-results.html}{Filtering Matching Documents} in the \emph{Amazon CloudSearch Developer Guide}.}
\item{partial}{Enables partial results to be returned if one or more index partitions are unavailable. When your search index is partitioned across multiple search instances, by default Amazon CloudSearch only returns results if every partition can be queried. This means that the failure of a single search instance can result in 5xx (internal server) errors. When you enable partial results, Amazon CloudSearch returns whatever results are available and includes the percentage of documents searched in the search results (percent-searched). This enables you to more gracefully degrade your users' search experience. For example, rather than displaying no results, you could display the partial results and a message indicating that the results might be incomplete due to a temporary system outage.}
\item{query}{[required] Specifies the search criteria for the request. How you specify the search criteria depends on the query parser used for the request and the parser options specified in the \code{queryOptions} parameter. By default, the \code{simple} query parser is used to process requests. To use the \code{structured}, \code{lucene}, or \code{dismax} query parser, you must also specify the \code{queryParser} parameter.
For more information about specifying search criteria, see \href{http://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching.html}{Searching Your Data} in the \emph{Amazon CloudSearch Developer Guide}.}
\item{queryOptions}{Configures options for the query parser specified in the \code{queryParser} parameter. You specify the options in JSON using the following form \code{{\"OPTION1\":\"VALUE1\",\"OPTION2\":\"VALUE2\"...\"OPTIONN\":\"VALUEN\"}.}
The options you can configure vary according to which parser you use:
\itemize{
\item \code{defaultOperator}: The default operator used to combine individual terms in the search string. For example: \code{defaultOperator: 'or'}. For the \code{dismax} parser, you specify a percentage that represents the percentage of terms in the search string (rounded down) that must match, rather than a default operator. A value of \code{0\%} is the equivalent to OR, and a value of \code{100\%} is equivalent to AND. The percentage must be specified as a value in the range 0-100 followed by the percent (%) symbol. For example, \code{defaultOperator: 50\%}. Valid values: \code{and}, \code{or}, a percentage in the range 0%-100% (\code{dismax}). Default: \code{and} (\code{simple}, \code{structured}, \code{lucene}) or \code{100} (\code{dismax}). Valid for: \code{simple}, \code{structured}, \code{lucene}, and \code{dismax}.
\item \code{fields}: An array of the fields to search when no fields are specified in a search. If no fields are specified in a search and this option is not specified, all text and text-array fields are searched. You can specify a weight for each field to control the relative importance of each field when Amazon CloudSearch calculates relevance scores. To specify a field weight, append a caret (\code{^}) symbol and the weight to the field name. For example, to boost the importance of the \code{title} field over the \code{description} field you could specify: \code{"fields":["title^5","description"]}. Valid values: The name of any configured field and an optional numeric value greater than zero. Default: All \code{text} and \code{text-array} fields. Valid for: \code{simple}, \code{structured}, \code{lucene}, and \code{dismax}.
\item \code{operators}: An array of the operators or special characters you want to disable for the simple query parser. If you disable the \code{and}, \code{or}, or \code{not} operators, the corresponding operators (\code{+}, \code{|}, \code{-}) have no special meaning and are dropped from the search string. Similarly, disabling \code{prefix} disables the wildcard operator (\code{*}) and disabling \code{phrase} disables the ability to search for phrases by enclosing phrases in double quotes. Disabling precedence disables the ability to control order of precedence using parentheses. Disabling \code{near} disables the ability to use the ~ operator to perform a sloppy phrase search. Disabling the \code{fuzzy} operator disables the ability to use the ~ operator to perform a fuzzy search. \code{escape} disables the ability to use a backslash (\code{\}) to escape special characters within the search string. Disabling whitespace is an advanced option that prevents the parser from tokenizing
\item \code{phraseFields}: An array of the \code{text} or \code{text-array} fields you want to use for phrase searches. When the terms in the search string appear in close proximity within a field, the field scores higher. You can specify a weight for each field to boost that score. The \code{phraseSlop} option controls how much the matches can deviate from the search string and still be boosted. To specify a field weight, append a caret (\code{^}) symbol and the weight to the field name. For example, to boost phrase matches in the \code{title} field over the \code{abstract} field, you could specify: \code{"phraseFields":["title^3", "plot"]} Valid values: The name of any \code{text} or \code{text-array} field and an optional numeric value greater than zero. Default: No fields. If you don't specify any fields with \code{phraseFields}, proximity scoring is disabled even if \code{phraseSlop} is specified. Valid for: \code{dismax}.
\item \code{phraseSlop}: An integer value that specifies how much matches can deviate from the search phrase and still be boosted according to the weights specified in the \code{phraseFields} option; for example, \code{phraseSlop: 2}. You must also specify \code{phraseFields} to enable proximity scoring. Valid values: positive integers. Default: 0. Valid for: \code{dismax}.
\item \code{explicitPhraseSlop}: An integer value that specifies how much a match can deviate from the search phrase when the phrase is enclosed in double quotes in the search string. (Phrases that exceed this proximity distance are not considered a match.) For example, to specify a slop of three for dismax phrase queries, you would specify \code{"explicitPhraseSlop":3}. Valid values: positive integers. Default: 0. Valid for: \code{dismax}.
\item \code{tieBreaker}: When a term in the search string is found in a document's field, a score is calculated for that field based on how common the word is in that field compared to other documents. If the term occurs in multiple fields within a document, by default only the highest scoring field contributes to the document's overall score. You can specify a \code{tieBreaker} value to enable the matches in lower-scoring fields to contribute to the document's score. That way, if two documents have the same max field score for a particular term, the score for the document that has matches in more fields will be higher. The formula for calculating the score with a tieBreaker is \code{(max field score) + (tieBreaker) * (sum of the scores for the rest of the matching fields)}. Set \code{tieBreaker} to 0 to disregard all but the highest scoring field (pure max): \code{"tieBreaker":0}. Set to 1 to sum the scores from all fields (pure sum): \code{"tieBreaker":1}. Valid values: 0.0 to 1.0. D
}}
\item{queryParser}{Specifies which query parser to use to process the request. If \code{queryParser} is not specified, Amazon CloudSearch uses the \code{simple} query parser.
Amazon CloudSearch supports four query parsers:
\itemize{
\item \code{simple}: perform simple searches of \code{text} and \code{text-array} fields. By default, the \code{simple} query parser searches all \code{text} and \code{text-array} fields. You can specify which fields to search by with the \code{queryOptions} parameter. If you prefix a search term with a plus sign (+) documents must contain the term to be considered a match. (This is the default, unless you configure the default operator with the \code{queryOptions} parameter.) You can use the \code{-} (NOT), \code{|} (OR), and \code{*} (wildcard) operators to exclude particular terms, find results that match any of the specified terms, or search for a prefix. To search for a phrase rather than individual terms, enclose the phrase in double quotes. For more information, see \href{http://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching-text.html}{Searching for Text} in the \emph{Amazon CloudSearch Developer Guide}.
\item \code{structured}: perform advanced searches by combining multiple expressions to define the search criteria. You can also search within particular fields, search for values and ranges of values, and use advanced options such as term boosting, \code{matchall}, and \code{near}. For more information, see \href{http://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching-compound-queries.html}{Constructing Compound Queries} in the \emph{Amazon CloudSearch Developer Guide}.
\item \code{lucene}: search using the Apache Lucene query parser syntax. For more information, see \href{http://lucene.apache.org/core/4_6_0/queryparser/org/apache/lucene/queryparser/classic/package-summary.html#package_description}{Apache Lucene Query Parser Syntax}.
\item \code{dismax}: search using the simplified subset of the Apache Lucene query parser syntax defined by the DisMax query parser. For more information, see \href{http://wiki.apache.org/solr/DisMaxQParserPlugin#Query_Syntax}{DisMax Query Parser Syntax}.
}}
\item{return}{Specifies the field and expression values to include in the response. Multiple fields or expressions are specified as a comma-separated list. By default, a search response includes all return enabled fields (\code{_all_fields}). To return only the document IDs for the matching documents, specify \code{_no_fields}. To retrieve the relevance score calculated for each document, specify \code{_score}.}
\item{size}{Specifies the maximum number of search hits to include in the response.}
\item{sort}{Specifies the fields or custom expressions to use to sort the search results. Multiple fields or expressions are specified as a comma-separated list. You must specify the sort direction (\code{asc} or \code{desc}) for each field; for example, \code{year desc,title asc}. To use a field to sort results, the field must be sort-enabled in the domain configuration. Array type fields cannot be used for sorting. If no \code{sort} parameter is specified, results are sorted by their default relevance scores in descending order: \code{_score desc}. You can also sort by document ID (\code{_id asc}) and version (\code{_version desc}).
For more information, see \href{http://docs.aws.amazon.com/cloudsearch/latest/developerguide/sorting-results.html}{Sorting Results} in the \emph{Amazon CloudSearch Developer Guide}.}
\item{start}{Specifies the offset of the first search hit you want to return. Note that the result set is zero-based; the first result is at index 0. You can specify either the \code{start} or \code{cursor} parameter in a request, they are mutually exclusive.
For more information, see \href{http://docs.aws.amazon.com/cloudsearch/latest/developerguide/paginating-results.html}{Paginating Results} in the \emph{Amazon CloudSearch Developer Guide}.}
\item{stats}{Specifies one or more fields for which to get statistics information. Each specified field must be facet-enabled in the domain configuration. The fields are specified in JSON using the form:
\code{{"FIELD-A":{},"FIELD-B":{}}}
There are currently no options supported for statistics.}
}
\description{
Retrieves a list of documents that match the specified search criteria. How you specify the search criteria depends on which query parser you use. Amazon CloudSearch supports four query parsers:
}
\details{
\itemize{
\item \code{simple}: search all \code{text} and \code{text-array} fields for the specified string. Search for phrases, individual terms, and prefixes.
\item \code{structured}: search specific fields, construct compound queries using Boolean operators, and use advanced features such as term boosting and proximity searching.
\item \code{lucene}: specify search criteria using the Apache Lucene query parser syntax.
\item \code{dismax}: specify search criteria using the simplified subset of the Apache Lucene query parser syntax defined by the DisMax query parser.
}
For more information, see \href{http://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching.html}{Searching Your Data} in the \emph{Amazon CloudSearch Developer Guide}.
The endpoint for submitting \code{Search} requests is domain-specific. You submit search requests to a domain's search endpoint. To get the search endpoint for your domain, use the Amazon CloudSearch configuration service \code{DescribeDomains} action. A domain's endpoints are also displayed on the domain dashboard in the Amazon CloudSearch console.
}
\section{Accepted Parameters}{
\preformatted{search(
cursor = "string",
expr = "string",
facet = "string",
filterQuery = "string",
highlight = "string",
partial = TRUE|FALSE,
query = "string",
queryOptions = "string",
queryParser = "simple"|"structured"|"lucene"|"dismax",
return = "string",
size = 123,
sort = "string",
start = 123,
stats = "string"
)
}
}
|
b14142764fb47e80858e32a8c7520631d5a966ee
|
183d605c11aa8526e3420c91f2a402d98a8cd355
|
/R/lhs_getpairs.r
|
81ead7c90074fdfd13070e4883b153a2ef66f571
|
[] |
no_license
|
CenterForPeaceAndSecurityStudies/MeasuringLandscape
|
aa732d3be364d5097a20174ae5f4b2b7a86bb417
|
06c46f2af800b5d6d0c3c8e5e95e1f747410bbe7
|
refs/heads/master
| 2021-09-09T22:42:01.403895
| 2018-03-20T03:14:39
| 2018-03-20T03:14:39
| 125,955,143
| 1
| 0
| null | 2018-03-20T03:25:40
| 2018-03-20T03:25:40
| null |
UTF-8
|
R
| false
| false
| 2,039
|
r
|
lhs_getpairs.r
|
# These functions involve retrieving string suggestions based on locality sensitive hashing
#
#
#
#
# library(textreuse)
# library(LSHR)
# Retrieve candidate string pairs via locality-sensitive hashing (LSH).
#
# Wraps LSHR:::get_similar_pairs_cosine() to find pairs of strings whose
# q-gram profiles are similar under cosine distance.
#
# Args:
#   strings:       character vector of the original strings; used to look up
#                  the text for each matched id pair.
#   grams:         q-gram (document-term) matrix built from `strings`
#                  (rows assumed to align with `strings` -- TODO confirm).
#   bands_number:  number of LSH bands. Increasing it yields more false
#                  positives but fewer misses.
#   rows_per_band: rows per LSH band; shrinking it loosens the threshold.
#   mc.cores:      number of cores for the parallel LSH computation.
#
# Returns: a data.table of candidate pairs with columns id1/id2 (indices into
#   `strings`), a/b (the strings themselves), and ab/ba (underscore-joined
#   keys in both orders, for order-insensitive membership tests).
lhs_getpairs <- function(strings, grams, bands_number=400, rows_per_band=5, mc.cores = parallel::detectCores()) {
  pairs <- LSHR:::get_similar_pairs_cosine(
    grams,
    bands_number = bands_number, # increase this number, and you'll get more false positives but fewer misses
    rows_per_band = rows_per_band, # basically shrink this number
    #distance = "cosine", #redundant if you use pairs_cosine
    seed = 1,
    # Bug fix: previously hard-coded to parallel::detectCores(), which
    # silently ignored the mc.cores argument passed by the caller.
    mc.cores = mc.cores,
    verbose = TRUE
  )
  suggestions <- as.data.table(pairs) # pairs[order(-N)]
  # Map the integer ids returned by LSHR back to the original strings.
  suggestions$a <- strings[suggestions$id1]
  suggestions$b <- strings[suggestions$id2]
  # Keys in both orientations so callers can test membership without caring
  # which string of the pair came first.
  suggestions[, ab := paste(a, b, sep = "_")]
  suggestions[, ba := paste(b, a, sep = "_")]
  return(suggestions)
}
# Evaluate LSH candidate generation against a labelled set of known matches.
#
# Args:
#   strings, grams:              as in lhs_getpairs().
#   data:                        data frame with `stemmed_ab` (underscore-joined
#                                pair key) and `rex_match` (1 = known true match).
#   bands_number, rows_per_band: LSH tuning parameters, forwarded on.
#
# Returns: a one-row data.frame with hit/miss counts, the number of
#   suggestions, the q-gram count, the tuning parameters, and the rounded
#   number of suggestions per row of `grams`.
eval_lshr <- function(strings, grams, data, bands_number=400, rows_per_band=5) {
  suggestions <- lhs_getpairs(strings, grams, bands_number, rows_per_band)
  # Keys of the known-true matches; hoisted so the subset is computed once.
  true_ab <- data$stemmed_ab[data$rex_match == 1]
  # A known match is a hit if it appears among the suggestions in either
  # orientation (a_b or b_a).
  hits <- true_ab %in% suggestions$ab | true_ab %in% suggestions$ba
  # misses is exactly the complement of hits (the original recomputed it via
  # De Morgan's law). Note: exact matches count as misses, because
  # suggestions never include a pair of a string with itself.
  misses <- !hits
  results <- data.frame(
    hits = sum(hits),
    misses = sum(misses),
    suggestions = nrow(suggestions),
    qgrams = ncol(grams),
    bands_number = bands_number,
    rows_per_band = rows_per_band,
    suggestions_per = round(nrow(suggestions) / nrow(grams))
  )
  return(results)
}
|
af87fbe0393277c97587fc401baba97775e9d529
|
5325176ee2337407e603d274e384d7baaf348588
|
/R/aussim_df.R
|
7965dc4cbff08157add101968b3122313da1f15d
|
[] |
no_license
|
xxzhiyouwo/PensionAge
|
8af1b42f76b7ed59a7fb4c40862db1e998a09d34
|
05601719b2bc5cfab68437581ac6e77767c6756a
|
refs/heads/master
| 2023-04-17T07:32:11.388956
| 2021-05-05T07:38:11
| 2021-05-05T07:38:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 626
|
r
|
aussim_df.R
|
# Combine the male and female population simulation arrays in `aus.sim`
# into one long tibble (one row per Age x Year x Rep x Sex), with
# population rescaled to thousands and years truncated at `maxyear`.
make_aussim_df <- function(aus.sim, maxyear = 2050) {
  # Shared dimension names for both sex arrays; the Year labels are shifted
  # forward by one relative to the stored dimnames.
  shared_dims <- list(
    Age = dimnames(aus.sim[[1]])[[1]],
    Year = as.numeric(dimnames(aus.sim[[1]])[[2]]) + 1,
    Rep = dimnames(aus.sim[[1]])[[3]]
  )
  dimnames(aus.sim[["male"]]) <- shared_dims
  dimnames(aus.sim[["female"]]) <- shared_dims
  # Convert one sex's array (divided by 1e3) into a tidy tibble.
  sex_tibble <- function(key, label) {
    as.tbl_cube(aus.sim[[key]] / 1e3, met_name = "Population") %>%
      as_tibble() %>%
      mutate(Sex = label)
  }
  aussim_df <- bind_rows(
    sex_tibble("male", "Male"),
    sex_tibble("female", "Female")
  ) %>%
    filter(Year <= maxyear)
  return(aussim_df)
}
|
41a86c7fe45e699e1f479c760b194cf5a4b5a53e
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.machine.learning/man/comprehend_start_events_detection_job.Rd
|
2d6d324e72df4d16c2dccfa9565e4de01291a2a2
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,672
|
rd
|
comprehend_start_events_detection_job.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/comprehend_operations.R
\name{comprehend_start_events_detection_job}
\alias{comprehend_start_events_detection_job}
\title{Starts an asynchronous event detection job for a collection of documents}
\usage{
comprehend_start_events_detection_job(
InputDataConfig,
OutputDataConfig,
DataAccessRoleArn,
JobName = NULL,
LanguageCode,
ClientRequestToken = NULL,
TargetEventTypes,
Tags = NULL
)
}
\arguments{
\item{InputDataConfig}{[required] Specifies the format and location of the input data for the job.}
\item{OutputDataConfig}{[required] Specifies where to send the output files.}
\item{DataAccessRoleArn}{[required] The Amazon Resource Name (ARN) of the IAM role that grants Amazon
Comprehend read access to your input data.}
\item{JobName}{The identifier of the events detection job.}
\item{LanguageCode}{[required] The language code of the input documents.}
\item{ClientRequestToken}{A unique identifier for the request. If you don't set the client
request token, Amazon Comprehend generates one.}
\item{TargetEventTypes}{[required] The types of events to detect in the input documents.}
\item{Tags}{Tags to associate with the events detection job. A tag is a key-value
pair that adds metadata to a resource used by Amazon Comprehend. For
example, a tag with "Sales" as the key might be added to a resource to
indicate its use by the sales department.}
}
\description{
Starts an asynchronous event detection job for a collection of documents.
See \url{https://www.paws-r-sdk.com/docs/comprehend_start_events_detection_job/} for full documentation.
}
\keyword{internal}
|
4ee4873fc09cb2e927d0c86f83515597278987fd
|
512f85e00d2bf57117cf336f6b1951bdaf2dd3ea
|
/man/data.Rd
|
1ff3f91b073f4066f71f844a392ffe3102acf291
|
[] |
no_license
|
giabaio/survHE
|
a6ee6394000c9920ae97ea7c052d47d0ab241a62
|
17c8d5ba9761e38c577d13506b13cfb43bfb49e0
|
refs/heads/main
| 2023-05-24T18:23:30.529957
| 2023-05-23T11:11:24
| 2023-05-23T11:11:24
| 75,872,554
| 40
| 23
| null | 2023-02-09T17:33:28
| 2016-12-07T20:21:33
|
C++
|
UTF-8
|
R
| false
| true
| 1,062
|
rd
|
data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{data}
\alias{data}
\title{A fictional survival trial.}
\format{
A data frame with 367 rows and 8 variables:
\describe{
\item{ID_patient}{The individual level identifier}
\item{time}{The observed time at which the event happens}
\item{censored}{An indicator to describe whether the
event is fully observed or censored}
\item{arm}{An indicator for the treatment arm, with
0 = control and 1 = active treatment}
\item{sex}{An indicator for the individual's sex, with
0 = male and 1 = female}
\item{age}{A numeric variable with the individual's age}
\item{imd}{A categorical variable representing a measure
of area-level social deprivation}
\item{ethnic}{A categorical variable representing the
individual's ethnic group, as measured from a Census}
}
}
\usage{
data
}
\description{
A dataset containing fictional data from a trial, where
the main outcome is in terms of time-to-event and
censoring indicator and with additional covariates.
}
\keyword{datasets}
|
40c0101493d1be61bfe320cd5d050d9dd18209d7
|
6817390c7d7ca2b221dd9bdfb23b03ed08494c00
|
/R/fct_loadStage.R
|
e350a201e3a9b2a120f970bc2a1d60a457554bbe
|
[] |
no_license
|
nvelden/shinyNGLVieweR
|
4e038154924ee38e39196a0aa0a8fef3174e4d07
|
96f4dd7f25532346b6af5a6f34c72388d502eda2
|
refs/heads/master
| 2023-07-03T07:08:29.154554
| 2021-08-18T14:09:39
| 2021-08-18T14:09:39
| 366,112,797
| 7
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 781
|
r
|
fct_loadStage.R
|
#' Load stage from a .ngl file
#'
#' @description
#' Apply the stage parameters stored in a .ngl file to a viewer, falling
#' back to default stage settings when no stage data is supplied.
#'
#' @param NGLVieweR NGLVieweR object.
#' @param stage data.frame of selections loaded from .ngl file.
#'
#' @import NGLVieweR
#' @export
loadStage <- function(NGLVieweR, stage) {
  viewer <- NGLVieweR
  if (is.null(stage)) {
    # No saved stage: apply the default stage settings.
    return(stageParameters(
      viewer,
      cameraType = "perspective",
      backgroundColor = "black",
      lightIntensity = 1,
      clipNear = 0,
      clipFar = 100
    ))
  }
  # Apply the stage settings recorded in the .ngl file.
  stageParameters(
    viewer,
    cameraType = stage$cameraType,
    backgroundColor = stage$backgroundColor,
    lightIntensity = stage$lightIntensity,
    clipNear = stage$clipNear,
    clipFar = stage$clipFar
  )
}
|
59212873297c2bfff633ab9f3f3a904257d904dc
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610130943-test.R
|
da8bd6131fd18b8c8cdcd04e68d6b447bc813861
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 735
|
r
|
1610130943-test.R
|
# libFuzzer/valgrind regression input for grattan:::anyOutside: a captured
# argument list (scalar bounds a/b and an integer vector x containing
# extreme and sentinel values) from a fuzzing run.
testlist <- list(a = -171L, b = -256L, x = c(-1L, -16383233L, -16320513L, 505085951L, 16777471L, 63996L, -114883070L, -63753L, -54785L, 1845557756L, -109874144L, 1685026146L, 1818569827L, 1862271015L, 704612488L, -2004318072L, -2004318072L, -2004318072L, -2004318072L, -2004353024L, 0L, 0L, 30L, 452984576L, 0L, 16777216L, 0L, 16383225L, 654311424L, -604023506L, -57830L, -42470L, 452984831L, -15007970L, 437911771L, 436263686L, -134217984L, 255L, 505085951L, -1L, -1L, -1L, -41502L, 41L, -64769L, 1442781951L, -1L, -1L, -250L, -65310L, -501343518L, -1L, 505085951L, -1L, -15007745L, -1L, -16384000L, 0L, 851967L, -246L, -1L, -16777216L, 0L, 218103807L, -1L, -1L ))
# Replay the captured call so any crash or invalid read reproduces
# deterministically, then print the structure of the result.
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
83de342faa1df68f354f3238e29c3630a1cd5817
|
5f3198e36d7c42b0ed15e1e364a7bc3b3e00652e
|
/man/reviewNeuronsMoreFrames.Rd
|
d003563fa00709ab09a4e24e0b40fdc9d2f6f803
|
[] |
no_license
|
cran/scalpel
|
f6a2eeca848d9f793810754400de0059ccaa5bda
|
5ed5b98cfd326688c8bdabfdd8d487a83e807768
|
refs/heads/master
| 2021-06-26T13:05:04.209432
| 2021-02-03T04:30:02
| 2021-02-03T04:30:02
| 84,911,225
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,264
|
rd
|
reviewNeuronsMoreFrames.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SCALPEL_reviewNeurons.R
\name{reviewNeuronsMoreFrames}
\alias{reviewNeuronsMoreFrames}
\title{Save additional frames for manually classifying the identified neurons from SCALPEL.}
\usage{
reviewNeuronsMoreFrames(scalpelOutput, neuronSet, numFrames = 10)
}
\arguments{
\item{scalpelOutput}{An object returned by one of the SCALPEL functions:
\code{\link{scalpel}}, \code{\link{scalpelStep2}}, or \code{\link{scalpelStep3}}.}
\item{neuronSet}{The set of neurons that should be reviewed:
use \code{"A"} for those resulting from \code{\link{scalpelStep2}} and saved as \code{scalpelOutput$A}, or use \code{"Afilter"} for those resulting from
\code{\link{scalpelStep3}} and saved as \code{scalpelOutput$Afilter}. This argument is ignored if the class of \code{scalpelOutput} is \code{scalpelStep2}.}
\item{numFrames}{The maximum number of frames that should be saved for each neuron being considered. Each neuron has a number of frames
equal to the number of members in that neuron's cluster that can be plotted. All frames will be saved when the total number of available frames for the neuron
is less than \code{numFrames}. The default value is 10.}
}
\value{
None
}
\description{
We use this function after running \code{\link{reviewNeurons}} or \code{\link{reviewNeuronsInteractive}} to plot additional frames for neurons whose classification was
unclear from the single frame plotted. The additional frames are saved, and the classification
for the neurons can then be updated using \code{\link{updateNeurons}} or \code{\link{updateNeuronsInteractive}}.
}
\examples{
\dontrun{
### many of the functions in this package are interconnected so the
### easiest way to learn to use the package is by working through the vignette,
### which is available at ajpete.com/software
#assumes you have run the example for the "reviewNeuronsInteractive" function
#we save frames for the neurons previously classified
#as "?" using the "reviewNeuronsInteractive" function
reviewNeuronsMoreFrames(scalpelOutput = scalpelOutput, neuronSet = "A")
}
}
\seealso{
\code{\link{reviewNeurons}}, \code{\link{updateNeurons}}, \code{\link{reviewNeuronsInteractive}}, \code{\link{updateNeuronsInteractive}}
}
|
4f71d9fc4d81908da53d73b0ec4d692240084476
|
7a74bf857ab0db8ef9d174e1b8932a26d6880eac
|
/scripts/06_matching.R
|
819d97f5c535eb7c110daed7072b03e1dfca453b
|
[] |
no_license
|
fghjorth/vkme16
|
47669c1753d30f75cac7f38428210d7ee59d77a8
|
fe7966d9a1e3861c0f2be7964038a8f331acf619
|
refs/heads/master
| 2020-04-05T18:57:25.570505
| 2016-12-13T13:28:56
| 2016-12-13T13:28:56
| 51,516,886
| 3
| 5
| null | 2016-11-21T10:04:24
| 2016-02-11T13:37:09
|
R
|
UTF-8
|
R
| false
| false
| 1,446
|
r
|
06_matching.R
|
# Replication of Ladd & Lenz: effect of newspaper endorsement exposure
# ("tolabor") on 1997 Labour vote, comparing plain OLS against OLS on a
# coarsened-exact-matched sample.
# NOTE(review): setwd() with a user-specific path makes the script
# non-portable -- consider relative paths; kept to preserve behavior.
setwd("~/GitHub/vkme16/")
# library() (not require()) so a missing package fails loudly up front.
library(haven)
library(stargazer)
library(cem) # coarsened exact matching
library(dplyr)
# load the data
ld <- read_dta("data/6_laddlenz.dta")
# bivariate model
ols1 <- lm(vote_l_97 ~ tolabor, data = ld)
summary(ols1)
# model with controls (cf. Ladd/Lenz p. 402)
ols2 <- lm(vote_l_97 ~ tolabor + vote_l_92 + vote_c_92 + vote_lib_92 +
  labor + conservative + liberal + labfel92 + confel92 + know_3, data = ld)
summary(ols2)
# compare the coefficients
stargazer(ols1, ols2, type = "text", keep = "tolabor")
# reduce the data to the variables used in ols2
ld <- dplyr::select(ld, vote_l_97, tolabor, vote_l_92, vote_c_92, vote_lib_92, labor, conservative, liberal, labfel92, confel92, know_3)
# convert from haven's import format to a plain data frame
# (which some older packages require)
ld <- as.data.frame(ld)
# assess balance before matching
imbalance(group = ld$tolabor, data = ld, drop = c("tolabor", "vote_l_97"))
# run the matching
cemmatch1 <- cem(treatment = "tolabor", data = ld, drop = "vote_l_97")
# new data frame containing only the matched observations
ldmatched <- ld[cemmatch1$matched, ]
# assess balance on the matched data
imbalance(group = ldmatched$tolabor, data = ldmatched, drop = c("tolabor", "vote_l_97"))
# we can now run the regression on the matched data
ols2matched <- lm(vote_l_97 ~ tolabor + vote_l_92 + vote_c_92 + vote_lib_92 +
  labor + conservative + liberal + labfel92 + confel92 + know_3, data = ldmatched)
# compare the coefficients
stargazer(ols1, ols2, ols2matched, type = "text", keep = "tolabor")
|
e55284dfab31d5ebb8e2ef88a47da8524113ae7d
|
f6b5afc27bdcb335263f0b97c5a9d5c2b65d0e9a
|
/R/eA1c.R
|
849d223eb7646cdec4ed2430ce7db40b75c80145
|
[] |
no_license
|
marhenriq/cgmquantify
|
ea2a69e27f6aaa62f6e71e3a90f425741ad77b14
|
797f4b59a3a97a8801e54de71a2e659d635cf464
|
refs/heads/main
| 2023-03-01T18:26:11.997739
| 2021-02-10T22:04:06
| 2021-02-10T22:04:06
| 331,686,450
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 470
|
r
|
eA1c.R
|
#' Compute Estimated A1c
#'
#' Computes the estimated A1c according to the American Diabetes
#' Association calculator: eA1c = (46.7 + mean glucose) / 28.7.
#'
#' @param df Data frame read through readfile; must contain a numeric
#'   \code{glucose} column.
#' @return A numeric value representing eA1c
#' @examples
#' mydatafile <- system.file("extdata", "my_data_file.csv", package = "cgmquantify")
#' mydata <- readfile(mydatafile)
#' eA1c(mydata)
#' @export
eA1c <- function(df) {
  # ADA conversion formula applied to the average glucose reading
  avg_glucose <- mean(df$glucose)
  (46.7 + avg_glucose) / 28.7
}
|
d15818ee20a601e4aced298acfb0dc3d55766314
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Opt5PL/examples/Deff.Rd.R
|
20d786678635f41900ec25ae7e312e00b21d389c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 579
|
r
|
Deff.Rd.R
|
library(Opt5PL)
### Name: Deff
### Title: Obtaining D-efficiency for estimating model parameters
### Aliases: Deff
### ** Examples
##Under the 5PL model with the parameter values
##T5=(30000,0.5,800,0.5,2) and the dose range [1.95,32000],
##find the D-efficiency of the broad range design.
##The broad range design
dose=c(1.95,7.8,31.25,125,500,2000,8000,32000)
dlength=length(dose)
# Equal weights, but only dlength-1 of them — presumably Deff() derives the
# last weight so the design weights sum to 1; TODO confirm against the
# Opt5PL::Deff documentation.
weight=rep(1/dlength,dlength-1)
##Parameter values
T5=c(30000,0.5,800,0.5,2)
##Compute D-efficiency of the broad range design
Deff(weight,dose,model=5,P=T5,LB=1.95,UB=32000,grid=.01)
|
655ab5d69be6d41a5236d28caeea08343f58ac09
|
fc8a4b06b96c26619d2d9178a6b6f15f87dcb5b0
|
/analysis.R
|
1c937760a91bae8943b094a684ec1693e47a7cc4
|
[] |
no_license
|
chopley/timHuntSentimentAnalysis
|
f41b859970d92aeee6cea3f1c7856e262576d519
|
df7fb978c962aa2a386a8d7c86712a92a768b6d7
|
refs/heads/master
| 2021-01-23T06:44:38.166388
| 2015-07-28T11:10:20
| 2015-07-28T11:10:20
| 38,750,164
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,907
|
r
|
analysis.R
|
# analysis.R — sentiment analysis of #TimHunt tweets over time.
# Pipeline: load daily tweet dumps -> clean the tweet text -> score each
# tweet against the AFINN word list -> plot daily histograms of the mean
# emotion -> build a tm corpus -> classify emotions with the sentiment
# package.
# NOTE(review): the CRAN Twitter client package is usually named "twitteR" —
# confirm require(twitter) loads the intended package.
require(twitter)
require(sentiment)
require(plyr)
require(ggplot2)
require(wordcloud)
require(RColorBrewer)
require(tm)
#get a list of words that have emotions attached to them
# (AFINN-111: word <TAB> valence score)
wordList<-read.csv2('AFINN/AFINN-111.txt',sep='\t')
# One saved R workspace per day of tweets; each file is expected to define
# a data frame named dfTimHunt when load()ed — TODO confirm.
filenameList<-c('2015-06-08_2015-06-09_timhunt','2015-06-09_2015-06-10_timhunt','2015-06-10_2015-06-11_timhunt','2015-06-12_2015-06-13_timhunt',
'2015-06-13_2015-06-14_timhunt','2015-06-15_2015-06-16_timhunt','2015-06-16_2015-06-17_timhunt','2015-06-21_2015-06-22_timhunt',
'2015-06-22_2015-06-23_timhunt','2015-06-23_2015-06-24_timhunt','2015-06-26_2015-06-27_timhunt','2015-06-27_2015-06-28_timhunt',
'2015-06-28_2015-06-29_timhunt')
# Stack all daily dumps into one data frame df1.
load(filenameList[1])
df1<-dfTimHunt
for(i in 2:length(filenameList)){
load(filenameList[i])
df1<-rbind(df1,dfTimHunt)
}
# remove retweet entities
some_txt = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", df1$text)
# remove at people
some_txt = gsub("@\\w+", "", some_txt)
# remove punctuation
some_txt = gsub("[[:punct:]]", "", some_txt)
# remove numbers
some_txt = gsub("[[:digit:]]", "", some_txt)
# remove html links
some_txt = gsub("http\\w+", "", some_txt)
# remove unnecessary spaces
some_txt = gsub("[ \t]{2,}", " ", some_txt)
some_txt = gsub("^\\s+|\\s+$", "", some_txt)
some_txt<-tolower(some_txt)
#split the strings up
splitStrings<-strsplit(some_txt," ")
nSamples<-length(splitStrings)
myCorpus <- Corpus(VectorSource(some_txt))
my_stopwords <- c(stopwords('english'))
myCorpus <- tm_map(myCorpus, removeWords, my_stopwords,lazy=TRUE)
# Score each cleaned tweet: mean AFINN valence of its words.
for(i in 1:length(myCorpus)){
# bare "i" below has no visible effect inside a for loop (no auto-printing)
i
#we use try to handle the cases where our cleaning has thrown out everything in the tweet :(
try({
#get the list of words remaining after cleaning a specific tweet
a<-as.character(myCorpus[[i]])
#split the words by space
words<-strsplit(a," ")
#find how many words are in this particular tweet
nWords <-length(words[[1]])
aSum<-0
#now go through those words and create a vector that contains the emotional value of each word
for(j in 1:nWords){
#find the values for words contained in the lists
# NOTE(review): this indexes wordList by comparing against its first
# column and picking column 2 — assumes AFINN loaded as (word, score);
# verify read.csv2 parsed the header/columns as intended.
aSum<-cbind(aSum,wordList[words[[1]][j]==wordList[1]][2])
}
#now try and find the mean of that vector- this should give an idea of how the tweeter felt about Tim Hunt
df1$emotion[i]<-mean(as.numeric(aSum),na.rm=TRUE)
},silent=TRUE)
}
#now we plot histograms of the data
# 4x4 grid of panels; one histogram per 24-hour window starting 2015-06-09
par(mfrow=c(4,4))
tstart <- strptime("2015-06-09 02:54:17 UTC", format="%Y-%m-%d %H:%M:%S",tz="UTC")
tend<-tstart+ 60*60*24
meanEmotion<-mean(df1$emotion[((df1$created<tend)&(df1$created>tstart))])
Title<-paste(tstart,'\nEmotion',round(meanEmotion,3))
hist(df1$emotion[((df1$created<tend)&(df1$created>tstart))],30,main=Title,xlab='Emotion')
# Slide the 24h window forward 20 times; try() skips empty windows.
for(i in 1:20){
tstart<-tstart + 60*60*24
tend<-tend+ 60*60*24
meanEmotion<-mean(df1$emotion[((df1$created<tend)&(df1$created>tstart))])
Title<-paste(tstart,'\nEmotion',round(meanEmotion,3))
try(hist(df1$emotion[((df1$created<tend)&(df1$created>tstart))],30,main=Title,xlab='Emotion'))
}
#create a corpus from the strings
myCorpus <- Corpus(VectorSource(df1$text))
#transform to lower
myCorpus <- tm_map(myCorpus, content_transformer(tolower),lazy=TRUE)
#remove punctuation
myCorpus <- tm_map(myCorpus, content_transformer(removePunctuation),lazy=TRUE)
my_stopwords <- c(stopwords('english'))
myCorpus <- tm_map(myCorpus, removeWords, my_stopwords,lazy=TRUE)
myCorpus <- tm_map(myCorpus, stripWhitespace,lazy=TRUE)
inspect(myCorpus[1:2])
myDTM <- TermDocumentMatrix(myCorpus)
dataframe<-data.frame(text=unlist(sapply(myCorpus, `[`)), stringsAsFactors=F)
# classify emotion
class_emo = classify_emotion(some_txt, algorithm="bayes", prior=1.0)
# get emotion best fit
# NOTE(review): column 7 is assumed to be the best-fit emotion label —
# confirm against the sentiment::classify_emotion return format.
emotion = class_emo[,7]
# substitute NA's by "unknown"
emotion[is.na(emotion)] = "unknown"
|
1586e89a3cc1c375174fd35dfe973488cba3e398
|
70c2171ef10f71b24c218a27bf4276b3c8779f61
|
/armpipeline_r/ARM/improvement.R
|
fd917d45bccb53764d56fe8f5299e09f913d5fd2
|
[] |
no_license
|
DataAnalyticsinStudentHands/RPipeLine
|
295b873e0510727113f77bc86af31d8d95ee06c4
|
13c59e9e5a11222e22bacd3f73d392a4da0331dd
|
refs/heads/master
| 2021-01-13T17:06:58.535140
| 2017-01-26T19:21:40
| 2017-01-26T19:21:40
| 69,997,633
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,415
|
r
|
improvement.R
|
improvement <- function(rules, t = 0.1) {
  # Decide whether each association rule's odds ratio (OR) is "interesting"
  # or merely a consequence of a parent rule, so redundant rules can be
  # eliminated with an Occam's-Razor strategy: a rule is NOT an improvement
  # when some proper sub-rule exists whose OR differs by less than `t`.
  #
  # Args:
  #   rules: an arules-style 'rules' S4 object; the sparse LHS item matrix
  #          lives in rules@lhs@data (slot i = item tags, slot p = column
  #          pointers) and rules@quality$oddsRatio holds the ORs.
  #   t:     tolerance on |OR(rule) - OR(sub-rule)| below which the rule is
  #          flagged as redundant (default 0.1).
  #
  # Returns:
  #   A data.frame with one logical column `imp`, TRUE for improved rules.

  item_tags <- rules@lhs@data@i  # item tags, one contiguous run per rule
  col_ptr   <- rules@lhs@data@p  # column pointers delimiting each rule's run
  n_rules   <- length(col_ptr) - 1L

  # Reorganize the sparse-matrix runs into a list of per-rule tag vectors.
  # seq.int(..., length.out = 0) handles empty rules safely (the original
  # (p+1):p form counted downwards for a zero-length run).
  rule_items <- vector("list", n_rules)
  for (r in seq_len(n_rules)) {
    run <- seq.int(col_ptr[r] + 1L, length.out = col_ptr[r + 1L] - col_ptr[r])
    rule_items[[r]] <- item_tags[run]
  }

  # Flag improved rules; rules of length <= 1 are all significant.
  imp <- rep(TRUE, n_rules)
  for (r in seq_len(n_rules)) {
    items <- rule_items[[r]]
    if (length(items) <= 1L) {
      next
    }
    # All proper, non-empty subsets of the current rule's items.
    # seq_len() fixes the original's `1:length(x)-1` precedence slip, which
    # also generated a spurious size-0 subset (previously filtered later).
    subsets <- list()
    for (k in seq_len(length(items) - 1L)) {
      subsets <- c(subsets, combn(items, k, simplify = FALSE))
    }
    # Look up each subset among the rules; if a matching sub-rule has a
    # nearly identical OR, the current rule adds nothing.
    for (sub in subsets) {
      pos <- match(list(sub), rule_items)
      if (!is.na(pos) &&
          abs(rules@quality$oddsRatio[r] - rules@quality$oddsRatio[pos]) < t) {
        imp[r] <- FALSE
      }
    }
  }

  data.frame(imp = imp)
}
|
23bde576f1c596f123930163716701f007185cdd
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/condmixt/examples/gpd.mme.Rd.R
|
c9297e66bcf1804deb3aafdfcb37a948beee9739
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 242
|
r
|
gpd.mme.Rd.R
|
library(condmixt)
### Name: gpd.mme
### Title: Moment Estimator for the Generalized and the Hybrid Pareto
### Distribution
### Aliases: gpd.mme hpareto.mme
### ** Examples
# Draw 1000 samples from a hybrid Pareto distribution, then recover the
# parameters by the method of moments.
# NOTE(review): the RNG is unseeded, so results differ between runs.
r<-rhpareto(1000,0.1,0,1,trunc=FALSE)
hpareto.mme(r,p=0.991)
|
b771ee8203ee667198079980d917570a909ac7cc
|
f6dcb066042632979fc5ccdd6aa7d796d3191003
|
/Problem Sets/Student Submissions/Problem Set 3/1830531/e3_q1_1830531.R
|
5238da24c8506636c5ef1f7d02bdf9f4cdbd8d3d
|
[] |
no_license
|
NikoStein/ADS19
|
45301bcd68d851053399621dd8a0be784e1cc899
|
90f2439c6de8569f8a69983e0a605fd94e2e9f0a
|
refs/heads/master
| 2020-05-19T19:10:12.411585
| 2020-03-12T00:02:14
| 2020-03-12T00:02:14
| 185,165,255
| 0
| 4
| null | 2019-08-06T06:16:30
| 2019-05-06T09:26:01
|
HTML
|
UTF-8
|
R
| false
| false
| 5,027
|
r
|
e3_q1_1830531.R
|
# Problem Set 3
# Question 1
# Analysis of the 2019 German EU election results (Bundeswahlleiter CSV):
# bar charts of party shares per state, then leaflet choropleths of the
# winning party per state (1 d) and per constituency (1 e).
library(tidyverse)
library(readr)
library(RColorBrewer)
# 1 a)
data <- read_csv2("https://www.bundeswahlleiter.de/dam/jcr/5441f564-1f29-4971-9ae2-b8860c1724d1/ew19_kerg2.csv",
skip = 9)
# consider only big parties
Parteien <- c("CDU","CSU", "GRÜNE", "SPD", "AfD", "DIE LINKE", "FDP")
# 1 b)
#filter out the state level and performance of all the big parties
data %>%
filter(UegGebietsart == "BUND", Gruppenart == "Partei") -> laender
# col-graph for every state and election result colored in
laender %>%
filter(Gruppenname %in% Parteien) %>%
ggplot(aes(x = Gebietsname, y = Prozent, fill = Gruppenname)) +
geom_col() +
theme_light() +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size = 7),
axis.text.y = element_text(size = 5),
axis.title.y = element_text(size = 8),
axis.title.x = element_text(size = 8),
legend.title = element_text(size = 5),
legend.text = element_text(size = 6))
# faceting, to show the results for every state separately
laender %>%
filter(Gruppenname %in% Parteien) %>%
ggplot(aes(x = Gruppenname, y = Prozent)) +
geom_col() +
facet_wrap(~Gebietsname)
# 1 c)
# turn it around and show the parties' performance for every state
# (Parteien[-2] drops the CSU; 16 fill colours, one per federal state)
pal = colorRampPalette(brewer.pal(9, "Set1"))
laender %>%
filter(Gruppenname %in% Parteien[-2]) %>%
ggplot(aes(x = Gebietsname, y = Prozent, fill = factor(Gebietsname)))+
scale_fill_manual(values = pal(16)) +
geom_col() +
facet_wrap(~Gruppenname) +
theme_light()+
theme(axis.text.x = element_text(angle = 90, hjust = 1, size = 7),
axis.text.y = element_text(size = 5),
axis.title.y = element_text(size = 8),
axis.title.x = element_text(size = 8),
legend.title = element_text(size = 5),
legend.text = element_text(size = 6),
legend.position = "none")
# 1 d)
library(leaflet)
library(raster)
# winner = the party with the largest vote share in each state
laender %>%
group_by(Gebietsname) %>%
filter(Prozent == max(Prozent)) -> winner
# load level 1 map for germany
germany <- getData("GADM", country="DEU", level=1)
# create dataframe with information we want to map onto the germany map
election = winner %>% ungroup() %>% dplyr::select(Gruppenname, Gebietsname, Prozent)
# merge the spdf with the election results
merge(germany, election, by.x = "NAME_1", by.y = "Gebietsname") -> germany_joined
pal <- colorFactor("Set1", NULL)
polygon_popup <- paste0("<strong>Bundesland: </strong>", germany_joined$NAME_1, "<br>",
"<strong>Gewinnerpartei: </strong>", germany_joined$Gruppenname, "<br>",
"<strong>Anteil: </strong>", round(germany_joined$Prozent, 1),
"<strong> % </strong>")
leaflet() %>%
addProviderTiles("CartoDB.Positron") %>%
addPolygons(data = germany_joined,
fillOpacity = 0.65,
fillColor= ~pal(Gruppenname),
weight = 1,
color = "white",
popup = polygon_popup)
# 1 e)
# filter the data for the winner of every constituency with the respective share of votes
data %>%
filter(Gebietsart == "Kreis", Gruppenart == "Partei") %>%
group_by(Gebietsname) %>%
filter(Prozent == max(Prozent)) -> kreise
# load the map of level 2 for germany
germanyKreise <- getData("GADM", country="DEU", level=2)
germanyKreise$NAME_2
# map is not up to date, in the meantime Göttingen and Osterode merged into one constituency
# NOTE(review): indices 229 and 211 are hard-coded positions in the GADM
# polygon list — they break silently if the GADM data changes; verify.
germanyKreise[["CC_2"]][229] = "03159"
germanyKreise[["CC_2"]][211] = "03159"
germanyKreise[["NAME_2"]][229] = "Göttingen"
# extract information about the elections into one dataframe
electionKreise = kreise %>% ungroup() %>% dplyr::select(Gruppenname, Gebietsname, Prozent, Gebietsnummer)
# merge the dataframe with the spatial_df
merge(germanyKreise, electionKreise, by.x = "CC_2", by.y = "Gebietsnummer") -> germanyKreise_joined
# create the colors for the seven parties
pals <- colorFactor(c("black","#6495ed","#32CD32", "red","#0000ee","#FF6961","yellow"), levels = Parteien, Parteien)
# normalize the percentages in order to fill in the opacity
# (min-max scaling to [0, 1]; extra args such as na.rm pass through to min/max)
normal <- function(x, ...){(x - min(x, ...)) / (max(x, ...) - min(x, ...))}
# produce the pop-up display
polygon_popup <- paste0("<strong>Kreis: </strong>", germanyKreise_joined$NAME_2, "<br>",
"<strong>Gewinnerpartei: </strong>", germanyKreise_joined$Gruppenname, "<br>",
"<strong>Anteil: </strong>", round(germanyKreise_joined$Prozent, 2),
"<strong> % </strong>")
# create the mapping via leaflet
leaflet() %>%
addProviderTiles("CartoDB.Positron") %>%
addPolygons(data = germanyKreise_joined,
fillOpacity = ~normal(Prozent, na.rm = TRUE),
fillColor= ~pals(Gruppenname),
weight = 2,
color = "white",
popup = polygon_popup)
|
750e90251e0fe5b9161236de582cad50c43cdbaf
|
ed823b6da656fb94805c7ff74dfd7b921c5624c9
|
/man/mm10.chromosomes.Rd
|
0add80fa00ef86353b24258e1707189f0af19896
|
[] |
no_license
|
vallotlab/ChromSCape
|
cbde454c903445706e75b27aade45a7a68db5986
|
382eac1015cd7f67e448124faf5a917f4c973aa1
|
refs/heads/master
| 2023-03-15T20:18:37.915065
| 2023-03-13T16:46:50
| 2023-03-13T16:46:50
| 191,729,569
| 11
| 5
| null | 2019-07-03T13:06:05
| 2019-06-13T09:10:39
|
R
|
UTF-8
|
R
| false
| true
| 600
|
rd
|
mm10.chromosomes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{mm10.chromosomes}
\alias{mm10.chromosomes}
\title{Data.frame of chromosome length - mm10}
\format{
mm10.chromosomes - a data frame with 24 rows and 3 variables:
\describe{
\item{chr}{Chromosome - character}
\item{start}{Start of the chromosome (bp) - integer}
\item{end}{End of the chromosome (bp) - integer}
}
}
\usage{
data("mm10.chromosomes")
}
\description{
This data frame provides the length of each "canonical" chromosomes of
Mus Musculus (Mouse) genome build mm10.
}
\keyword{datasets}
|
9a1797d69d370181d96af293ce94b58ceefb8294
|
2e731f06724220b65c2357d6ce825cf8648fdd30
|
/BayesMRA/inst/testfiles/rmvn_arma_scalar/libFuzzer_rmvn_arma_scalar/rmvn_arma_scalar_valgrind_files/1612725998-test.R
|
e2a7616dfbec4061333fad9b4145d7c75c84cf7b
|
[] |
no_license
|
akhikolla/updatedatatype-list1
|
6bdca217d940327d3ad42144b964d0aa7b7f5d25
|
3c69a987b90f1adb52899c37b23e43ae82f9856a
|
refs/heads/master
| 2023-03-19T11:41:13.361220
| 2021-03-20T15:40:18
| 2021-03-20T15:40:18
| 349,763,120
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 132
|
r
|
1612725998-test.R
|
# Auto-generated fuzzing reproducer: calls BayesMRA::rmvn_arma_scalar with a
# fixed argument list (a = a large double, b = the smallest subnormal
# double) and prints the structure of the result.
testlist <- list(a = 2405990150192.13, b = 4.94065645841247e-324)
result <- do.call(BayesMRA::rmvn_arma_scalar,testlist)
str(result)
|
cf120c14daff0aee5769c9d37d282feb965cdda8
|
2dd4f5b6b22ce1de32fa6e86e9b787e456c81209
|
/[P21_08] Project Code.R
|
6467c9ee959e9e4c6d72c3093eb9606d9cffd040
|
[] |
no_license
|
ethanduncan65/20-Years-Later
|
d1e3a0324d56278b9fda982edb9e53f0a77434d0
|
703dd9cc5775bbc27ff2074dad44bc4a67269059
|
refs/heads/main
| 2023-08-13T05:47:50.360927
| 2021-10-06T21:33:13
| 2021-10-06T21:33:13
| 399,227,890
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,073
|
r
|
[P21_08] Project Code.R
|
# [P21_08] Project Code — predicting MLB playoff appearances (2015-2019 data).
# Fits SLR models for win percentage, LOOCV over polynomial degrees, and
# logistic models for the odds of making the playoffs; then predicts 2021
# probabilities for Seattle and Boston.
#install package for cv.glm()
library(boot)
# load the data
df = read.csv('MLB Team Stats 2015-2019.csv', header = TRUE)
# reduce data to variables of interest
df_playoff = df[-c(3:6, 8:9, 13, 15:20, 27)]
# quick EDA
pairs(df_playoff) ### {win_pct vs. run_diff}
# plot made_playoffs against likely predictors
par(mfrow=c(2,2))
plot(df_playoff$win_pct, df_playoff$made_playoffs)
plot(df_playoff$vsWO_win_pct, df_playoff$made_playoffs)
plot(df_playoff$road_win_pct, df_playoff$made_playoffs)
plot(df_playoff$vsLHP_win_pct, df_playoff$made_playoffs)
# SLR model for win_pct = payroll_M
slr_model_payroll = lm(df_playoff$win_pct ~ df_playoff$payroll_M)
slr1_summ = summary(slr_model_payroll)
# plot data against regression eq.
plot(df_playoff$payroll_M, df_playoff$win_pct)
abline(a=slr1_summ$coefficients[1], b=slr1_summ$coefficients[2], col='red')
# make prediction for Mariner's win_pct (2021 payroll of $81.26M)
pred_1 = slr1_summ$coefficients[1] + slr1_summ$coefficients[2]*(81.257217)
pred_1
# SLR model for win_pct = Rdiff
slr_model_runDiff = lm(df_playoff$win_pct ~ df_playoff$Rdiff)
summary(slr_model_runDiff)
# LOOCV on orders of payroll_M
x = df_playoff$payroll_M
y = df_playoff$win_pct
# df consisting of payroll_M and win_pct only
df = data.frame(x,y)
# vector to store error for each model
errors = c()
# for loop over the 7 models we want to fit
for (i in 1:7){
set.seed(99)
# fit model based on degree of polynomial
fit.glm.i <- glm(y~poly(x,i))
# extract and append error to storage vector
i_err = cv.glm(df, fit.glm.i)$delta[1]
errors = c(errors, i_err)
}
# plot the errors
plot(c(1:7),errors, xlab = "degree of polynomial", ylab = "error")
lines(c(1:7),errors)
# logistic regression model to predict odds of making playoffs using only payroll_M
logit_modelX = glm(df_playoff$made_playoffs ~df_playoff$payroll_M,
family = binomial(link = "logit"))
summary(logit_modelX)
cv.glm(df_playoff, logit_modelX)$delta[1]
# logistic regression model to predict odds of making playoffs
# (leading "+" is safe here: the glm() call's parentheses are still open)
logit_model = glm(df_playoff$made_playoffs ~ df_playoff$road_win_pct
+ df_playoff$vsWO_win_pct
+ df_playoff$payroll_M, family = binomial(link = "logit"))
logit_1 = summary(logit_model)
cv.glm(df_playoff, logit_model)$delta[1]
# calc. current season stats for Mariners needed for prediction
SEA2021_road_win_pct = 14/(14+20)
SEA2021_vsWO_win_pct = 15/(15+20)
# prediction for Seattle Mariners
# BUG FIX: the "+" continuation terms previously started their own lines,
# so R parsed the first line as a complete expression and silently evaluated
# (and discarded) the remaining terms — lnOdds omitted two predictors.
# Ending each line with "+" keeps the whole linear predictor together.
lnOdds = logit_1$coefficients[1] + logit_1$coefficients[2]*(SEA2021_road_win_pct) +
logit_1$coefficients[3]*(SEA2021_vsWO_win_pct) +
logit_1$coefficients[4]*(81.257217)
pred_prob = exp(lnOdds)/(1+exp(lnOdds))
# calc. current season stats for Boston Red Sox needed for prediction
BOS2021_road_win_pct = 20/(20+10)
BOS2021_vsWO_win_pct = 17/(17+11)
# prediction for Boston Red Sox (same continuation fix as above)
lnOdds_BOS = logit_1$coefficients[1] + logit_1$coefficients[2]*(BOS2021_road_win_pct) +
logit_1$coefficients[3]*(BOS2021_vsWO_win_pct) +
logit_1$coefficients[4]*(176.846501)
pred_prob_BOS = exp(lnOdds_BOS)/(1+exp(lnOdds_BOS))
|
c91e09bc6c807572e163a40165c57d9df3723423
|
06aa50fc00e7c7ebbdec19450f531222a23aa0d7
|
/man/gradrate.Rd
|
97d934cc61bc46e61b993991542f1637f7c08752
|
[
"MIT"
] |
permissive
|
djliden/youthhealthr
|
5c4074d90956a1bf287c67c26124e71f3a9a427f
|
a1c2958a5d46d77b81fb1c01b81e53efa8941ee9
|
refs/heads/master
| 2023-03-17T06:05:39.024302
| 2021-03-16T22:15:38
| 2021-03-16T22:15:38
| 305,777,929
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 586
|
rd
|
gradrate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datadoc.R
\docType{data}
\name{gradrate}
\alias{gradrate}
\title{2015-2019 4-year graduation rates in Nevada}
\format{
a tibble with 40 rows and 3 columns:
\describe{
\item{Group:}{Race/Ethnicity}
\item{rate:}{4-year graduation rate}
\item{year:}{Graduating class year (e.g. 2015 = class of 2015)}
}
}
\source{
\url{https://www.samhsa.gov/data/report/2017-2018-nsduh-state-specific-tables}
}
\usage{
gradrate
}
\description{
A dataset showing the 4-year graduation rate by race in Nevada
}
\keyword{datasets}
|
395600229a21dcd5471c15de8a4662a52d241fc6
|
165cddf1b6eb420642e7ab6035bbb5d8817ceea9
|
/plot4.R
|
ceb085ebef464355dac6ead2536e104fef897dd0
|
[] |
no_license
|
guschini/ExData_Plotting1
|
0ac3715c3abe0765655d6e30f86dd977fcabf118
|
e72130a442d88d412ea0245ffeb7209133cf1a50
|
refs/heads/master
| 2021-01-21T06:19:21.110426
| 2017-02-26T20:21:21
| 2017-02-26T20:21:21
| 83,214,281
| 0
| 0
| null | 2017-02-26T14:21:53
| 2017-02-26T14:21:53
| null |
UTF-8
|
R
| false
| false
| 457
|
r
|
plot4.R
|
# plot4.R — builds a 2x2 panel figure (plot4.png) that combines the plots
# from plot2.R and plot3.R with voltage and global reactive power over time.
source("read_data.R")
# NOTE(review): source("plot2.R")/source("plot3.R") evaluate in the global
# environment by default and so redefine the global do_plot mid-call; the
# subsequent do_plot(data) calls then resolve to those sourced versions.
# This appears intended but is fragile — confirm.
# NOTE(review): the column name data$TimeTime looks suspicious — confirm it
# is the datetime column produced by read_data.R.
do_plot <- function(data = get_data()){
png(filename="plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))
source("plot2.R")
do_plot(data)
with(data, plot(data$TimeTime, data$Voltage, xlab = "datetime", ylab = "Voltage", type="l"))
source("plot3.R")
do_plot(data)
with(data, plot(data$TimeTime, data$Global_reactive_power, xlab = "datetime", ylab = "Global_reactive_power", type="l"))
dev.off()
}
|
df8029ba709b861a759742720a597232ca05073b
|
13c1757aa797082f47245f0a725ad8ccdcb639fe
|
/man/plot.kendall_fit.Rd
|
6df090d54e30579c00761c352c37e671e6d64a2c
|
[
"MIT"
] |
permissive
|
mstaniak/kendallRandomPackage
|
dfe043554ecbf537fcc103dbb01a1754a981eca5
|
6f7a9728f53272d6538ee306ec47698b7382f4eb
|
refs/heads/master
| 2020-05-22T08:38:23.750713
| 2019-08-26T12:17:05
| 2019-08-26T12:17:05
| 84,684,409
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 479
|
rd
|
plot.kendall_fit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stable_kendall_fit.R
\name{plot.kendall_fit}
\alias{plot.kendall_fit}
\title{QQ-plot for the result of fitting stable Kendall distribution.}
\usage{
\method{plot}{kendall_fit}(x, ...)
}
\arguments{
\item{x}{List returned by fit_separate or fit_kendall function.}
\item{...}{Additional arguments.}
}
\value{
ggplot2 object
}
\description{
QQ-plot for the result of fitting stable Kendall distribution.
}
|
6a685beee78123622d5df02748f5994f751a37f1
|
ab8d121437f155ea43c86818c728940ac66b5e67
|
/ui.R
|
82f269a1a16364cc291f4dc6aebe8555e93bf6e1
|
[] |
no_license
|
andrewbaxter439/Name_that_country
|
4b5175077fc12420b4549a90fa55586be1d200b3
|
a37750f4be5d9de61f7f0fc7433da4ebceb015fa
|
refs/heads/master
| 2020-07-04T08:47:25.141111
| 2019-08-13T22:53:29
| 2019-08-13T22:53:29
| 202,227,862
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,294
|
r
|
ui.R
|
# Shiny UI for the "Name that country!" guessing game: a map plot, styled
# text outputs for the answer, a Next button, and a Hard Mode toggle.
ui <- fluidPage(
# Inline CSS styling the text outputs referenced by id below.
# NOTE(review): the #c_typing2 rule is missing ";" separators between its
# declarations — confirm it renders as intended (its output element is
# commented out below anyway).
tags$head(tags$style("#c_typing{color: blue;
font-size: 40px;
font-style: bold;
}
#c_title{color: black;
font-size: 40px;
font-style: bold;
}
#c_typing2{color: grey;
font-size: 40px
font-style: bold
}"
)
),
# Forward every browser keypress to the server as input$letterpress.
tags$script('
$(document).on("keypress", function (e) {
Shiny.onInputChange("letterpress", e.which);
});
'),
# Application title
br(),
column(4, offset = 4, align = "center", titlePanel("Name that country!")),
# plot and other parts
plotOutput("map"),
br(),
br(),
br(),
br(),
# Text outputs styled by the CSS ids above.
column(6, offset = 3, align = "center", h1(strong(textOutput("c_title")))),
column(6, offset = 3, align = "center", h1(strong(textOutput("c_typing")))),
# column(6, offset = 3, align = "center", h1(strong(textOutput("c_typing2")))),
column(4, offset = 4, align = "center", actionButton("update", "Next")),
column(4, offset = 4, align = "center", checkboxInput(inputId = "hard", label = "Hard Mode", value = FALSE))
)
|
699b69d122cd73b29a8f24dccef865a79c12074e
|
22fa1f93d4008dfb544d6f850f43334d839692f6
|
/analyses/zarchive/models_stan_nsc.R
|
e886780c8ef8bb1e956c343140c67beea07a654e
|
[] |
no_license
|
cchambe12/nscradiocarbon
|
c27f6b4be0b5523b1a32a5cecd009117685603e3
|
6bb9694d9f8a494f73e48225f114a2bb4ee63a5e
|
refs/heads/master
| 2020-08-14T01:24:32.660625
| 2020-06-05T14:09:46
| 2020-06-05T14:09:46
| 215,071,988
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,547
|
r
|
models_stan_nsc.R
|
### 14 October 2019 - Cat
## Now with real data...
### Main Question:
# This is really what I want to show -
# that concentrations vary seasonally A LOT in shallower increments, and only a little in deeper increments.
## And this varies between diffuse and ring-porous wood anatomies
## Main Issues with data:
# Right skew of data for total and total sugar concentration
## zero-inflated starch concentration
## housekeeping
# NOTE(review): rm(list=ls()) and setwd() in scripts are discouraged;
# prefer a fresh session and project-relative paths.
rm(list=ls())
options(stringsAsFactors = FALSE)
library(ggplot2)
library(dplyr)
library(tidyr)
library(brms)
library(rstanarm)
## Load the data
setwd("~/Documents/git/nscradiocarbon/analyses/")
### Ring-porous first:
ring <- read.csv("input/ring.csv")
# NOTE(review): `diff` shadows base::diff for the rest of the script.
diff <- read.csv("input/diff.csv")
# Prefix letters a/b/c force alphabetical ordering spring < summer < autumn,
# presumably so spring becomes the reference level in the models — confirm.
ring$season <- ifelse(ring$season=="spring", "aspring", ring$season)
ring$season <- ifelse(ring$season=="summer", "bsummer", ring$season)
ring$season <- ifelse(ring$season=="autumn", "cautumn", ring$season)
diff$season <- ifelse(diff$season=="spring", "aspring", diff$season)
diff$season <- ifelse(diff$season=="summer", "bsummer", diff$season)
diff$season <- ifelse(diff$season=="autumn", "cautumn", diff$season)
# Split each wood type by NSC assay method.
ring.total <- ring[(ring$method=="total"),]
diff.total <- diff[(diff$method=="total"),]
ring.sugar <- ring[(ring$method=="sugar"),]
diff.sugar <- diff[(diff$method=="sugar"),]
ring.starch <- ring[(ring$method=="starch"),]
diff.starch <- diff[(diff$method=="starch"),]
# Six brms models: conc ~ season with season slopes varying by increment,
# fitted separately per wood type (ring/diffuse) and assay
# (total/sugar/starch); each fit is saved to stan/.
ringtot.mod <- brm(conc ~ season + (season | increment), data=ring.total,
control=list(max_treedepth = 15,adapt_delta = 0.99),
prior = prior(normal(0, 30), class = "Intercept") + prior(normal(0, 10), class = "sd") +
prior(normal(0,10), class = "b"))
save(ringtot.mod, file="stan/ringtotalconc_randincr.Rdata")
difftot.mod <- brm(conc ~ season + (season | increment), data=diff.total,
control=list(max_treedepth = 15,adapt_delta = 0.99),
prior = prior(normal(0, 30), class = "Intercept") + prior(normal(0, 10), class = "sd") +
prior(normal(0,10), class = "b"))
save(difftot.mod, file="stan/difftotalconc_randincr.Rdata")
ringsug.mod <- brm(conc ~ season + (season | increment), data=ring.sugar,
control=list(max_treedepth = 15,adapt_delta = 0.99),
prior = prior(normal(0, 30), class = "Intercept") + prior(normal(0, 10), class = "sd") +
prior(normal(0,10), class = "b"))
save(ringsug.mod, file="stan/ringsugarconc_randincr.Rdata")
diffsug.mod <- brm(conc ~ season + (season | increment), data=diff.sugar,
control=list(max_treedepth = 15,adapt_delta = 0.99),
prior = prior(normal(0, 30), class = "Intercept") + prior(normal(0, 10), class = "sd") +
prior(normal(0,10), class = "b"))
save(diffsug.mod, file="stan/diffsugarconc_randincr.Rdata")
ringstar.mod <- brm(conc ~ season + (season | increment), data=ring.starch,
control=list(max_treedepth = 15,adapt_delta = 0.99),
prior = prior(normal(0, 30), class = "Intercept") + prior(normal(0, 10), class = "sd") +
prior(normal(0,10), class = "b"))
save(ringstar.mod, file="stan/ringstarchconc_randincr.Rdata")
diffstar.mod <- brm(conc ~ season + (season | increment), data=diff.starch,
control=list(max_treedepth = 15,adapt_delta = 0.99),
prior = prior(normal(0, 30), class = "Intercept") + prior(normal(0, 10), class = "sd") +
prior(normal(0,10), class = "b"))
save(diffstar.mod, file="stan/diffstarchconc_randincr.Rdata")
# Exploratory combined-wood models, deliberately disabled via if(FALSE).
if(FALSE){
alltot <- full_join(ring.total, diff.total)
alltot$season <- ifelse(alltot$season=="spring", "aspring", alltot$season)
alltot$season <- ifelse(alltot$season=="summer", "bsummer", alltot$season)
alltot$season <- ifelse(alltot$season=="autumn", "cautumn", alltot$season)
allseas.mod <- brm(conc ~ increment*wood + (increment*wood|season), data=alltot,
control=list(max_treedepth = 15,adapt_delta = 0.99))
save(allseas.mod, file="stan/allwood_seasrand.Rdata")
#alltot$ring <- ifelse(alltot$wood=="ring", 1, 0)
allincr.mod <- brm(conc ~ season*wood + (season*wood | increment), data=alltot,
control=list(max_treedepth = 15,adapt_delta = 0.99))
save(allincr.mod, file="stan/allwood_incrrand.Rdata")
#loo1 <- loo(tot.arm.inc)
#loo2 <- loo(tot.arm.student)
#compare_models(loo1, loo2)
launch_shinystan(ringtot.mod)
}
|
1d8d990a6d3ed00dcee76abf276559536804f39c
|
034397bd13dbcff5a3e77ab0480df9eb33468e64
|
/MovieLens.R
|
b4ffeea061694e81fce981e151d42dc524ab1473
|
[] |
no_license
|
ksapaev/MovieLens
|
59a40e341c0f1e623bb1c222d43d5f0fb9da1eab
|
9c77f991e21910beec552b7606211dfb273981c7
|
refs/heads/master
| 2020-12-04T21:58:06.711201
| 2020-01-05T12:28:00
| 2020-01-05T12:28:00
| 231,913,657
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,882
|
r
|
MovieLens.R
|
### Author: Khushnud Sapaev
### Project: MovieLens Project
### Course: HarvardX: PH125.9x - Capstone Project
### GitHub: https://github.com/ksapaev/
###############################
### Dataset and Preparation ###
###############################
# Create edx set and validation set
# Note: this process could take a couple of minutes
# Loading libraries, if does not exist then installing first
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
ratings <- read.table(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
col.names = c("userId", "movieId", "rating", "timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(levels(movieId))[movieId],
title = as.character(title),
genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data
set.seed(1)
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
semi_join(edx, by = "movieId") %>%
semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
rm(dl, ratings, movies, test_index, temp, movielens, removed)
############################
### Methods and Analysis ###
############################
# Summary statistics of both sets; count rows with no NA's.
summary(edx)
sum(complete.cases(edx))
summary(validation)
sum(complete.cases(validation))
# Number of ratings by genre combination and by rating value.
edx %>% group_by(genres) %>% summarize(count = n()) %>% arrange(desc(count))
edx %>% group_by(rating) %>% summarize(count = n()) %>% arrange(desc(count))
# Bar chart of the rating distribution (half-star steps from 0.5 to 5).
ggplot(edx, aes(rating)) + geom_bar(fill = "orange") +
scale_x_continuous(breaks = c(seq(0.5,5,0.5))) +
scale_y_continuous(breaks = c(seq(0, 2500000, 500000))) +
theme_bw() + theme(panel.border = element_blank()) +
ggtitle("Rating distribution of movies")
# Number of unique users and movies in the training set.
edx %>% summarize(Users = length(unique(userId)), Movies = length(unique(movieId)))
# Movies rated exactly once (their effects are hard to estimate reliably).
edx %>% group_by(movieId) %>% summarize(count = n()) %>% filter(count == 1) %>%
left_join(edx, by = "movieId") %>% group_by(title) %>% summarize(rating = rating, count = count)
# Histogram of the number of ratings per movie (log-scaled x axis).
edx %>% count(movieId) %>% ggplot(aes(n)) +
geom_histogram(bins = 25) +
scale_x_log10() +
xlab("Number of ratings") +
ylab("Number of movies") +
ggtitle("Number of ratings per movie")
# Histogram of the number of ratings per user (log-scaled x axis).
edx %>% count(userId) %>% ggplot(aes(n)) +
geom_histogram(bins = 25) +
scale_x_log10() +
xlab("Number of ratings") +
ylab("Number of users") +
ggtitle("Number of ratings per user")
############################
### Modelling Approach ###
############################
##########################################
### Movie and user effect model
# Baseline model: predicted rating = overall mean (mu) + per-movie effect (m_a)
# + per-user effect (u_a), each estimated as a plain mean of residuals.
mu <- mean(edx$rating)
movie_avgs <- edx %>% group_by(movieId) %>% summarize(m_a = mean(rating - mu))
user_avgs <- edx %>% left_join(movie_avgs, by='movieId') %>% group_by(userId) %>%
summarize(u_a = mean(rating - mu - m_a))
# Predict on the validation set and compute the RMSE.
# NOTE(review): RMSE() is assumed to come from caret (loaded above) -- confirm.
predicted_ratings <- validation %>% left_join(movie_avgs, by='movieId') %>%
left_join(user_avgs, by='userId') %>%
mutate(predict = mu + m_a + u_a) %>% pull(predict)
movie_user_model_rmse <- RMSE(predicted_ratings, validation$rating)
# Record and show the result.
rmse_results <- tibble(method="Movie and user effect model", RMSE = movie_user_model_rmse)
rmse_results
##########################################
## Regularized movie and user effect model
# lambda is a tuning parameter that shrinks effects estimated from few ratings.
lambdas <- seq(0, 10, 0.25)
# For each lambda: re-estimate m_a and u_a with shrinkage (sum/(n + l)),
# predict on the validation set, and record the resulting RMSE.
rmses <- sapply(lambdas, function(l){
mu <- mean(edx$rating)
m_a <- edx %>% group_by(movieId) %>% summarize(m_a = sum(rating - mu)/(n()+l))
u_a <- edx %>% left_join(m_a, by="movieId") %>% group_by(userId) %>%
summarize(u_a = sum(rating - mu - m_a)/(n()+l))
predicted_ratings <- validation %>% left_join(m_a, by = "movieId") %>%
left_join(u_a, by = "userId") %>%
mutate(predict = mu + m_a + u_a) %>% pull(predict)
return(RMSE(predicted_ratings, validation$rating))
})
# Plot RMSE against lambda to inspect the tuning curve.
qplot(lambdas, rmses)
# The optimal lambda is the one with the minimal RMSE.
lambda <- lambdas[which.min(rmses)]
# Compare with the result of the previous (unregularised) model.
rmse_results <- tibble(method="Regularized movie and user effect model", RMSE = min(rmses))
rmse_results %>% knitr::kable()
|
599c8d2a172b43f9636ae6d4c3d6bda87e04b7db
|
43bc17bf2e2ec22df7fac309c416d1788e7c7d83
|
/supervised-learning-in-r-classification/naive-bayes-prediction.R
|
20de19936c0c01e39f7c06a312cef37f871aaf4a
|
[] |
no_license
|
EvanKaeding/datacamp
|
178d1574d6acfd3d77f37e9156cc3abbe95a5937
|
747cd437361da88b4690499ab293c9d1845c7f1e
|
refs/heads/master
| 2020-04-04T18:03:35.472557
| 2019-04-07T00:15:39
| 2019-04-07T00:15:39
| 156,148,026
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,318
|
r
|
naive-bayes-prediction.R
|
# Naive Bayes predictions (DataCamp exercise script).
# Download Brett's 9am location data and read it in.
download.file(url = "https://assets.datacamp.com/production/course_2906/datasets/locations.csv",
destfile = "bretts-location.csv", method = "curl")
where9am <- read.csv(file = "bretts-location.csv", stringsAsFactors = TRUE)
# Doing a basic probability calculation by hand
# Compute P(A): probability Brett is at the office
p_A <- nrow(subset(where9am, location == "office"))/nrow(where9am)
# Compute P(B): probability the day is a weekday
p_B <- nrow(subset(where9am, daytype == "weekday"))/nrow(where9am)
# Compute the observed joint probability P(A and B).
# Fix: the original passed the second condition as subset()'s `select`
# argument, which silently ignored it and computed P(A) instead of
# P(A and B). Both conditions must be combined with `&`.
p_AB <- nrow(subset(where9am, location == "office" & daytype == "weekday"))/nrow(where9am)
# Compute P(A | B) = P(A and B) / P(B)
p_A_given_B <- p_AB/p_B
print(p_A_given_B)
## Using the Naivebays package to build a model (NOT AVAILABLE FOR R 3.3)
# NOTE(review): naive_bayes() below belongs to the `naivebayes` package;
# e1071 only provides naiveBayes(). As written, these calls will fail with
# e1071 loaded -- either load naivebayes or switch to naiveBayes().
library(e1071)
# Build the location prediction model
locmodel <- naive_bayes(location ~ daytype, data = where9am)
# Predict Thursday's 9am location
predict(locmodel, newdata = data.frame(daytype = as.factor("weekday")))
# Predict Saturdays's 9am location
predict(locmodel, data.frame(daytype = as.factor("weekend")))
## Hashing out some of these probabilities
# NOTE(review): thursday9am / saturday9am / locations / weekday_evening /
# weekend_afternoon are DataCamp-provided objects not defined in this script.
# Examine the location prediction model
print(locmodel)
# Obtain the predicted probabilities for Thursday at 9am
predict(locmodel, newdata = thursday9am , type = "prob")
# Obtain the predicted probabilities for Saturday at 9am
predict(locmodel, newdata = saturday9am , type = "prob")
## Building a multi-feature bayes model
# Build a NB model of location from day type and hour type
locmodel <- naive_bayes(location ~ daytype + hourtype, data = locations)
# Predict Brett's location on a weekday afternoon
predict(locmodel, data.frame(daytype = "weekday", hourtype = "afternoon"))
# Predict Brett's location on a weekday evening
predict(locmodel, weekday_evening)
## Adding the Laplace correction
# Observe the predicted probabilities for a weekend afternoon
predict(locmodel, weekend_afternoon, type = "prob")
# Build a new model using the Laplace correction (laplace = 1 avoids
# zero-probability estimates for unseen feature/class combinations)
locmodel2 <- naive_bayes(location ~ daytype + hourtype,
data = locations, laplace = 1)
# Observe the new predicted probabilities for a weekend afternoon
predict(locmodel2, weekend_afternoon, type = "prob")
|
1e04b652a5051b791eb46ff3a91dc388c82d165a
|
b856d3b1d21207a026b7f756e1540cde1cc55b0f
|
/man/get_superclasses_information.Rd
|
f9890241503d507945f67ffe2b9c67a71f999642
|
[] |
no_license
|
robertzk/refclass
|
9629beb904f351697b3a43696811912bc5c4f27d
|
8b403c36dd24f9b60b654d8d8926d476b924bd84
|
refs/heads/master
| 2021-01-11T11:10:07.721148
| 2015-10-04T15:58:44
| 2015-10-04T15:58:44
| 24,520,672
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,079
|
rd
|
get_superclasses_information.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{get_superclasses_information}
\alias{get_superclasses_information}
\title{Get superclass information.}
\usage{
get_superclasses_information(contains, where)
}
\arguments{
\item{contains}{character. A character vector of super class names.}
\item{where}{environment. The environment in which to look for superClasses.
# TODO: (RK) Provide ability to look for super classes in multiple environments?}
}
\value{
a list with keys \code{superClasses}, \code{isRefSuperClass}, and
\code{superClassDefs}, indicating a character vector of super class names,
whether each is a defined reference class or not, and the super class
definitions, respectively.
}
\description{
Provide a list containing \code{superClasses}, (the actual class names)
\code{isRefSuperClass}, a vector of logicals indicating
whether each superclass is a reference class,
and \code{superClassDefs}, containing the actual superclass
definitions, obtained using \code{methods::getClass}.
}
\details{
Note that \code{envRefClass} will always be one of the returned
superclasses.
}
|
7440523449bcde8770b0461cbeb6b0e8c0068795
|
214bd9de4719c2131e95f4331c9eaf9d03c4b378
|
/scripts/utilityScripts/runDnDsCv.R
|
038b965b5f1d172e519a0d650263fb51b78e362e
|
[] |
no_license
|
ndfriedman/evolution_of_hypermutation
|
ff721eab19adbd1bbca99b0b390fdc347585ccbb
|
28999f742cedc059931217e0fae082c0f4a7c2f8
|
refs/heads/master
| 2023-01-04T02:48:11.022869
| 2020-10-16T15:22:42
| 2020-10-16T15:22:42
| 294,239,633
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,204
|
r
|
runDnDsCv.R
|
#written by noah friedman, help from craig to run
#a template for R scripts for plotting
library(ggplot2)
library(grid)
require(cowplot)
library(egg)
library(dplyr)
library(data.table); setDTthreads(6)
library(dndscv)
library(data.table)
library(ggrepel)
library(plyr)
# Reduce a MAF (Mutation Annotation Format) table to the minimal column
# layout used downstream by dndscv: sampleID, chr, pos, ref, mut, plus
# gene / protein-change / cohort annotations. Keeps only SNP/INS/DEL rows.
# Note the data.table `.()` j-expression is split across lines: the `.`
# at the end of the subset line and the `(` opening the next line belong
# together.
fix_maf_cols <- function(maf){
maf <- data.table(maf)
mutations <- maf[Variant_Type %in% c("SNP", "INS", "DEL"),.
(sampleID = Tumor_Sample_Barcode,
chr = Chromosome,
pos = Start_Position,
ref = Reference_Allele,
mut = Tumor_Seq_Allele2,
gene= Hugo_Symbol,
hgvs =HGVSp_Short,
status=hypermutantStatus,
cancerType=cancerType
)]
# Commented-out chromosome-name normalisation ("chr" prefix handling).
#mutations$chr = paste("chr", as.vector(mutations$chr), sep='')
#gsub("chr","",as.vector(mutations$chr))
return(mutations)
}
# Run dNdScv separately on the normal and hypermutated tumours of each
# requested cancer type.
#
# inputMaf:    mutation table in the layout produced by fix_maf_cols()
#              (columns sampleID, chr, pos, ref, mut, status, cancerType)
# cancerTypes: character vector of cancer-type labels to process
#
# Returns a named list (one entry per cancer type); each entry is a list
# holding the dndscv() result objects for the 'Hypermutated' and 'Normal'
# cohorts.
run_dnds_on_normal_and_hypermutated_tumors <- function(inputMaf, cancerTypes){
# MSK-IMPACT 468 gene panel; genes dndscv cannot handle are removed below.
gene_list468 <- c('ABL1', 'ACVR1', 'AGO2', 'AKT1', 'AKT2', 'AKT3', 'ALK', 'ALOX12B', 'AMER1', 'ANKRD11', 'APC', 'AR', 'ARAF', 'ARID1A', 'ARID1B', 'ARID2', 'ARID5B', 'ASXL1', 'ASXL2', 'ATM', 'ATR', 'ATRX', 'AURKA', 'AURKB', 'AXIN1', 'AXIN2', 'AXL', 'B2M', 'BABAM1', 'BAP1', 'BARD1', 'BBC3', 'BCL10', 'BCL2', 'BCL2L1', 'BCL2L11', 'BCL6', 'BCOR', 'BIRC3', 'BLM', 'BMPR1A', 'BRAF', 'BRCA1', 'BRCA2', 'BRD4', 'BRIP1', 'BTK', 'CALR', 'CARD11', 'CARM1', 'CASP8', 'CBFB', 'CBL', 'CCND1', 'CCND2', 'CCND3', 'CCNE1', 'CD274', 'CD276', 'CD79A', 'CD79B', 'CDC42', 'CDC73', 'CDH1', 'CDK12', 'CDK4', 'CDK6', 'CDK8', 'CDKN1A', 'CDKN1B', 'CDKN2A', 'CDKN2B', 'CDKN2C', 'CEBPA', 'CENPA', 'CHEK1', 'CHEK2', 'CIC', 'CREBBP', 'CRKL', 'CRLF2', 'CSDE1', 'CSF1R', 'CSF3R', 'CTCF', 'CTLA4', 'CTNNB1', 'CUL3', 'CXCR4', 'CYLD', 'CYSLTR2', 'DAXX', 'DCUN1D1', 'DDR2', 'DICER1', 'DIS3', 'DNAJB1', 'DNMT1', 'DNMT3A', 'DNMT3B', 'DOT1L', 'DROSHA', 'DUSP4', 'E2F3', 'EED', 'EGFL7', 'EGFR', 'EIF1AX', 'EIF4A2', 'EIF4E', 'ELF3', 'EP300', 'EPAS1', 'EPCAM', 'EPHA3', 'EPHA5', 'EPHA7', 'EPHB1', 'ERBB2', 'ERBB3', 'ERBB4', 'ERCC2', 'ERCC3', 'ERCC4', 'ERCC5', 'ERF', 'ERG', 'ERRFI1', 'ESR1', 'ETV1', 'ETV6', 'EZH1', 'EZH2', 'FAM175A', 'FAM46C', 'FAM58A', 'FANCA', 'FANCC', 'FAT1', 'FBXW7', 'FGF19', 'FGF3', 'FGF4', 'FGFR1', 'FGFR2', 'FGFR3', 'FGFR4', 'FH', 'FLCN', 'FLT1', 'FLT3', 'FLT4', 'FOXA1', 'FOXL2', 'FOXO1', 'FOXP1', 'FUBP1', 'FYN', 'GATA1', 'GATA2', 'GATA3', 'GLI1', 'GNA11', 'GNAQ', 'GNAS', 'GPS2', 'GREM1', 'GRIN2A', 'GSK3B', 'GTF2I', 'H3F3A', 'H3F3B', 'H3F3C', 'HGF', 'HIST1H1C', 'HIST1H2BD', 'HIST1H3A', 'HIST1H3B', 'HIST1H3C', 'HIST1H3D', 'HIST1H3E', 'HIST1H3F', 'HIST1H3G', 'HIST1H3H', 'HIST1H3I', 'HIST1H3J', 'HIST2H3C', 'HIST2H3D', 'HIST3H3', 'HLA-A', 'HLA-B', 'HNF1A', 'HOXB13', 'HRAS', 'ICOSLG', 'ID3', 'IDH1', 'IDH2', 'IFNGR1', 'IGF1', 'IGF1R', 'IGF2', 'IKBKE', 'IKZF1', 'IL10', 'IL7R', 'INHA', 'INHBA', 'INPP4A', 'INPP4B', 'INPPL1', 'INSR', 'IRF4', 'IRS1', 'IRS2', 'JAK1', 'JAK2', 'JAK3', 'JUN', 'KDM5A', 'KDM5C', 
'KDM6A', 'KDR', 'KEAP1', 'KIT', 'KLF4', 'KNSTRN', 'KRAS', 'LATS1', 'LATS2', 'LMO1', 'LYN', 'MALT1', 'MAP2K1', 'MAP2K2', 'MAP2K4', 'MAP3K1', 'MAP3K13', 'MAP3K14', 'MAPK1', 'MAPK3', 'MAPKAP1', 'MAX', 'MCL1', 'MDC1', 'MDM2', 'MDM4', 'MED12', 'MEF2B', 'MEN1', 'MET', 'MGA', 'MITF', 'MLH1','KMT2A', 'KMT2D', 'KMT2C', 'KMT2B', 'MPL', 'MRE11A', 'MSH2', 'MSH3', 'MSH6', 'MSI1', 'MSI2', 'MST1', 'MST1R', 'MTOR', 'MUTYH', 'MYC', 'MYCL1', 'MYCN', 'MYD88', 'MYOD1', 'NBN', 'NCOA3', 'NCOR1', 'NEGR1', 'NF1', 'NF2', 'NFE2L2', 'NFKBIA', 'NKX2-1', 'NKX3-1', 'NOTCH1', 'NOTCH2', 'NOTCH3', 'NOTCH4', 'NPM1', 'NRAS', 'NSD1', 'NTHL1', 'NTRK1', 'NTRK2', 'NTRK3', 'NUF2', 'NUP93', 'PAK1', 'PAK7', 'PALB2', 'PARK2', 'PARP1', 'PAX5', 'PBRM1', 'PDCD1', 'PDCD1LG2', 'PDGFRA', 'PDGFRB', 'PDPK1', 'PGR', 'PHOX2B', 'PIK3C2G', 'PIK3C3', 'PIK3CA', 'PIK3CB', 'PIK3CD', 'PIK3CG', 'PIK3R1', 'PIK3R2', 'PIK3R3', 'PIM1', 'PLCG2', 'PLK2', 'PMAIP1', 'PMS1', 'PMS2', 'PNRC1', 'POLD1', 'POLE', 'PPARG', 'PPM1D', 'PPP2R1A', 'PPP4R2', 'PPP6C', 'PRDM1', 'PRDM14', 'PREX2', 'PRKAR1A', 'PRKCI', 'PRKD1', 'PTCH1', 'PTEN', 'PTP4A1', 'PTPN11', 'PTPRD', 'PTPRS',
'PTPRT', 'RAB35', 'RAC1', 'RAC2', 'RAD21', 'RAD50', 'RAD51', 'RAD51B', 'RAD51C', 'RAD51D', 'RAD52', 'RAD54L', 'RAF1', 'RARA', 'RASA1', 'RB1', 'RBM10', 'RECQL', 'RECQL4', 'REL', 'RET', 'RFWD2', 'RHEB', 'RHOA', 'RICTOR', 'RIT1', 'RNF43', 'ROS1', 'RPS6KA4', 'RPS6KB2', 'RPTOR', 'RRAGC', 'RRAS', 'RRAS2', 'RTEL1', 'RUNX1', 'RXRA', 'RYBP', 'SDHA', 'SDHAF2', 'SDHB', 'SDHC', 'SDHD', 'SESN1', 'SESN2', 'SESN3', 'SETD2', 'SETD8', 'SF3B1', 'SH2B3', 'SH2D1A', 'SHOC2', 'SHQ1', 'SLX4', 'SMAD2', 'SMAD3', 'SMAD4', 'SMARCA4', 'SMARCB1', 'SMARCD1', 'SMO', 'SMYD3', 'SOCS1', 'SOS1', 'SOX17', 'SOX2', 'SOX9', 'SPEN', 'SPOP', 'SPRED1', 'SRC', 'SRSF2', 'STAG2', 'STAT3', 'STAT5A', 'STAT5B', 'STK11', 'STK19', 'STK40', 'SUFU', 'SUZ12', 'SYK', 'TAP1', 'TAP2', 'TBX3', 'TCEB1', 'TCF3', 'TCF7L2', 'TEK', 'TERT', 'TET1', 'TET2', 'TGFBR1', 'TGFBR2', 'TMEM127', 'TMPRSS2', 'TNFAIP3', 'TNFRSF14', 'TOP1', 'TP53', 'TP53BP1', 'TP63', 'TRAF2', 'TRAF7', 'TSC1', 'TSC2', 'TSHR', 'U2AF1', 'UPF1', 'VEGFA', 'VHL', 'VTCN1', 'WHSC1', 'WHSC1L1', 'WT1', 'WWTR1', 'XIAP', 'XPO1', 'XRCC2', 'YAP1', 'YES1', 'ZFHX3', 'ZRSR2')
# Genes in the panel that dndscv cannot process.
impactGenesThatDontWorkForDnds <- c("MAP3K14", "MYCL1", "RYBP", "KMT5A", "PAK5", "FAM123B")
gene_list468 <- setdiff(gene_list468, impactGenesThatDontWorkForDnds)
# Pre-size the result list, one slot per cancer type.
dndsResults <- vector(mode="list", length=length(cancerTypes))
names(dndsResults) <- cancerTypes
# MAIN LOOP: run dNdScv on the two cohorts of every cancer type.
for(cType in cancerTypes){
print(paste('working on', cType))
localList <- vector(mode="list", length=2)
names(localList) <- c('Hypermutated', 'Normal')
normalMaf <- inputMaf[(inputMaf$cancerType == cType) & (inputMaf$status == 'Normal'),]
# Fix: the original subset the *global* `maf` here instead of the
# `inputMaf` argument, silently depending on a global variable.
hyperMaf <- inputMaf[(inputMaf$cancerType == cType) & (inputMaf$status == 'Hypermutated'),]
# dndscv only needs the five core columns.
normalMaf <- normalMaf[,c('sampleID', 'chr' ,'pos' ,'ref' ,'mut')]
hyperMaf <- hyperMaf[,c('sampleID', 'chr' ,'pos' ,'ref' ,'mut')]
# Caps are lifted (Inf) because hypermutated samples legitimately carry
# very large mutation counts.
print(paste('running normal, nmut:', dim(normalMaf)))
dndsNormal <- dndscv(normalMaf, gene_list = gene_list468, max_muts_per_gene_per_sample = Inf, max_coding_muts_per_sample = Inf, use_indel_sites=FALSE)
print(paste('running hyper, nmut:', dim(hyperMaf)))
dndsHyper <- dndscv(hyperMaf, gene_list = gene_list468, max_muts_per_gene_per_sample = Inf, max_coding_muts_per_sample = Inf, use_indel_sites=FALSE)
localList$Hypermutated <- dndsHyper
localList$Normal <- dndsNormal
dndsResults[[cType]] <- localList
}
return(dndsResults)
}
#TODO there is an error similar to:
#https://github.com/im3sanger/dndscv/issues/14
# Cancer types with cohorts to analyse.
cancerTypes=
c('Colorectal Cancer', 'Endometrial Cancer', 'Glioma',
'Prostate Cancer', 'Esophagogastric Cancer', 'Bladder Cancer')
# Load the IMPACT mutation table (comma-separated despite the .maf extension).
maf <- read.table('/Users/friedman/Desktop/hypermutationProjectFinal/files/mafs/impact_mutations_maf_synonymous_included.maf', sep = ',', header=TRUE)
maf <- fix_maf_cols(maf)
dndsResults <- run_dnds_on_normal_and_hypermutated_tumors(maf, cancerTypes)
# Merge each cancer type's per-cohort selection tables (sel_cv) into one
# table with .Normal / .Hypermutated column suffixes.
dndsMerged <- vector(mode="list", length=length(cancerTypes))
for(cancerType in cancerTypes){
sel_cvN <- dndsResults[[cancerType]]$Normal$sel_cv
sel_cvH <- dndsResults[[cancerType]]$Hypermutated$sel_cv
mergedCtype <- merge(sel_cvN, sel_cvH, suffixes = c(".Normal",".Hypermutated"), by='gene_name')
mergedCtype$cancerType <- cancerType
dndsMerged[[cancerType]] <- mergedCtype
}
combinedDf <- bind_rows(dndsMerged)
colnames(combinedDf)
# NOTE(review): make_dnds_plot() is not defined in this file -- presumably
# sourced elsewhere; confirm before running.
plt <- make_dnds_plot(combinedDf, title='DNDS Hypermutated vs\nNon-hypermutated')
ggsave('~/Desktop/plot.pdf', plt, width = 8, height = 8, units = c("in"))
write.table(combinedDf, file='/Users/friedman/Desktop/mnt/juno/work/taylorlab/friedman/myAdjustedDataFiles/dndsHypermutants.tsv', quote=FALSE, sep='\t')
####
########
##############
##################
########################
##################
##############
###########
#########
###
#TODO move this analysis to its own neighborhood
df <- read.table('/Users/friedman/Desktop/WORK/dataForLocalPlotting/dndscv_pancan_vs_local_genes.tsv', sep = '\t', header=TRUE)
# NOTE(review): sel_cvEdnoN / sel_cvColoN are not created in this script --
# they appear to come from an interactive session; confirm their origin.
endoNormalGenes <- unique(sel_cvEdnoN[sel_cvEdnoN$qglobal_cv < .01,]$gene_name)
coloNormalGenes <- unique(sel_cvColoN[sel_cvColoN$qglobal_cv < .01,]$gene_name)
#dfLim <- subset(df, !(gene %in% endoNormalGenes))
dfLim <- subset(df, !(gene %in% coloNormalGenes))
# Volcano-style plot: oncogenic mutation count vs pan-cancer dndscv q value.
ggplot(dfLim, aes(x=nOncogenicMuts, y=1- log10(qVal), label=displayLabelOncogenic))+
geom_point()+
geom_text_repel()+
geom_segment(aes(xend=0, yend=2, y=2, x=40))+
geom_segment(aes(xend=0, yend=3, y=3, x=40))+
xlim(0,50)+
xlab('N oncogenic mutations across hypermutated cancer-type cohort')+
ylab('1 - log10(pan-cancer dndscv q value)')+
ggtitle('Oncogenic mutations in colorectal cancer\nin non-colorectal cancer related genes')
#PLOT THE DNDSCV information
df <- read.table('/Users/friedman/Desktop/WORK/dataForLocalPlotting/dndscvSummary.tsv', sep = '\t', header=TRUE)
make_dnds_plot(df, 'DNDS-CV Comparing Hypermutated and\n Non-Hypermutated Cancers')
|
deeb7cecd3448ac3d9e2a2d296f7e3489dcef28e
|
05083015cff89d8cbb53df35eeadbaa43fe3d2f3
|
/Assignment2/pollutantmean.R
|
d4f6a6f8c4e2cf2608f94bbc6bd7b85bef033ab8
|
[] |
no_license
|
AviB18/DataScienceCoursera
|
bdc2067563f98b5bf5a63bedae6fae201729b591
|
c2571e645a4e692af1265f92c30081cbe7da66be
|
refs/heads/master
| 2020-05-19T12:46:05.731944
| 2014-10-26T13:16:52
| 2014-10-26T13:16:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 391
|
r
|
pollutantmean.R
|
# Compute the mean of a pollutant across a set of monitor CSV files.
#
# directory: path to the folder containing monitor files named
#            "001.csv", "002.csv", ..., each with a column per pollutant
# pollutant: name of the pollutant column to average (e.g. "sulfate")
# id:        integer vector of monitor IDs to include (default all 332)
#
# Returns the mean of the pollutant values pooled across the requested
# monitors, with NA values removed.
#
# Fixes vs the original: no setwd() side effect (the old version left the
# working directory changed if read.csv() errored mid-way), and no O(n^2)
# list growth via append() in a loop -- paths are built with file.path()
# and the columns are collected with lapply()/unlist().
pollutantmean <- function(directory, pollutant, id = 1:332) {
file_names <- paste0(formatC(id, width = 3, flag = "0"), ".csv")
paths <- file.path(directory, file_names)
# Extract the requested column from every monitor file and pool the values.
values <- unlist(lapply(paths, function(p) read.csv(p)[[pollutant]]))
mean(values, na.rm = TRUE)
}
|
e2e7c421fd7884ee274959fc8e5faf49723a1839
|
88e2e55f7ac29695ed214b7d52bb85a0bbf658e8
|
/scripts/05_tibble_adatbeolvasas.R
|
a4cf91df8610a75708edfa1be66fb9dbb614fdb9
|
[] |
no_license
|
bpolner/introduction_to_R
|
391cc9fc83acd4a937dc741e895170abf50f9732
|
e801e2e03b4fa6d315c6eeb2427522846ded9b9d
|
refs/heads/master
| 2022-10-28T18:03:31.763798
| 2022-10-24T08:46:45
| 2022-10-24T08:46:45
| 168,689,525
| 4
| 5
| null | 2021-03-05T20:18:23
| 2019-02-01T11:44:37
|
HTML
|
UTF-8
|
R
| false
| false
| 14,898
|
r
|
05_tibble_adatbeolvasas.R
|
# A tibble. Fájlok olvasása és írása.
library(tidyverse)
# 1. A tibble ----------------------------------------------------------------
# A tibble csomaggal lehet őket kezelni - ez is benne van a tidyverse szupercsomagban
# data.frame-ből tibble:
iris
class(iris)
as_tibble(iris)
# Vektorokbol tibble
tibble(
x = 1:5,
y = 1,
z = x ^ 2 + y
)
# Egyelemű bemeneteket automatikusan újra felhasználja / a példában y
# Lehet hivatkozni az éppen létrehozott változókra / a példában z
# A tibble-ben a valtozonevek lehetnek ervenytelen R valtozonevek
# ` ` (magyar bill: Alt Gr + 7) hasznalataval lehet rajuk hivatkozni
tb <- tibble(
`:)` = "smile",
` ` = "space",
`2000` = "number"
)
tb
# Transzponalt adatbevitel a tribble() fuggvennyel:
# ez akkor nagyon jó, ha kódban akarunk bevinni egy kisebb adattáblát.
# Oszlopok neveit formulával adjuk meg ( ~ <név>),
# az értékeket pedig vesszővel elválasztva soroljuk fel
tribble(
~x, ~y, ~z,
#--/--/---- olvashatóbb, ha a fejléc alá egy komment sort teszünk
"a", 2, 3.6,
"b", 1, 8.5
)
# tibble nyomtatasa a konzolra: alapbeállítás szerint 10 sor,
# és annyi oszlop, amennyi a konzolra kifér
# minden oszlop alatt látjuk a típusát is
mpg
# Megjelenített sorok számának és a konzolra nyomtatott adatok szélességének szabályozása:
print(mpg, n = 20, width = 100)
# Modosithatjuk az alapbeallitasokat is
# Ha tobb mint m sor van, nyomtasson n sort
# options(tibble.print_max = n, tibble.print_min = m)
# Mindig minden sor nyomtatasa:
options(dplyr.print_min = Inf)
# Mindig minden oszlop nyomtatasa, a konzol szelessegetol fuggetlenul:
options(tibble.width = Inf)
# További opciók:
package?tibble
?print.tbl
# Megtekintés az RStudio-ban:
View(mpg)
# Hogyan tudunk kinyerni egyetlen változót?
df <- tibble(
x = runif(5),
y = rnorm(5)
)
# $ - csak nev alapjan
df$x
# [[]] - nev (string!) es pozicio alapjan is
df[["x"]]
df[[1]]
# Ha egy pipe-ban szeretnénk használni, használjuk a . helyőrzőt!
df %>% .$x
df %>% .[[1]]
# Ha nem létező oszlopot próbálunk elérni,
# figyelmeztetést kapunk, eredményül pedig NULL-t
df$z
# Nehany regebbi csomag nem kompatibilis a tibble-lel.
# Ilyenkor konvertaljuk az adatainkat data.frame-é:
as.data.frame(df)
# 1.1 tibble - gyakorlas ----------------
# 1) Nyerd ki a diamonds táblából a color változót
# a) név alapján!
# b) pozíció alapján!
# 2) Ha egy valtozo neve egy változóban tarolva, hogyan lehet a valtozot kinyerni
# egy tibble-bol? mpg adatokban van a cty valtozo
var <- "cty"
# 3) Hasonlítsd össze a következő műveleteket data.frame-n és
# egy egyező tibble-n! Mi a különbség?
# Mi lehet a gond a data.frame működésében?
df <- data.frame(abc = 1, xyz = "a")
df$x
df[, "xyz"]
df[, c("abc", "xyz")]
df_t <- tibble(abc = 1, xyz = "a")
df_t$x
df_t[, "xyz"]
df_t[, c("abc", "xyz")]
# 2. Importing data ------------------------------------
# 2.1 utils ----
# read.csv(), read.delim(), read.table()
# The first argument of the reader functions is the file path:
# we have to tell the function where the file we want
# to load into R lives.
magas <- read.csv("C://Users/Berci/teaching/introduction_to_R/data/magassagok_1.txt")
# This is inconvenient, however, if we want
# the code to work on several machines.
# On your machine, for example, it quite likely will not work.
# The solution is to use an RStudio project.
# Then the project folder containing the .RProj file is the "starting point".
# This path also works on other machines, as long as the introduction_to_R.RProj project is open.
magas <- read.csv("data/magassagok_1.txt")
# How paths are written differs by operating system.
# file.path builds a path in the format appropriate for the current OS:
path <- file.path("data", "magassagok_1.txt")
magas <- read.csv(path)
# How does file.path know the right format?
# Let's look at the source code!
file.path
# It reads it from the .Platform list
.Platform
# read.csv works well when the csv file marks decimals with a dot
# and separates columns with commas (Anglo-Saxon convention)
# BUT: some locales mark decimals with a comma (e.g. Hungarian)
# and separate columns with semicolons;
# read.csv does not "understand" these:
path <- file.path("data", "magassagok_2.txt")
(magas_2 <- read.csv(path))
str(magas_2)
# read.csv2 is designed for exactly this:
(magas_2 <- read.csv2(path))
str(magas_2)
# What do read.csv and read.csv2 do?
read.csv
read.csv2
# They call the read.table function,
# with the sep and dec arguments set appropriately
# 2.2 readr ----
# Most readr functions read text files:
# read_csv (comma separated)
# read_tsv (tab separated)
# read_csv2 (semicolon separated)
# read_delim (any delimiter can be given)
# read_fwf (fixed-width files)
# the first argument of the read functions is the path of the file to read
read_csv("data/magassagok_1.txt")
data_path <- "data"
path <- file.path(data_path, "magassagok_1.txt")
read_csv(path)
# The input csv can also be supplied inline when calling the function.
# This is a way to experiment with how readr works,
# and to give others reproducible examples (e.g. on stackoverflow)
read_csv(
"a,b,c
1,2,3
4,5,6"
)
# readr interprets the first line of the file as column names.
# This can be overridden! When might that be needed?
# 1) metadata at the top of the file
read_csv(
"The first line of metadata
The second line of metadata
x,y,z
1,2,3",
skip = 2
)
read_csv(
"# A comment I want to skip
x,y,z
1,2,3",
comment = "#"
)
# 2) the data has no header
# a, declare there is no header (\n denotes a line break)
read_csv("1,2,3\n4,5,6", col_names = FALSE)
# b, supply a header explicitly
read_csv("1,2,3\n4,5,6", col_names = c("x", "y", "z"))
# specifying how missing data is marked: na argument
read_csv("a,b,c\n1,2,.", na = ".")
# In 75% of cases the above is all you need to know
# for reading csv files!
# readr compared to the base R readers:
# - faster
# - reads into tibbles
# - character columns are not automatically turned into factors
# - does not use row names
# - better reproducibility (base R depends on the OS and on R settings)
# 2.2.1 readr - exercises -------------------------------------------------
# 1) In the data folder, digitspan_data.txt holds four people's age and digit span.
# The first two lines contain the date of data collection and the course name. We do not want to read these.
# However, we want the first column to be named "age" and the second "digitspan".
# Read digitspan_data.txt into a tibble accordingly!
# 2) What is wrong with these csv files given inline?
read_csv("a,b\n1,2,3\n4,5,6")
read_csv("a,b,c\n1,2\n1,2,3,4")
read_csv("a,b\n\"1")
read_csv("a,b\n1,2\na,b")
read_csv("a;b\n1;3")
# 2.3 Parsing vectors ---------------------------------------------------
# How does readr read files from disk?
# First, let's look at how the parse_* functions work!
# Given a character vector, they return a specialised vector.
str(parse_logical(c("TRUE", "FALSE", "NA")))
str(parse_integer(c("1", "2", "3")))
str(parse_date(c("2010-01-01", "1979-10-14")))
# 1st arg: character vector to parse, 2nd arg: how NA is marked
parse_integer(c("1", "231", ".", "456"), na = ".")
# What happens when parsing fails?
x <- parse_integer(c("123", "345", "abc", "123.45"))
# Where parsing failed we get NA
x
# Reviewing the failed parses:
problems(x)
# 2.3.1 Numbers ------------------------------------------------------------
# Why is it not so simple?
# a, the decimal mark may be . or ,
parse_double("1.23")
# Setting locale-specific conventions: locale
parse_double("1,23", locale = locale(decimal_mark = ","))
# b, numbers are sometimes surrounded by special characters $ %
# parse_number is very handy: it drops every character that is not a number
parse_number("$100")
parse_number("20%")
parse_number("It cost $123.45")
# c, different grouping characters 1'000 1,000 1 000
# parse_number, with the grouping mark given in the locale:
# USA
parse_number("$123,456,789")
# Common in Europe:
parse_number("123.456.789", locale = locale(grouping_mark = "."))
# Switzerland:
parse_number("123'456'789", locale = locale(grouping_mark = "'"))
# 2.3.2 Strings ------------------------------------------
# The same string can be represented in several ways.
# How does R represent a string?
charToRaw("Hadley")
# Each hexadecimal number represents one byte of information.
# A character encoding is the mapping between hexadecimal numbers and characters.
# The ASCII encoding works for English.
# Other languages:
# Latin1 / ISO-8859-1 Western European languages
# Latin2 / ISO-8859-2 Eastern European languages
#
# Very widespread and encodes almost everything: UTF-8
# readr uses UTF-8 by default for both reading and writing.
# Problems arise when the data is not encoded in UTF-8;
# then we see strange strings:
x1 <- "\x82\xb1\x82\xf1\x82\xc9\x82\xbf\x82\xcd"
x1
parse_character(x1, locale = locale(encoding = "Shift-JIS"))
# How do we figure out the encoding?
# In the better case it is stated in the documentation.
# If not, guess_encoding can help (it works better with lots of text)
szoveg <- "Ez egy magyar nyelvű szöveg."
guess_encoding(charToRaw(szoveg))
# 2.3.3 Factors (categorical variables) -----------------------------------
# The set of possible values is known in advance
fruit <- c("apple", "banana")
parse_factor(c("apple", "banana", "bananana"), levels = fruit)
# With many problematic cases it is better to read the column as
# strings instead of a factor, and clean it up later
# 2.3.4 Date and time ------------------------------------------------------
# By default:
# a, parse_datetime expects ISO8601 format: year, month, day, hour, minute, second
parse_datetime("2010-10-20 141345")
# If the time part is missing, it is taken to be midnight
parse_datetime("20101010")
# b, parse_date: year (4 digits), then - or /, month, then - or /, and the day:
parse_date("2010-10-01")
# c, parse_time: hour : minute (optional : second and am/pm)
parse_time("01:10 am")
parse_time("20:10:01")
# If the defaults do not work, a custom format can be supplied;
# for details see https://r4ds.had.co.nz/data-import.html#readr-datetimes
parse_date("01/02/15", "%d/%m/%y") # day/month/year, i.e. February 1st
parse_date("01/02/15", "%m/%d/%y") # month/day/year, i.e. January 2nd
parse_date("Jan 23 2015", "%b %d %Y") # <abbreviated month name> <day> <year>
# 2.4 Parsing files -----------------------------------------------------
# How does readr guess the type of the columns?
# It looks at the first 1000 rows and applies heuristics.
# UPDATE: newer read_csv versions look at every row by default
guess_parser("2010-10-01")
guess_parser("15:01")
guess_parser(c("TRUE", "FALSE"))
guess_parser(c("1", "5", "9"))
guess_parser(c("12,352,561"))
# But the first 1000 rows may be special, e.g.
# - integers in the first 1000 rows, decimal fractions only later
# - NA in the first 1000 rows, actual values only later
# Let's see an example!
challenge <- read_csv(readr_example("challenge.csv"))
problems(challenge)
# What column specification did the previous read use?
# Let's set the column types explicitly!
challenge <- read_csv(
readr_example("challenge.csv"),
col_types = cols(
x = col_double(),
y = col_character()
)
)
# What about y?
challenge
tail(challenge)
# Let's give this column its correct type as well!
challenge <- read_csv(
readr_example("challenge.csv"),
col_types = cols(
x = col_double(),
y = col_date()
)
)
challenge
tail(challenge)
# If the data is already in R as a character vector: parse_xyz
# To control how readr reads a file: col_xyz
# It is worth always giving the column types explicitly:
# the data import becomes consistent and more reproducible.
# Another solution is to guess the column type
# from more rows.
challenge_2 <- read_csv(
readr_example("challenge.csv"),
guess_max = 3000
)
# Or read every column as character,
challenge_2 <- read_csv(
readr_example("challenge.csv"),
col_types = cols(.default = col_character())
)
# and then let type_convert detect the column types
type_convert(challenge_2)
# How does type_convert guess the type?
# http://r4ds.had.co.nz/data-import.html#parsing-a-file
# 2.4 parsing files - exercises
# 1) In the data folder, recog_data.csv holds the results of a recognition test in a peculiar format.
# The first column is the participant id, the second the recognition accuracy given in %,
# and the third column the participant's age.
# Where the age is unknown, the table says "nem ismert" ("not known").
# The top row contains the name of the experiment.
# Read and process the data, making sure that
# - the experiment name is not part of the resulting tibble
# - the columns get sensible names already at read time
# - missing data is read as NA
# - after reading, the percentage accuracy is converted to a decimal between 0 and 1 (mutate)
# 3. Writing to files ----------------------------------------------------------
# readr: write_tsv and write_csv
# They write easily readable files:
# - strings are encoded in UTF-8
# - date and date-time values in ISO8601 format
# Two args are mandatory: which data, and where to write it
write_csv(challenge, "challenge_out.csv")
ch_again <- read_csv("challenge_out.csv")
# The column types were lost along the way!
# When can this be a problem?
# Alternatives: write complete objects to file
# a, RDS (R's own binary file format)
write_rds(challenge, "challenge_out.RDS")
(ch_rds <- read_rds("challenge_out.RDS"))
# b, feather (a fast binary format that other languages "understand" too) (requires the feather package!)
# install.packages("feather")
# library(feather)
write_feather(challenge, "challenge.feather")
read_feather("challenge.feather")
# More data formats (xls, xlsx, SPSS, SAS, Stata, ...)
# http://r4ds.had.co.nz/data-import.html#other-types-of-data
# https://cran.r-project.org/web/packages/XLConnect/vignettes/XLConnect.pdf
|
374e88f2c3cbb5b9f865277a09f4040ceb8bc254
|
b2eca9ace5716ca74d66980cd89b3baf8805597c
|
/hierarchical_CleanedData.R
|
b9e62878e45a9fb3d1adf5ec0544a133d985f8d6
|
[] |
no_license
|
AlexandrosPetropoulos/BankMarketing
|
c00fc4ea271f9abd6abf330fac3e22fc58e97e63
|
e5f185205bf52eb1ff14f83b71237a3223b20907
|
refs/heads/main
| 2023-01-30T14:38:14.507393
| 2020-12-18T13:22:10
| 2020-12-18T13:22:10
| 322,601,082
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,073
|
r
|
hierarchical_CleanedData.R
|
# Clear plots
if(!is.null(dev.list())) dev.off()
# Clean workspace
# NOTE(review): rm(list = ls()) and setwd() in scripts are discouraged --
# kept unchanged to preserve the original interactive workflow
rm(list=ls())
# Clear console
cat("\014")
# Set working directory
setwd("~/Big_Data")
# Load libraries (some may be left over from earlier experiments and may no
# longer be needed)
# to install a package: install.packages("caret")
#library(rpart) #DECISION TREES
#library(rpart.plot) #DECISION TREES
#library(mice)
#library(VIM)
#library(gdata)
#library(plyr)# note: plyr must be loaded before dplyr
#library(dplyr)
library(ggplot2)
#library(FSelector)
#library(e1071) #NAIVE BAYES
library(caret)
#library(pander)
#library(class)
library(MLmetrics)# needed for the confusionMatrix metrics
#library(mlbench)
#library(klaR)
library(class) #KNN
#library(ROCR)
library(cluster)#KMEANS,HIERARCHICAL
#library(factoextra)
#library(NbClust)
#library(pracma)
#library(mixtools)
#library(dbscan)
#library(DataExplorer) #create_report(train)
#hkmeans
# Import data from csv (semicolon-separated, with header row)
bank_data_original=read.table(file ="bank-additional/bank-additional-full.csv",header=TRUE,sep=";")
bank <- bank_data_original
# Data preparation / cleaning
# shuffle the data
# WARNING: replace MUST be FALSE, otherwise thousands of duplicate rows appear
# (see: sampling with replacement)
set.seed(248)
bank <- bank[sample(1:nrow(bank),replace=FALSE), ]
# split into train and test set, stratified on the target y
# createDataPartition returns the row indices of the training partition
inBank <- createDataPartition(y = bank$y, p = .70, list = FALSE)
#inBank <- createDataPartition(y = bank, p = .70, list = FALSE)
training <- bank[inBank,]
testing <- bank[-inBank,]
#library(ROSE)
# balance the data (not used in this run)
#data.balanced.ou <- ovun.sample(y~., data=training,N=nrow(training), p=0.5, seed=1, method="both")$data
#training<-data.balanced.ou
# first scale the data, then binarize
# note: train and test must be preprocessed consistently, as done here
# (preProcess fitted on the training set, applied to both)
preProcValues <- preProcess(training, method = c("center", "scale"))
#preProcValues <- preProcess(training, method = "range",rangeBounds = c(0, 1))
trainTransformed <- predict(preProcValues, training)
testTransformed <- predict(preProcValues, testing)
# NOTE(review): the next two assignments discard the centering/scaling just
# computed -- presumably intentional for the "unscaled" experiment (cf. the
# file name hierarchical_CleanedData), but confirm
trainTransformed <- training
testTransformed <- testing
# dummify the data (one-hot encode the factor columns)
# dummifying y makes it take values between 0 and 1
#dmy <- dummyVars(" ~ .", data = trainTransformed)
dmy <- dummyVars(" ~ .", data = trainTransformed, fullRank=T)
trainset <- data.frame(predict(dmy, newdata = trainTransformed))
# the target became y.no and y.yes, so keep only one of the two
#trainset <-trainset[,-dim(trainset)[2]]
colnames(trainset)[dim(trainset)[2]] <- "y"
#dmy <- dummyVars(" ~ .", data = testTransformed)
dmy <- dummyVars(" ~ .", data = testTransformed, fullRank=T)
testset <- data.frame(predict(dmy, newdata = testTransformed))
# the target became y.no and y.yes, so keep only one of the two
#testset <-testset[,-dim(testset)[2]]
colnames(testset)[dim(testset)[2]] <- "y"
#trainset<-trainset[,c(1:40,54)]
#testset<-testset[,c(1:40,54)]
################### UNSUPERVISED ###################
#
# Hierarchical clustering
# load a precomputed distance object `d` to avoid recomputing it
#load("dist_un")
load("dist_unscaled.Rdata")
#d = dist(trainset[,-ncol(trainset)])
#dim(trainset) 27364 54 range 0-1
#save(d, file = "dist_hierarchical_70_30.Rdata")
# which linkage method to use?
# e.g. Ward's tends to give spherical clusters
hc_single = hclust(d, method = "single")
#plot(hc_single)
clusters = cutree(hc_single, k = 8)
hc_complete = hclust(d, method = "complete")
#plot(hc_complete)
clusters = cutree(hc_complete, k = 8)
hc_ave = hclust(d, method = "ave")
#plot(hc_complete)
# NOTE: `clusters` is overwritten on each of the cutree calls above; the
# average-linkage 3-cluster solution is the one used downstream
clusters = cutree(hc_ave, k = 3)
# Silhouette analysis: the optimum comes out at 2 clusters
slc = c()
for (i in 2:10){
# Create clusters
clusters = cutree(hc_ave, k = i)
# Calculate and store silhouette values
slc [i-1] = mean(silhouette(clusters, d)[, 3])
}
plot(2:10, slc, type="b", xlab="Number of Clusters",
ylab="Silhouette")
abline(v=3, lty=2)
# Elbow method for hierarchical clustering (not implemented)
# combine the train and test data
#?
#?
#?
nCluster = 3
#clusters = cutree(hc_single, k = nCluster)
#head(clusters)
# majority class (0/1) of the target y within each cluster
majorityClass <- NULL
for (i in 1:nCluster){
#trainset[clusters == i &,,drop=FALSE]
if(dim(trainset[clusters==i & trainset$y==0,])[1]>dim(trainset[clusters==i & trainset$y==1,])[1]){
majorityClass <- c(majorityClass,0)
}
else{
majorityClass <- c(majorityClass,1)
}
}
majorityClass
# assign as the new y the cluster to which each row belongs
#temptrainset <-trainset
#temptrainset$y<-NULL
#temptrainset$yy <- clusters
# here, instead of the train y, we simply pass the generated cluster labels
#prediction = knn(trainset[,-ncol(trainset)], testset[,-ncol(testset)], temptrainset$yy, k = 1)
#head(prediction)
# the line below is sufficient; the temp objects above can be removed
# (`clusters` might need to be a data frame, but it works as is)
prediction = knn(trainset[,-ncol(trainset)], testset[,-ncol(testset)], clusters, k = 1)
# for the error on the training set
#prediction = knn(trainset[,-ncol(trainset)], trainset[,-ncol(trainset)], clusters, k = 1)
########
# not needed
#for (i in 1:size(clusters)[2]){
# clusters[[i]] <- majorityClass[clusters[[i]]]
#
#}
######
# the conversion is redundant, but this way seemed quicker
# map each predicted cluster label to that cluster's majority class
prediction_int<-as.numeric(prediction)
for(i in 1:length(prediction_int)){
prediction_int[i] <- majorityClass[prediction_int[i]]
}
results <- confusionMatrix(factor(prediction_int,levels = c(0,1)), factor(testset[,ncol(testset)]),positive="1")
setNames(data.frame(results$overall["Accuracy"],results$byClass["Precision"],results$byClass["Recall"],results$byClass["F1"],row.names = NULL),c("Accuracy","Precision","Recall","F1"))
source("draw_confusion_matrix.R")
draw_confusion_matrix(results)
# error for the train set (only valid if the knn call above used the train set)
results <- confusionMatrix(factor(prediction_int,levels = c(0,1)), factor(trainset[,ncol(trainset)]),positive="1")
setNames(data.frame(results$overall["Accuracy"],results$byClass["Precision"],results$byClass["Recall"],results$byClass["F1"],row.names = NULL),c("Accuracy","Precision","Recall","F1"))
############################################################################################
# not used in the end, but it is a useful function; kept for reference
# to assign new elements, compute the distance
# https://stackoverflow.com/questions/39005958/r-how-to-get-row-column-subscripts-of-matched-elements-from-a-distance-matri
## 1D index to 2D index
# Map 1-D indices into a "dist" object to the (row, column) pair of the
# underlying distance matrix (lower triangle, column-major storage).
# Indices outside 1..n*(n-1)/2 yield NA rows.
finv <- function (k, dist_obj) {
  if (!inherits(dist_obj, "dist")) stop("please provide a 'dist' object")
  n_pts <- attr(dist_obj, "Size")
  in_range <- (k >= 1) & (k <= n_pts * (n_pts - 1) / 2)
  j <- rep.int(NA_real_, length(k))
  # column index: closed-form solution of the quadratic that locates the
  # column containing the k-th lower-triangle element
  j[in_range] <- floor(((2 * n_pts + 1) -
                          sqrt((2 * n_pts - 1) ^ 2 - 8 * (k[in_range] - 1))) / 2)
  # row index follows from the offset of k within that column
  i <- j + k - (2 * n_pts - j) * (j - 1) / 2
  cbind(i, j)
}
#dd is a dist object
#finv(which.min(dd),dd)
head(trainset[clusters==i,-ncol(trainset)])
# percentage of yes and no in each cluster, to judge how good the clusters are
# single linkage
# cluster 1
dim(trainset[clusters==1 & trainset$y==1,])[1]
# 2572 yes
dim(trainset[clusters==1 & trainset$y==0,])[1]
#25206 no
2572/(25206+2572)
# 9% yes 90% no
# cluster 2
dim(trainset[clusters==2 & trainset$y==1,])[1]
#375
dim(trainset[clusters==2 & trainset$y==0,])[1]
# cluster 3
dim(trainset[clusters==3 & trainset$y==1,])[1]
#0
dim(trainset[clusters==3 & trainset$y==0,])[1]
#1
# cluster 4
dim(trainset[clusters==4 & trainset$y==1,])[1]
#0
dim(trainset[clusters==4 & trainset$y==0,])[1]
#1
# complete linkage
# cluster 1
dim(trainset[clusters==1 & trainset$y==1,])[1]
# 3042 yes
dim(trainset[clusters==1 & trainset$y==0,])[1]
#25408 no
# cluster 2
dim(trainset[clusters==2 & trainset$y==1,])[1]
#201
dim(trainset[clusters==2 & trainset$y==0,])[1]
#95
dim(trainset[clusters==3 & trainset$y==1,])[1]
#5
dim(trainset[clusters==3 & trainset$y==0,])[1]
#7
# cluster 4
dim(trainset[clusters==4 & trainset$y==1,])[1]
#0
dim(trainset[clusters==4 & trainset$y==0,])[1]
#74
# per-cluster purity: share of the majority class in each cluster
for (i in 1:nCluster){
a=dim(trainset[clusters==i & trainset$y==1,])[1]
b=dim(trainset[clusters==i & trainset$y==0,])[1]
if(majorityClass[i] == 0){
x= (b/(b+a))
}
else{
x = (a/(b+a))
}
print(x)
}
|
d35afe3130be0484307dfcca1ec385d027a4df51
|
b2757d8cca182148d664e55a5d29aa1b82e453e5
|
/script/rml/instacard/tools.R
|
1af239232868b24d6ffa698644d8e1a74a159443
|
[] |
no_license
|
genpack/tutorials
|
99c5efb6b0c81a6ffeda0878a43ad13b1987ceeb
|
c1fe717272f06b81ca7a4855a0b874e98fde4c76
|
refs/heads/master
| 2023-06-10T21:18:27.491378
| 2023-06-06T00:15:14
| 2023-06-06T00:15:14
| 163,214,555
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,774
|
r
|
tools.R
|
## Tools for instacard project:
# Version: 2
# Product Within-Cluster Features Added:
# Second-to-last element of a vector, or -1 when the vector has fewer than
# two elements (sentinel used by build_features for single-order histories).
next_last = function(v){
  n <- length(v)
  if (n < 2) {
    return(-1)
  }
  v[n - 1]
}
# Undo a cumulative sum: recover the per-step increments of v (the first
# element is kept as-is). Returns -1 when v has fewer than two elements.
uncumsum = function(v){
  if (length(v) < 2) {
    return(-1)
  }
  c(v[1], diff(v))
}
## Build the model feature table from raw order data.
## Input `dataset` must contain all orders of each customer up to the current
## order number, already joined with product/aisle/department/cluster ids and
## order metadata (order_number, day_number, order_dow, order_hour_of_day,
## days_since_prior_order, reordered).
## Returns one row per (user_id, product_id) with customer-, product-, aisle-,
## department- and cluster-level aggregates plus derived ratios/rates.
## NOTE(review): relies on a helper `most_frequent()` defined elsewhere in the
## project -- presumably the mode of a vector; confirm.
build_features = function(dataset){
  # order pace: mean/variance of the gap (days) between a customer's orders
  dataset %>%
    arrange(user_id, order_number) %>%
    distinct(user_id, order_number, days_since_prior_order) %>%
    group_by(user_id) %>%
    summarise(avgDaysBetweenCustomerOrders = mean(days_since_prior_order, na.rm = T),
              varDaysBetweenCustomerOrders = var(days_since_prior_order, na.rm = T)) %>%
    ungroup -> customer_order_pace
  # Customer Profile: per-customer totals and order-timing statistics
  dataset %>%
    group_by(user_id) %>%
    summarise(TotalCountCustomerProductsOrdered = length(product_id),
              current_order_id = last(order_id),
              current_order_number = max(order_number),
              current_day_number = max(day_number),
              TotalCountCustomerOrders = max(order_number),
              TotalCountCustomerReorders = sum(reordered),
              avgCustomerOrderHOD = mean(order_hour_of_day %>% as.numeric, na.rm = T),
              maxCustomerOrderHOD = max(order_hour_of_day %>% as.numeric, na.rm = T),
              minCustomerOrderHOD = min(order_hour_of_day %>% as.numeric, na.rm = T),
              avgCustomerOrderDOW = mean(order_dow),
              modCustomerOrderDOW = most_frequent(order_dow)) %>%
    ungroup %>%
    left_join(customer_order_pace, by = 'user_id') -> customer_profile
  # Customer-aisle profile:
  dataset %>%
    group_by(user_id, aisle_id) %>%
    summarise(CountCustomerWithinAisleOrdered = length(order_id)) %>%
    ungroup -> customer_aisle_profile
  # Customer-department profile:
  dataset %>%
    group_by(user_id, department_id) %>%
    summarise(CountCustomerWithinDepartmentOrdered = length(order_id)) -> customer_department_profile
  # Customer-Cluster profile: (By cluster we mean product cluster)
  dataset %>%
    group_by(user_id, cluster_id) %>%
    summarise(CountCustomerWithinClusterOrdered = length(order_id)) %>%
    ungroup -> customer_cluster_profile
  # Product Profile: per-product popularity and order-timing statistics
  dataset %>%
    group_by(product_id) %>%
    summarise(TotalProductOrdered = length(order_id),
              TotalProductReordered = sum(reordered),
              avgProductOrderHOD = mean(order_hour_of_day %>% as.numeric, na.rm = T),
              maxProductOrderHod = max(order_hour_of_day %>% as.numeric, na.rm = T),
              minProductOrderHod = min(order_hour_of_day %>% as.numeric, na.rm = T),
              varProductOrderHod = var(order_hour_of_day %>% as.numeric, na.rm = T),
              avgProductOrderDOW = mean(order_dow),
              modProductOrderDOW = most_frequent(order_dow),
              varProductOrderDOW = var(order_dow)
    ) %>%
    ungroup -> product_profile
  # Aisle Profile:
  dataset %>%
    group_by(aisle_id) %>%
    summarise(AbsoluteAislePopularity = length(order_id),
              TotalWithinAisleReorders = sum(reordered)) %>%
    ungroup -> aisle_profile
  # Department Profile:
  dataset %>%
    group_by(department_id) %>%
    summarise(AbsoluteDepartmentPopularity = length(order_id),
              TotalWithinDepartmentReorders = sum(reordered)) %>%
    ungroup -> department_profile
  # Cluster Profile:
  dataset %>%
    group_by(cluster_id) %>%
    summarise(AbsoluteClusterPopularity = length(order_id),
              TotalWithinClusterReorders = sum(reordered)) %>%
    ungroup -> cluster_profile
  # Feature 1:
  # CountProductOrdered: per (customer, product) counts plus recency features,
  # then join all profile tables
  dataset %>%
    left_join(customer_profile, by = 'user_id') %>%
    arrange(user_id, order_number) %>%
    group_by(user_id, product_id, aisle_id, department_id, cluster_id) %>%
    summarise(CountCustomerProductOrdered = length(order_id),
              CountCustomerProductReordered = sum(reordered),
              CountCustomerProductFirstSelection = sum(order_number == 1),
              CountCustomerProductFirstTwoSelections = sum(order_number < 3),
              CountCustomerProductFirstThreeSelections = sum(order_number < 4),
              IsProductInCurrentCustomersOrder = sum(order_id == current_order_id),
              day_number_last_product_ordered = last(day_number),
              day_number_nextlast_product_ordered = next_last(day_number),
              order_number_last_product_ordered = last(order_number),
              order_number_nextlast_product_ordered = next_last(order_number),
              avgDaysBetweenProductOrders = mean(uncumsum(day_number), na.rm = T),
              varDaysBetweenProductOrders = var(uncumsum(day_number), na.rm = T)
    ) %>%
    ungroup %>%
    left_join(customer_aisle_profile, by = c('user_id', 'aisle_id')) %>%
    left_join(customer_department_profile, by = c('user_id', 'department_id')) %>%
    left_join(customer_cluster_profile, by = c('user_id', 'cluster_id')) %>%
    left_join(product_profile, by = 'product_id') %>%
    left_join(aisle_profile, by = 'aisle_id') %>%
    left_join(department_profile, by = 'department_id') %>%
    left_join(cluster_profile, by = 'cluster_id') %>%
    left_join(customer_profile, by = 'user_id') -> train_data
  ### Computed Features: relative poularities, percentages, ratios and rates ...
  train_data %>%
    mutate(CustomerProductPerOrder = CountCustomerProductOrdered/TotalCountCustomerOrders,
           CustomerWithinAislePerOrder = CountCustomerWithinAisleOrdered/TotalCountCustomerOrders,
           CustomerWithinDepartmentPerOrder = CountCustomerWithinDepartmentOrdered/TotalCountCustomerOrders,
           CustomerWithinClusterPerOrder = CountCustomerWithinClusterOrdered/TotalCountCustomerOrders,
           CustomerProductPerAisle = CountCustomerProductOrdered/CountCustomerWithinAisleOrdered,
           CustomerProductPerDepartment = CountCustomerProductOrdered/CountCustomerWithinDepartmentOrdered,
           CustomerProductPerCluster = CountCustomerProductOrdered/CountCustomerWithinClusterOrdered,
           CustomerProductFirstSelectionRate = CountCustomerProductFirstSelection/CountCustomerProductOrdered,
           CustomerWithinAisleRate = CountCustomerWithinAisleOrdered/TotalCountCustomerProductsOrdered,
           CustomerWithinDepartmentRate = CountCustomerWithinDepartmentOrdered/TotalCountCustomerProductsOrdered,
           CustomerWithinClusterRate = CountCustomerWithinClusterOrdered/TotalCountCustomerProductsOrdered,
           CustomerAislePerDepartment = CountCustomerWithinAisleOrdered/CountCustomerWithinDepartmentOrdered,
           ProductPerAislePopularity = TotalProductOrdered/AbsoluteAislePopularity,
           ProductPerDepartmentPopularity = TotalProductOrdered/AbsoluteDepartmentPopularity,
           ProductPerClusterPopularity = TotalProductOrdered/AbsoluteClusterPopularity,
           AislePerDepartmentPopularity = AbsoluteAislePopularity/AbsoluteDepartmentPopularity,
           DaysSinceLastProductOrdered = day_number_last_product_ordered - day_number_nextlast_product_ordered,
           OrdersSinceLastProductOrdered = order_number_last_product_ordered - order_number_nextlast_product_ordered - 1)
}
# Build a 0/1 label vector for the given (user_id, product_id) pairs.
# `id_set` contains user_id and product_id; `label_dataset` has the same
# columns as the dataset passed to build_features(). A pair is labelled 1
# when it appears in `label_dataset` (reordered is non-NA after the join),
# 0 otherwise. The returned vector is aligned with the rows of `id_set`.
build_labels = function(id_set, label_dataset){
  reorder_info = label_dataset[, c('user_id', 'product_id', 'reordered')]
  # left join keeps the row order of id_set, so the labels stay aligned
  joined = left_join(id_set, reorder_info, by = c('user_id', 'product_id'))
  as.numeric(!is.na(joined$reordered))
}
# Assemble the feature/label split for training.
# `orders`, `products`, `order_products_prior`: raw instacart tables.
# `cutpoint_split`: NOTE(review) -- accepted but never used in the body; the
#   cutpoint is always each customer's last order. TODO confirm intent.
# `customer_subset`: optional vector of user_ids to restrict the data to.
# Returns a list with `feature_dataset` (rows before each customer's cutpoint
# order), `label_dataset` (the cutpoint orders themselves) and
# `cutpoint_orders` (per-customer cutpoint order number/id).
build_training_pack = function(orders, products, order_products_prior, cutpoint_split = 'last_order', customer_subset = NULL){
  # products without a cluster assignment all go to cluster 1; NA -> 0
  if(is.null(products$cluster_id)){products$cluster_id <- 1}
  products$cluster_id[is.na(products$cluster_id)] <- 0
  # join product and order metadata; day_number is the cumulative number of
  # days since the customer's first order (rutils helper, defined elsewhere)
  order_products = order_products_prior %>%
    left_join(products, by = 'product_id') %>%
    left_join(orders %>%
                # filter(eval_set == 'prior') %>%
                select(-eval_set) %>%
                arrange(user_id, order_number) %>%
                mutate(day_number = days_since_prior_order %>% na2zero) %>%
                rutils::column.cumulative.forward(col = 'day_number', id_col = 'user_id')
              , by = 'order_id')
  if(!is.null(customer_subset)){
    order_products %<>% filter(user_id %in% customer_subset)
  }
  ### Pick cutpoint order numbers (each customer's last order):
  order_products %>%
    arrange(user_id, order_number) %>%
    group_by(user_id) %>%
    summarise(cutpoint_order_number = max(order_number),
              cutpoint_order_id = last(order_id)) %>%
    ungroup -> cutpoint_orders
  # flag each row as label (the cutpoint order itself) or feature (before it)
  order_products %<>%
    left_join(cutpoint_orders, by = 'user_id') %>%
    mutate(flag_label = order_number == cutpoint_order_number,
           flag_feature = order_number < cutpoint_order_number)
  ind_features = which(order_products$flag_feature)
  ind_labels = which(order_products$flag_label)
  cat('\n', '%s rows total, %s rows for feature aggregation, %s rows for labeling, %s rows removed!' %>%
        sprintf(nrow(order_products),
                length(ind_features),
                length(ind_labels),
                nrow(order_products) - length(ind_features) - length(ind_labels)))
  return(list(feature_dataset = order_products[ind_features,], label_dataset = order_products[ind_labels,], cutpoint_orders = cutpoint_orders))
}
# Attach cutpoint-order metadata (dow, hour of day, days since prior order)
# to each row of X_train via the per-customer cutpoint order id.
add_cutpoint_features = function(X_train, orders, cutpoint_orders){
  # rename the order columns up front so the join result is self-describing
  cutpoint_info = orders %>%
    select(-user_id, -eval_set, -order_number) %>%
    rename(cutpoint_order_id = order_id,
           cutpoint_order_dow = order_dow,
           cutpoint_order_hod = order_hour_of_day,
           cutpoint_order_dsp = days_since_prior_order)
  X_train %>%
    left_join(cutpoint_orders, by = 'user_id') %>%
    left_join(cutpoint_info, by = 'cutpoint_order_id')
}
##### Event-Oriented Modelling:
## Create Eventlogs from the order_product table:
## For each keyword, builds an eventlog of "<keyword>Ordered" events carrying
## the count of matching products per (customer, order); an "OrderIssued" log
## (value = days since prior order) is added for every involved customer.
## NOTE(review): relies on project helpers `charFilter()` and `na2zero()`
## defined elsewhere -- presumably fuzzy string matching and NA -> 0.
create_eventlog = function(orders, products, order_products, keywords){
  # normalise product names: strip whitespace and dashes before matching
  products$title = products$product_name %>%
    gsub(pattern = '\\s', replacement = '') %>%
    gsub(pattern = '-', replacement = '')
  # keywords %<>% tolower
  # List of keyword products: titles matching each keyword
  keyword_products = list()
  for(kw in keywords){
    keyword_products[[kw]] <- products$title %>% charFilter(kw)
  }
  # restrict to products (and their order rows) matching any keyword
  products_filtered = products %>%
    dplyr::filter(title %in% unlist(keyword_products))
  order_products_filtered = order_products %>%
    dplyr::filter(product_id %in% products_filtered$product_id) %>%
    left_join(products_filtered, by = 'product_id') %>%
    left_join(orders, by = 'order_id')
  eventlogs = list()
  # Creating events: ordering products in which keywords are used:
  for(kw in names(keyword_products)){
    cat('\n', 'Creating eventlog for keyword: ', kw, ' ... ')
    order_products_filtered %>%
      dplyr::filter(title %in% keyword_products[[kw]]) %>%
      rename(caseID = user_id, eventTime = order_number) %>%
      group_by(caseID, eventTime) %>%
      summarise(value = length(product_id)) %>%
      ungroup %>%
      mutate(eventType = paste0(kw, 'Ordered'), attribute = 'Count') %>%
      select(caseID, eventTime, eventType, attribute, value) -> eventlogs[[kw]]
    cat('Done!', '\n')
  }
  users = order_products_filtered$user_id %>% unique
  # one "OrderIssued" event per order of each involved customer,
  # value = days since prior order (DSPO)
  orders %>%
    filter(user_id %in% users) %>%
    mutate(eventType = 'OrderIssued', attribute = 'DSPO') %>%
    select(caseID = user_id, eventTime = order_number, eventType, attribute, value = days_since_prior_order) %>%
    na2zero -> eventlogs[['orders']]
  return(eventlogs)
}
# Build dynamic (sliding-window) features from an eventlog via promer.
# `aggregators` and `win_sizes` are forwarded to promer::add_swf().
# NOTE(review): the `types` argument is accepted but never used in the body --
# confirm whether it was meant to be forwarded.
create_dynamic_features = function(eventlog, aggregators = c('sum', 'max'), types = c('s', 'e'), win_sizes = c(2,3,6)){
  # eventlog = els$chocolate
  eventlog %>%
    # eventTime is an order number; shift it onto an arbitrary date axis
    # (presumably because promer::dfg_pack expects calendar dates -- confirm)
    mutate(eventTime = as.Date('2010-07-01') + eventTime) %>%
    promer::dfg_pack(event_funs = c('count', 'count_cumsum', 'elapsed', 'tte', 'censored'),
                     var_funs = c('sum', 'sum_cumsum', 'last'),
                     horizon = 1) -> dfp
  swf_tables = c('event_count', 'var_sum', 'var_last')
  # value of the %<>% pipe is the (invisible) return value of the function
  dfp %<>% promer::add_swf(tables = swf_tables, aggregators = aggregators, win_sizes = win_sizes)
}
# Flatten a promer feature pack into a modelling dataset.
# All feature tables (everything except the bookkeeping tables listed below)
# are joined on (caseID, eventTime); the label vector for the target keyword
# is pulled from the event_label table.
extract_dataset_from_dfpack = function(dfp, target_keyword){
  feature_tables = names(dfp) %-% c("case_timends", "event_attr_map", "event_time", "event_tte", "event_censored", "event_label")
  # fold the remaining tables into one wide frame, joined on case and time
  features = Reduce(
    function(acc, tbl) left_join(acc, dfp[[tbl]], by = c('caseID', 'eventTime')),
    feature_tables[-1],
    dfp[[feature_tables[1]]])
  label_column = sprintf("%sOrdered_label", target_keyword)
  list(X = features, y = dfp$event_label[[label_column]])
}
|
f5c2d72e05cc9975d77189f34e06d4f354b5f78f
|
9b090643f95d8ca5f91b9e7beab6fb040f0dd7bd
|
/Set 1 excercise.R
|
c8d9d37fc673e71420485ed29d309771ce630b91
|
[] |
no_license
|
smrutisanchita/Hypothesis-Testing-Statistics-Using-R
|
dfea3af45e41474519efb8c9edc419dd090ed836
|
4115b177c944fbbe3ce3432b4dde172c0729c4b0
|
refs/heads/main
| 2023-03-05T11:23:34.026884
| 2021-02-18T09:39:28
| 2021-02-18T09:39:28
| 339,997,132
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,080
|
r
|
Set 1 excercise.R
|
# Set 1: one-sample z-tests
##########################
# (The original file contained bare header/divider lines such as
# "1 Sample Z- test" and runs of dashes that were not comments, so the
# script did not parse; they are commented out here. Computations unchanged.)

# Question 1 -- two-tailed z-test
# H0: mu = 130   H1: mu != 130
n <- 9
mu0 <- 130
x_bar <- 131.08
sigma <- 1.5
alpha <- 0.01
# test statistic
z <- (x_bar - mu0) / (sigma / sqrt(n))
# two-tailed critical value
qnorm(alpha / 2)
# z is not in the critical region, so we do NOT reject H0

# ------------------------------------------------------------
# Question 2 -- lower-tailed z-test
# H0: mu = 75   H1: mu < 75
n <- 25
mu0 <- 75
x_bar <- 72.3
sigma <- 9
alpha <- 0.05
z <- (x_bar - mu0) / (sigma / sqrt(n))
# one-tailed critical value
qnorm(alpha)
# critical region for z: (-Inf, -1.644]
# as z does not lie within the critical region, we do NOT reject H0

# part (b): type II error probability when the true mean is 71
alpha <- 0.1
mu1 <- 71
cp <- qnorm(alpha)  # critical point, approx. -1.28
# P(Z > cp | mu = 71)
type_2_error <- 1 - pnorm(cp - (mu1 - mu0) / (sigma / sqrt(n)))  # approx. 0.1734

# ----------------------------------------------------------------------
# Question 3 -- lower-tailed z-test
# H0: mu = 750   H1: mu < 750
mu0 <- 750
sigma <- 38
n <- 50
x_bar <- 738.44
alpha <- 0.05
z <- (x_bar - mu0) / (sigma / sqrt(n))  # approx. -2.15
qnorm(alpha)  # approx. -1.644
# critical region for z: (-Inf, -1.644]
# as z lies within the critical region, we reject H0:
# the average lifetime is less than 750 hrs, do not buy them
|
ae65c3a13a98689f1b665f28c7d0a7845434fad7
|
1916f798d0b2c2ec56a84b3bbe17911bd8f3f1c0
|
/loadData.R
|
7a55c2aa25ef7254be0d19d936e6128d7fe45f55
|
[] |
no_license
|
potterrr/Expl_data_Analysis_proj2_Plt4
|
cc38dfb33f91e05a6758ed3de4081e7465d8ed20
|
a1113956effb3346099957e844b2282bf75fcded
|
refs/heads/master
| 2020-12-26T03:12:17.043474
| 2015-11-23T22:33:18
| 2015-11-23T22:33:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,281
|
r
|
loadData.R
|
## Download, extract and load the NEI data into R.
# Skip the download/extract steps if the complete unzipped data is already present.
# NOTE(review): `||` would be the idiomatic operator for this scalar condition;
# `|` works here but evaluates both sides.
if (!file.exists("./input/summarySCC_PM25.rds") |
    !file.exists("./input/Source_Classification_Code.rds")) {
  # create a directory for the data
  if (!dir.exists("./input")) {
    dir.create("./input")
  }
  # download the zip file from the url if not present in the directory
  if (!file.exists("./input/proj2Data.zip")) {
    download.file(
      "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip",
      "./input/proj2Data.zip"
    )
  }
  # unzip the file
  if (file.exists("./input/proj2Data.zip")) {
    cat("extracting file")
    unzip("./input/proj2Data.zip",exdir = "./input")
  }else{
    stop("file proj2Data.zip not found cannot extract")
  }
  # sanity checks: make sure extraction produced the expected files
  if (!file.exists("./input/summarySCC_PM25.rds")) {
    stop("file summarySCC_PM25.rds not found possible unzip error")
  }
  if (!file.exists("./input/Source_Classification_Code.rds")) {
    stop("file Source_Classification_Code.rds not found possible unzip error")
  }
}
# load the data into R
nei <- readRDS("./input/summarySCC_PM25.rds")
scc <- readRDS("./input/Source_Classification_Code.rds")
|
509fceaae0ea7c8a63a6044f6566b433792f62d8
|
7c40a7f4735e15b9156fd999fc729924830b5c6b
|
/R/varpart_helper_funs.R
|
2c0ee81a2ccb01555ee2bdeeb63e9dd7ccca7c33
|
[
"MIT"
] |
permissive
|
jannes-m/2020-enso-tdf
|
0e720bbe1daf32fae324df8e4fa83166f1be06e9
|
f230d75cab7c56ecbbe6426b4d631e28b30bed58
|
refs/heads/master
| 2022-12-22T16:53:45.170945
| 2020-09-20T21:37:11
| 2020-09-20T21:37:11
| 286,574,235
| 0
| 0
|
MIT
| 2020-09-24T15:15:13
| 2020-08-10T20:39:24
|
R
|
UTF-8
|
R
| false
| false
| 6,465
|
r
|
varpart_helper_funs.R
|
#' @title Function retrieves the DCA scores of the first two axes and plots them
#'   against an environmental table
#' @details Function applies a DCA, retrieves the site scores of the requested
#'   axes and plots the scores against the environmental variables.
#' @param londo Plot-species matrix with an id column (named as given by `id`)
#'   which will be used to rename the rownames, and then will be removed prior
#'   to executing the ordination.
#' @param id Name of the id column shared by `londo` and `expl` (default:
#'   `"pnr"`).
#' @param choices Number of DCA axes to be retained. Only \code{1:2} is
#'   currently supported.
#' @param expl Dataframe containing environmental variables. The dataframe
#'   should have an id column which should have a correspondence in the
#'   species-plot-matrix with the same name.
#' @return A \code{list} containing two [lattice::xyplot()]s.
#' @author Jannes Muenchow
#' @importFrom dplyr select inner_join all_of
#' @importFrom vegan scores decorana
#' @importFrom reshape2 melt
#' @importFrom lattice xyplot panel.xyplot panel.loess
explore_dca = function(londo, id = "pnr", choices = 1:2, expl) {
  # just keep observations found in response and explanatory tables;
  # all_of() makes the string/position-based selection explicit and errors on
  # missing columns instead of silently matching an environment variable
  expl = dplyr::inner_join(dplyr::select(londo, dplyr::all_of(id)), expl,
                           by = id)
  # get rid off id but keep the id as rownames
  rownames(londo) = londo[, id]
  londo = dplyr::select(londo, -dplyr::all_of(id))
  if (any(colSums(londo) == 0)) {
    stop("There are empty columns")
  }
  # check if each plot has at least one observation
  if (!all(rowSums(londo) > 0)) {
    stop("There are empty rows!")
  }
  # DCA with downweighting of rare species
  dca = vegan::decorana(londo, iweigh = TRUE)
  # extract site scores of the requested axes and re-attach the id
  dca = dplyr::select(as.data.frame(vegan::scores(dca, display = "sites")),
                      dplyr::all_of(choices))
  dca = cbind(as.integer(row.names(dca)), dca)
  names(dca)[1] = id
  # join environmental data
  tmp = dplyr::inner_join(dca, expl, by = id)
  tmp = dplyr::select(tmp, -dplyr::all_of(id))
  # long format: one panel per environmental variable
  dca1 = reshape2::melt(dplyr::select(tmp, -DCA2), id.var = "DCA1")
  dca2 = reshape2::melt(dplyr::select(tmp, -DCA1), id.var = "DCA2")
  p_1 =
    lattice::xyplot(DCA1 ~ value | variable, data = dca1,
                    col = "salmon", pch = 16,
                    scales = list(relation = "free"),
                    panel = function(x, y, ...) {
                      lattice::panel.xyplot(x, y, ...)
                      lattice::panel.loess(x, y, span = 0.9, lwd = 2.5,
                                           col = "gray")
                    })
  p_2 =
    lattice::xyplot(DCA2 ~ value | variable, data = dca2,
                    col = "salmon", pch = 16,
                    scales = list(relation = "free"),
                    panel = function(x, y, ...) {
                      lattice::panel.xyplot(x, y, ...)
                      lattice::panel.loess(x, y, span = 0.9, lwd = 2.5, col = "gray")
                    })
  # return your result
  list(p_1, p_2)
}
#' @title Variation partitioning applied to an ordination result or
#'   Hellinger-transformed matrix
#' @details Function applies variation partitioning to the scores of a DCA or
#'   hellinger-transforms the input matrix prior to the variation paritioning.
#' @param londo Plot-species matrix with an id column which will be used to join
#'   the environmental data and to rename the rownames prior to its deletion
#'   before executing the ordination.
#' @param choices Number of DCA axes to be retained (default: 1:2).
#' @param id Name of the id column shared by `londo`, `expl_1` and `expl_2`
#'   (default: `"pnr"`).
#' @param expl_1 Dataframe containing environmental variables. The dataframe
#'   should have an id column which should have a correspondence in the
#'   species-plot-matrix with the same name.
#' @param expl_2 Dataframe containing environmental variables. The dataframe
#'   should have an id column which should have a correspondence in the
#'   species-plot-matrix with the same name.
#' @param dca If `TRUE` (default), run a DCA first and partition the variation
#'   of its site scores; otherwise partition the Hellinger-transformed matrix
#'   directly.
#' @return Output of [vegan::varpart()]; when `dca = TRUE`, a list with the
#'   varpart output plus significance codes for fractions a and c.
#' @author Jannes Muenchow
#' @importFrom dplyr inner_join select
#' @importFrom vegan decorana anova.cca rda varpart
ordi_varpart = function(londo, choices = 1:2, id = "pnr", expl_1, expl_2,
                        dca = TRUE) {
  # just keep observations found in response and explanatory tables
  expl_1 = dplyr::inner_join(dplyr::select(londo, id), expl_1, by = id)
  expl_2 = dplyr::inner_join(dplyr::select(londo, id), expl_2, by = id)
  # get rid off id but keep the id as rownames
  rownames(londo) = londo[, id]
  londo = dplyr::select(londo, -id)
  if (any(colSums(londo) == 0)) {
    stop("There are empty columns")
  }
  # check if each plot has at least one observation
  if (!all(rowSums(londo) > 0)) {
    stop("There are empty rows!")
  }
  # apply DCA
  if (isTRUE(dca)) {
    # d = decostand(londo, "pa")
    dca = vegan::decorana(londo, iweigh = TRUE)
    # plot(dca, display = "sites")
    # Single axis contribution
    sc = round(dca$evals / sum(dca$evals), 2)
    # Cumulative contribution
    cc = round(cumsum(dca$evals / sum(dca$evals)), 2)
    # extract scores and add id
    dca = dplyr::select(as.data.frame(vegan::scores(dca, display = "sites")),
                        choices)
    dca = cbind(as.integer(row.names(dca)), dca)
    names(dca)[1] = id
    vp = varpart(dplyr::select(dca, -id), dplyr::select(expl_1, -id),
                 dplyr::select(expl_2, -id))
    # # you can apply a weight to the result (but the result is more or less the
    # # same just with the difference that the explained variance is much smaller)
    # d = vp$part$indfract[1:3, 3]
    # weight = cc[ncol(dplyr::select(dca, -id))]
    # vp$part$indfract[1:3, 3] = d * weight
    # vp$part$indfract[4, 3] = vp$part$indfract[4, 3] + sum(d - (d * weight))
    # test fraction a & c
    a = vegan::anova.cca(vegan::rda(dplyr::select(dca, -id),
                                    dplyr::select(expl_1, -id),
                                    dplyr::select(expl_2, -id),
                                    step = 1000))
    #Test of fraction c
    c = vegan::anova.cca(vegan::rda(dplyr::select(dca, -id),
                                    dplyr::select(expl_2, -id),
                                    dplyr::select(expl_1, -id),
                                    step = 1000))
    # significance stars for the tested fractions
    # NOTE(review): `labs = labels = ...` assigns the same vector to two
    # names; only `labs` is used below (as the labels argument of cut())
    breaks = c(0, 0.001, 0.01, 0.05, 0.1, 1)
    labs = labels = c("***", "**", "*", ".", "")
    a = cut(a$`Pr(>F)`[1], breaks, labs, include.lowest = FALSE)
    c = cut(c$`Pr(>F)`[1], breaks, labs, include.lowest = FALSE)
    return(list("vp" = vp, "a" = a, "c" = c))
  } else {
    return(vegan::varpart(londo, expl_1, expl_2, transfo = "hel"))
  }
}
|
487a4f0afc10e659308acad3e1eca7cb005941e1
|
81d787d69aa856acb59a7498f6a6f6b659832b5f
|
/ui.R
|
b18900961dfd89607d9e5e3918d5e953dedf96bd
|
[] |
no_license
|
colinking/dataAnalyticsFinalProject
|
e2f0ccca664bbb837eb804f4036df9681371e40d
|
76e79876cc3e8514d077bf7d7012da4b75269926
|
refs/heads/master
| 2016-08-04T14:43:37.472879
| 2015-01-25T16:51:22
| 2015-01-25T16:51:22
| 29,820,519
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,636
|
r
|
ui.R
|
#install.packages('devtools')
#devtools::install_github('shiny-incubator', 'rstudio')
library(shinyIncubator)
# Shiny UI for the "Stock Market Simulator" teaching app.
# Layout: a sidebar of game controls (buttons/slider whose input IDs are
# observed server-side) and a main panel with a "Docs" tab of instructions
# and a "Simulator" tab showing the price plot plus text read-outs.
# NOTE(review): all output IDs (e.g. "plot", "year", "bank") must have matching
# render* calls in server.R — not visible from this file.
shinyUI(
  pageWithSidebar(
    headerPanel("Stock Market Simulator"),
    # --- Sidebar: simulator controls (action buttons + speed slider) ---
    sidebarPanel(
      h4("Simulator Controls"),
      actionButton('playGame', 'Play Simulator', icon = icon("play")),
      br(),br(),
      actionButton('resetGame', 'Reset Simulator', icon = icon("repeat")),
      br(),br(),
      actionButton('pauseGame', 'Pause Simulator', icon = icon("pause")),
      br(),br(),
      actionButton('buyStock', 'Buy Stock', icon = icon("money")),
      br(),br(),
      actionButton('sellStock', 'Sell Stock', icon = icon("trash-o")),
      br(),br(),
      # Slider value = milliseconds between simulated market quarters.
      sliderInput("speedSlider", "Change How Quickly Time Passes", 20, 5000, value = 1000),
      textOutput("gameEndingOutput")
    ),
    # --- Main panel: documentation tab + live simulator tab ---
    mainPanel(
      tabsetPanel(
        tabPanel("Docs",
                 h4("Introduction"),
                 p("Welcome to the Stock Market Simulator!"),
                 p("Did I say simulator? I meant real life, of course!
                   Your grandfather has just passed away and left behind a
                   generous sum of money in your name. It's up to you to
                   carry out the family name and become a billionaire from
                   the stock market! (well, you can try at least)"),
                 br(),
                 br(),
                 h4("How to Invest"),
                 p("Over on the \"Simulator\" tab, you can track the stock prices over time and track how much money you currently have both in stocks and in the bank. You start with $1,000,000 and can choose to buy stocks by clicking the \"Buy Stock\" button which will convert everything in your bank into stocks, based on the current market price. If you feel like the market price is about to drop, sell those stocks! You can do this with the \"Sell Stocks\" button up on the left. Feel free to use your supernatural powers to influence the passage of time by changing the slider on the left. The number on the slider is the number of milliseconds between each market quarter. If time is going too quickly, feel free to click the \"Pause Simulator\" button to stop time. When you're ready, click the \"Play Simulator\" button to start. "),
                 br(),
                 br(),
                 h4("Explanation of Simulator Value"),
                 p(strong("Year"), ": The current year and quarter"),
                 p(strong("Stock Price"), ": The current price of stock on the market"),
                 p(strong("Stock Owned"), ": The number of stocks that you currently own"),
                 p(strong("Stock Value"), ": The total value of all the stocks that you currently own (current stock price * stocks owned)"),
                 p(strong("Bank"), ": The amount of money you own that is not in stocks"),
                 p(strong("Neutral Bank"), ": The Neutral Stock Value and Bank measures how much money you could have made if you bought all the stock you could right at the beginning and then never traded again"),
                 p(strong("Perfect Bank"), ": The Perfect Stock Value and Bank measures how much money you could have made if you traded at the perfect time, every time as in you sold every time the stock price was about to drop and bought every time the stock was about to rise"),
                 p(strong("Imperfect Bank"), ": The Imperfect Stock Value and Bank measures how much money you could have made if you traded at the worst possible times, every time, as in you bought stock everytime the stock price was about to drop and sold stock everytime the stock price was about to rise"),
                 br(),
                 br(),
                 p("This simulator was created by Colin King for the
                   Developing Data Products Class by Johns Hopkins University")
        ),
        # Live view: price plot plus current game-state text outputs.
        tabPanel("Simulator",
                 plotOutput("plot"),
                 textOutput("year"),
                 textOutput("stockprice"),
                 br(),
                 textOutput("stocksowned"),
                 textOutput("valuation"),
                 textOutput("bank"),
                 br(),
                 textOutput("neutralvaluation"),
                 textOutput("neutralbank"),
                 br(),
                 textOutput("perfectvaluation"),
                 textOutput("perfectbank"),
                 br(),
                 textOutput("imperfectvaluation"),
                 textOutput("imperfectbank"),
                 br(),
                 br(),
                 br(),
                 br()
        )
      )
    )
  )
)
|
66bab61c0ec969043b95a0bb219e6fafb66cf966
|
4d9808b08204d65194923a6a975c98604f5a6746
|
/R/EnrichmentScore.R
|
1f7875ec1d7d5a0f0ad9ea8382c0df496812ab15
|
[
"Apache-2.0"
] |
permissive
|
hanjunwei-lab/MiRSEA
|
41b21f9dcf24e513017a3b100e2cd2411406ff8a
|
2914d2e41ba742589d80e91c0559cc3e83e3692b
|
refs/heads/master
| 2022-11-08T07:45:54.054691
| 2020-06-30T13:18:48
| 2020-06-30T13:18:48
| 275,349,916
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,001
|
r
|
EnrichmentScore.R
|
########################################################################
##get enrichment score for each miR set
# GSEA-style running-sum enrichment score for a miR set against a ranked list.
# Hits step the running sum up (weighted by |correl.vector|^type), misses step
# it down by 1/(number of non-members); the ES is the signed extreme deviation.
# Returns: ES (5 significant digits), arg.ES (position of the extreme),
# RES (full running sum), indicator (0/1 membership vector).
EnrichmentScore <- function(miR.list, miR.set, weighted.score.type = 1,
                            correl.vector = NULL) {
  # 1 where the ranked-list element belongs to the set, 0 otherwise
  hits <- sign(match(miR.list, miR.set, nomatch = 0))
  misses <- 1 - hits
  n.total <- length(miR.list)
  n.miss <- n.total - length(miR.set)
  if (weighted.score.type == 0) {
    # Unweighted variant: every position contributes equally.
    correl.vector <- rep(1, n.total)
  }
  weights <- abs(correl.vector**weighted.score.type)
  step.hit <- 1.0 / sum(weights[hits == 1])
  step.miss <- 1.0 / n.miss
  running <- cumsum(hits * weights * step.hit - misses * step.miss)
  peak <- max(running)
  trough <- min(running)
  # Keep whichever deviation from zero is larger in magnitude (signed).
  if (peak > -trough) {
    es <- signif(peak, digits = 5)
    at <- which.max(running)
  } else {
    es <- signif(trough, digits = 5)
    at <- which.min(running)
  }
  list(ES = es, arg.ES = at, RES = running, indicator = hits)
}
#############################################################################
#' Running-sum enrichment score (sparse variant)
#'
#' Computes the same enrichment score as EnrichmentScore() but only evaluates
#' the running sum just after each hit (peak) and just before it (valley),
#' which is O(Nh) instead of O(N).
#'
#' @param miR.list ranked list given as integer positions (entries are used to
#'   index loc.vector, so they must be valid indices 1..N)
#' @param miR.set set members, in the same identifier space as miR.list
#' @param weighted.score.type 0 = unweighted, 1 = |correl|, 2 = correl^2,
#'   otherwise |correl^type|
#' @param correl.vector per-position weights; required unless
#'   weighted.score.type == 0
#' @return list with a single component ES (signed score, 5 significant digits)
EnrichmentScore2 <- function(miR.list, miR.set, weighted.score.type = 1, correl.vector = NULL) {
  N <- length(miR.list)
  Nh <- length(miR.set)
  Nm <- N - Nh
  # Rank position of every identifier, then the sorted positions of set members.
  loc.vector <- vector(length = N, mode = "numeric")
  loc.vector[miR.list] <- seq_len(N)
  tag.loc.vector <- sort(loc.vector[miR.set], decreasing = FALSE)
  # Weight attached to each member ("tag") position.
  if (weighted.score.type == 0) {
    tag.correl.vector <- rep(1, Nh)
  } else if (weighted.score.type == 1) {
    tag.correl.vector <- abs(correl.vector[tag.loc.vector])
  } else if (weighted.score.type == 2) {
    tag.correl.vector <- abs(correl.vector[tag.loc.vector] * correl.vector[tag.loc.vector])
  } else {
    tag.correl.vector <- abs(correl.vector[tag.loc.vector]**weighted.score.type)
  }
  norm.tag <- 1.0 / sum(tag.correl.vector)
  tag.correl.vector <- tag.correl.vector * norm.tag
  norm.no.tag <- 1.0 / Nm
  # Number of non-members immediately preceding each member position.
  tag.diff.vector <- vector(length = Nh, mode = "numeric")
  tag.diff.vector[1] <- tag.loc.vector[1] - 1
  if (Nh > 1) {
    # Bug fix: the original unconditionally indexed 2:Nh, which for a
    # single-member set evaluates to 2:1 (backwards) and produced NA scores.
    tag.diff.vector[2:Nh] <- tag.loc.vector[2:Nh] - tag.loc.vector[1:(Nh - 1)] - 1
  }
  tag.diff.vector <- tag.diff.vector * norm.no.tag
  # Running sum just after (peak) and just before (valley) each hit.
  peak.res.vector <- cumsum(tag.correl.vector - tag.diff.vector)
  valley.res.vector <- peak.res.vector - tag.correl.vector
  max.ES <- max(peak.res.vector)
  min.ES <- min(valley.res.vector)
  # Signed score: whichever extreme deviates further from zero.
  ES <- signif(if (max.ES > -min.ES) max.ES else min.ES, digits = 5)
  return(list(ES = ES))
}
|
f26a174406ca642e81a29e09d3a814694a26f52e
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/FRESA.CAD/R/jaccard.r
|
98d45b26497e4cc09ba2ca262d88e5c127765693
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,482
|
r
|
jaccard.r
|
#' Jaccard index matrix of two labelings of the same set
#'
#' Computes the pairwise Jaccard index (|intersection| / |union|) between every
#' label of \code{clustersA} and every label of \code{clustersB}. Labels are
#' assumed integer-like: every value in \code{min(labels):max(labels)} is
#' treated as a cluster (unused labels give zero rows/columns).
#'
#' @param clustersA cluster labels of the set
#' @param clustersB second cluster labels of the same set (same length)
#' @return A list with components:
#'   \code{jaccardMat} (matrix of Jaccard indices, rows = A labels, columns =
#'   B labels), \code{elementJaccard} (per-element Jaccard value of the (A, B)
#'   label pair each element belongs to) and \code{balancedMeanJaccard} (mean
#'   of the row-wise and column-wise maxima). All components are \code{NULL}
#'   when the inputs differ in length or are empty.
#' @examples
#' jaccardMatrix(c(1, 1, 2), c(1, 2, 2))
#' @export
jaccardMatrix <- function(clustersA=NULL,clustersB=NULL)
{
  jaccardM <- NULL
  jaccardA <- NULL
  meanJaccard <- NULL
  # Length guard also rejects empty input (min()/max() of empty would error).
  if (length(clustersA) == length(clustersB) && length(clustersA) > 0)
  {
    labelsA <- min(clustersA):max(clustersA)
    labelsB <- min(clustersB):max(clustersB)
    jaccardM <- matrix(0, nrow = length(labelsA), ncol = length(labelsB))
    for (ii in seq_along(labelsA))
    {
      setA <- (clustersA == labelsA[ii])
      for (jj in seq_along(labelsB))
      {
        setB <- (clustersB == labelsB[jj])
        unionsum <- sum(setA | setB)
        # Leave 0 when both clusters are empty (union of size 0).
        if (unionsum > 0) jaccardM[ii, jj] <- sum(setA & setB) / unionsum
      }
    }
    # Balanced mean: average of best match per row and best match per column.
    meanJaccard <- mean(c(apply(jaccardM, 2, max), apply(jaccardM, 1, max)))
    rownames(jaccardM) <- labelsA
    colnames(jaccardM) <- labelsB
    # Per-element value: index the matrix by each element's (A, B) label pair.
    # jaccardA[] keeps the length/names of clustersA, as the original loop did.
    jaccardA <- clustersA
    jaccardA[] <- jaccardM[cbind(as.character(clustersA), as.character(clustersB))]
  }
  result <- list(jaccardMat = jaccardM, elementJaccard = jaccardA, balancedMeanJaccard = meanJaccard)
  return(result)
}
|
c5facb3f4a9747200f33978b874f0c9e41fa5d42
|
771c05fa7b58f8f2dab7938da389e9e72b3cf3d4
|
/Rvasp/man/poscar.extractlayers.Rd
|
7a55eba1fc7df7d732e417b414ea1ef6843ec864
|
[
"MIT"
] |
permissive
|
gokhansurucu/Rvasp
|
56a75b10daa606768791935530bd108204d92f4f
|
8983440a96ca8acf017f47af8dbfd3f32faaad22
|
refs/heads/master
| 2020-04-08T21:14:33.155967
| 2014-03-14T20:08:59
| 2014-03-14T20:08:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 704
|
rd
|
poscar.extractlayers.Rd
|
\name{poscar.extractlayers}
\alias{poscar.extractlayers}
\title{Extracts atoms of object of class poscar}
\usage{
poscar.extractlayers(poscar, layer, layers,
vacuum = c(0, 0, 0), center = T)
}
\arguments{
\item{poscar}{object of class poscar}
\item{layer}{indices of layers which will be extracted}
\item{layers}{total layer count}
  \item{vacuum}{sets vacuum, see \code{poscar.setvacuum}
  for further information}
  \item{center}{centers new poscar, see
  \code{poscar.setvacuum} for further information}
}
\description{
\code{poscar.extractlayers} extracts atoms, based on
layers, of object of class poscar. Will give object of
class poscar. Will not change position of atoms.
}
|
49907ef73eb753799e44fbb1d0494b3d11bb7845
|
1d95131d65ea71dfa5d3bbe40886577b3d97879c
|
/getDataStockPrices.R
|
22aa406c5a1f9ecd083aa41e7b56f38b82619065
|
[] |
no_license
|
agranado/LSTM_timeseries
|
e3597de9e0efba19e242c47eedeae9ff7086c1aa
|
2c77644ae89b6b4145b732b25fabbbe3b3ab66a1
|
refs/heads/main
| 2023-02-27T05:47:13.356337
| 2021-02-06T20:51:05
| 2021-02-06T20:51:05
| 336,632,354
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 622
|
r
|
getDataStockPrices.R
|
# Download daily prices for a basket of tech tickers plus the NASDAQ composite
# and export them as a CSV (date column first, to serve as an index in Python).
library(BatchGetSymbols)
# Bug fix: the script used %>% and everything() without attaching dplyr;
# dplyr::select() alone does not bring the pipe into scope.
library(dplyr)
# we get data from 2000 to include the dot.com recovery
first.date <- '2000-01-01'
# Until today
last.date <- Sys.Date()
freq.data <- 'daily'
symbols <- c('MSFT','NVDA', 'AMZN', 'GOOGL','AAPL', '^IXIC')
aa <- BatchGetSymbols(symbols, first.date = first.date, last.date = last.date, freq.data = freq.data)
# First element in the list is meta data of the download;
# second element holds the actual price table.
# relocate the date column for index in python
data_export <- aa[[2]] %>% dplyr::select(ref.date, everything())
# Write to drive (quote/row.names spelled out: T/F are reassignable in R)
write.csv(data_export, 'stock_prices_from2000.csv', quote = FALSE, row.names = FALSE)
|
930756f26e4329800b89f94425410b06102c6fca
|
1233bd68fa715c898ea416f1945235bd1ee341ac
|
/scripts/particle_analysis_1.R
|
8c338cd0a677e586e87fb441e970efcb8e957066
|
[] |
no_license
|
grapp1/mb_sensitivity
|
98a3ef97e989b99f945e452b2859efb77c0a05fe
|
783531044cd8877a21e32803543a0eb8bd4d8453
|
refs/heads/master
| 2021-06-14T06:36:09.843400
| 2020-09-01T00:56:39
| 2020-09-01T00:56:39
| 254,479,809
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,904
|
r
|
particle_analysis_1.R
|
# -------------------------------------------------------------------------------------------------
# particle_analysis_1.R
# EcoSLIM analysis script
# to generate saturated age PDFs and variance plots for six scenarios
# -------------------------------------------------------------------------------------------------
library(ggplot2)
library(ggnewscale)
library(tidyr)
library(readr)
library(dplyr)
library(openxlsx)
library(cowplot)
library(zoo)
library(plotrix)
library(plyr)
library(spatstat)
library(gridExtra)
# Project helper functions (pdfxn, EcoSLIM reader, cell aggregation,
# variance binning, particle flowpaths).
source("~/mb_sensitivity/scripts/prob_dens_fxn.R")
# Bug fix: path typo "~/mb_sensitivityh/..." pointed at a nonexistent
# directory and made this source() fail; all sibling scripts live under
# ~/mb_sensitivity/scripts/.
source("~/mb_sensitivity/scripts/EcoSLIM_read_fxn_update.R")
source("~/mb_sensitivity/scripts/cell_agg_fxn.R")
source("~/mb_sensitivity/scripts/var_bin_fxn.R")
source("~/mb_sensitivity/scripts/particle_flowpath_fxn.R")
# -------------------------------------------------------------------------------------------------
# loading particle output files
# -------------------------------------------------------------------------------------------------
# Each .Rda provides one data frame of particles that exited the domain for a
# single subsurface scenario (A-D and F-H; there is no scenario E).
# Columns used downstream include age, sat_age and spath_len.
load(file="~/mb_sensitivity/outputs/exit_particles/exited_particles_A.Rda")
load(file="~/mb_sensitivity/outputs/exit_particles/exited_particles_B.Rda")
load(file="~/mb_sensitivity/outputs/exit_particles/exited_particles_C.Rda")
load(file="~/mb_sensitivity/outputs/exit_particles/exited_particles_D.Rda")
load(file="~/mb_sensitivity/outputs/exit_particles/exited_particles_F.Rda")
load(file="~/mb_sensitivity/outputs/exit_particles/exited_particles_G.Rda")
load(file="~/mb_sensitivity/outputs/exit_particles/exited_particles_H.Rda")
# -------------------------------------------------------------------------------------------------
# generating PDF for scenarios
# -------------------------------------------------------------------------------------------------
# Saturated-age probability densities, binned in 3-year increments, for each
# scenario. pdfxn() comes from prob_dens_fxn.R (sourced above).
# NOTE(review): the bin range is max(<scenario>$age) (total particle age) while
# the density is computed over the "sat_age" column — confirm this is intended.
bin_size_age <- 3
pdf_exit_A_fw1 <- pdfxn(exited_particles_A, max(exited_particles_A$age), bin_size_age,column = "sat_age")
pdf_exit_B_fw1 <- pdfxn(exited_particles_B, max(exited_particles_B$age), bin_size_age,column = "sat_age")
pdf_exit_C_fw1 <- pdfxn(exited_particles_C, max(exited_particles_C$age), bin_size_age,column = "sat_age")
pdf_exit_D_fw1 <- pdfxn(exited_particles_D, max(exited_particles_D$age), bin_size_age,column = "sat_age")
pdf_exit_F_fw1 <- pdfxn(exited_particles_F, max(exited_particles_F$age), bin_size_age,column = "sat_age")
pdf_exit_G_fw1 <- pdfxn(exited_particles_G, max(exited_particles_G$age), bin_size_age,column = "sat_age")
pdf_exit_H_fw1 <- pdfxn(exited_particles_H, max(exited_particles_H$age), bin_size_age,column = "sat_age")
# Human-readable scenario labels used for plot grouping/colors.
pdf_exit_A_fw1$scen <- "Homogeneous"
pdf_exit_B_fw1$scen <- "Two-layered"
pdf_exit_C_fw1$scen <- "Three-layered"
pdf_exit_F_fw1$scen <- "Variable Soil"
pdf_exit_D_fw1$scen <- "Low-K Exponential Decay"
pdf_exit_H_fw1$scen <- "Anisotropic"
# Stack of the six main scenarios for the first figure.
pdf_exited_all <- rbind(pdf_exit_A_fw1,pdf_exit_B_fw1,pdf_exit_C_fw1,pdf_exit_F_fw1,pdf_exit_H_fw1,pdf_exit_D_fw1)
pdf_exit_G_fw1$scen <- "High-K Exponential Decay"
# Second stack: only the exponential-decay scenarios vs the homogeneous base.
pdf_exited_exp <- rbind(pdf_exit_A_fw1,pdf_exit_D_fw1,pdf_exit_G_fw1)
# PDF figure, six scenarios, log-scaled age axis, legend suppressed.
pdf_fig <- ggplot() + geom_line(data = pdf_exited_all, aes(x = sat_age,y = Density_pdf, group=scen,col = scen), size = 0.75) +
  scale_x_log10(name="",limits = c(3,800), breaks = c(3,25,50,100,200,400,600,800,1000),labels = scales::comma,expand=c(0,0)) +
  scale_y_continuous(name="", expand=c(0,0), breaks = seq(0,0.12,0.01), limits = c(0,0.07)) +
  scale_color_manual(values = c("purple", "black","darkorange","dodgerblue","firebrick","green3")) + labs(color = "Scenario") +
  expand_limits(x = 100, y = 0) + theme_bw() +
  theme(panel.border = element_rect(colour = "black", size=1, fill=NA), panel.grid.major = element_line(colour="grey", size=0.1), legend.position = "none",
        legend.background = element_rect(linetype="solid", colour ="black"),plot.margin = margin(15,15,15,15),
        title =element_text(size=12, face='bold'),axis.text.x = element_text(color="black",size=12),axis.text.y = element_text(color="black",size=12))
pdf_fig
# Companion figure for the exponential-decay comparison (wider y-range).
pdf_fig_exp <- ggplot() + geom_line(data = pdf_exited_exp, aes(x = sat_age,y = Density_pdf, group=scen,col = scen), size = 0.75) +
  scale_x_log10(name="",limits = c(3,800), breaks = c(3,25,50,100,200,400,600,800,1000),labels = scales::comma,expand=c(0,0)) +
  scale_y_continuous(name="", expand=c(0,0), breaks = seq(0,0.12,0.02), limits = c(0,0.12)) +
  scale_color_manual(values = c("khaki4","black","darkorange")) + labs(color = "Scenario") +
  expand_limits(x = 100, y = 0) + theme_bw() +
  theme(panel.border = element_rect(colour = "black", size=1, fill=NA), panel.grid.major = element_line(colour="grey", size=0.1), legend.position = "none",
        legend.background = element_rect(linetype="solid", colour ="black"),plot.margin = margin(15,15,15,15),
        title =element_text(size=12, face='bold'),axis.text.x = element_text(color="black",size=12),axis.text.y = element_text(color="black",size=12))
pdf_fig_exp
# -------------------------------------------------------------------------------------------------
# calculating variance time series
# -------------------------------------------------------------------------------------------------
# Variance of saturated path length (spath_len) binned by saturated age in
# 3-year increments, per scenario. var_bin_fxn() comes from var_bin_fxn.R.
var_bin <- 3
var_spath_A <- var_bin_fxn(exited_particles_A, max(exited_particles_A$age), var_bin,column1 = "sat_age",column2 = "spath_len")
var_spath_B <- var_bin_fxn(exited_particles_B, max(exited_particles_B$age), var_bin,column1 = "sat_age",column2 = "spath_len")
var_spath_C <- var_bin_fxn(exited_particles_C, max(exited_particles_C$age), var_bin,column1 = "sat_age",column2 = "spath_len")
var_spath_D <- var_bin_fxn(exited_particles_D, max(exited_particles_D$age), var_bin,column1 = "sat_age",column2 = "spath_len")
var_spath_F <- var_bin_fxn(exited_particles_F, max(exited_particles_F$age), var_bin,column1 = "sat_age",column2 = "spath_len")
var_spath_G <- var_bin_fxn(exited_particles_G, max(exited_particles_G$age), var_bin,column1 = "sat_age",column2 = "spath_len")
var_spath_H <- var_bin_fxn(exited_particles_H, max(exited_particles_H$age), var_bin,column1 = "sat_age",column2 = "spath_len")
# Scenario labels (same naming as the PDF section above).
var_spath_A$scen <- "Homogeneous"
var_spath_B$scen <- "Two-layered"
var_spath_C$scen <- "Three-layered"
var_spath_F$scen <- "Variable Soil"
var_spath_D$scen <- "Low-K Exponential Decay"
var_spath_H$scen <- "Anisotropic"
var_spath_G$scen <- "High-K Exponential Decay"
# Combined stacks; bins with fewer than 10 particles are dropped.
# NOTE(review): var_bin_1 and var_bin_2 are built but not used by the plots
# below (which use var_bin_all / var_bin_exp) — possibly leftovers.
var_bin_1 <- rbind(var_spath_A,var_spath_B,var_spath_C,var_spath_F)
var_bin_1 <- var_bin_1[which(var_bin_1$count > 9),]
var_bin_2 <- rbind(var_spath_A,var_spath_H,var_spath_D)
var_bin_2 <- var_bin_2[which(var_bin_2$count > 9),]
var_bin_all <- rbind(var_spath_A,var_spath_B,var_spath_C,var_spath_F,var_spath_H,var_spath_D)
var_bin_all <- var_bin_all[which(var_bin_all$count > 9),]
var_bin_exp <- rbind(var_spath_A,var_spath_D,var_spath_G)
var_bin_exp <- var_bin_exp[which(var_bin_exp$count > 9),]
# -------------------------------------------------------------------------------------------------
# generating variance plot
# -------------------------------------------------------------------------------------------------
# Variance of saturated path length vs saturated age, log y-axis, per scenario.
var_bin_fig <- ggplot(data = var_bin_all, aes(x = sat_age,y = variance, group=scen,col = scen)) + geom_line(size = 0.75) + #geom_point(size =0.5) +
  scale_x_continuous(name="Saturated age (yr)",limits = c(0,800), breaks=c(0,100,200,300,400,500,600,700,800),labels = scales::comma,expand=c(0,0)) +
  ggtitle("Variance of saturated lengths of exited particles") +
  scale_y_log10(name=bquote('Variance of saturated path lengths ('*m^2*')'), expand=c(0,0), limits = c(1000,1000000000), breaks = c(1000,10000,100000,1000000,10000000,100000000,1000000000,10000000000)) +
  scale_color_manual(values = c("purple", "black","darkorange","dodgerblue","firebrick","green3")) + labs(color = "Scenario") +
  expand_limits(x = 10, y = 0) + theme_bw() +
  theme(panel.border = element_rect(colour = "black", size=1, fill=NA), panel.grid.major = element_line(colour="grey", size=0.1), legend.position = "none",
        legend.background = element_rect(linetype="solid", colour ="black"),plot.margin = margin(5,15,5,5),
        title =element_text(size=12, face='bold'),axis.text.x = element_text(color="black",size=10),axis.text.y = element_text(color="black", size = 10)) +
  theme(axis.title.y = element_text(color="black",face='bold'))
var_bin_fig
# Same plot restricted to the exponential-decay comparison.
# NOTE(review): this ASSIGNS the ggplot object over the var_bin_exp data frame
# built above — the data survives only inside the plot; rename if the data
# frame is needed afterwards.
var_bin_exp <- ggplot(data = var_bin_exp, aes(x = sat_age,y = variance, group=scen,col = scen)) + geom_line(size = 0.75) + #geom_point(size =0.5) +
  scale_x_continuous(name="Saturated age (yr)",limits = c(0,800), breaks=c(0,100,200,300,400,500,600,700,800),labels = scales::comma,expand=c(0,0)) +
  ggtitle("Variance of saturated lengths of exited particles") +
  scale_y_log10(name=bquote('Variance of saturated path lengths ('*m^2*')'), expand=c(0,0), limits = c(1000,1000000000), breaks = c(1000,10000,100000,1000000,10000000,100000000,1000000000,10000000000)) +
  scale_color_manual(values = c("khaki4", "black","darkorange")) + labs(color = "Scenario") +
  expand_limits(x = 10, y = 0) + theme_bw() +
  theme(panel.border = element_rect(colour = "black", size=1, fill=NA), panel.grid.major = element_line(colour="grey", size=0.1), legend.position = "none",
        legend.background = element_rect(linetype="solid", colour ="black"),plot.margin = margin(5,15,5,5),
        title =element_text(size=12, face='bold'),axis.text.x = element_text(color="black",size=10),axis.text.y = element_text(color="black", size = 10)) +
  theme(axis.title.y = element_text(color="black",face='bold'))
var_bin_exp
# -------------------------------------------------------------------------------------------------
# calculating sensitivity metric values
# -------------------------------------------------------------------------------------------------
# Sensitivity of each scenario relative to the homogeneous base case:
#   sens_fp = |% change in mean(sat_age)| + |% change in sd(sat_age)|
# Row 1 (Homogeneous) is the reference, so its sensitivity is 0 by construction.
# Refactored from seven copy-pasted mean/sd assignments into a single list +
# vapply pass; row order must match the scenario labels below.
particle_sets <- list(
  exited_particles_A,  # Homogeneous (reference)
  exited_particles_B,  # Two-layered
  exited_particles_C,  # Three-layered
  exited_particles_F,  # Variable Soil
  exited_particles_H,  # Anisotropic
  exited_particles_D,  # Low-K Exp Decay
  exited_particles_G   # High-K Exp Decay
)
sens_fp <- data.frame(
  scen = c("Homogeneous", "Two-layered", "Three-layered", "Variable Soil",
           "Anisotropic", "Low-K Exp Decay", "High-K Exp Decay"),
  mean = vapply(particle_sets, function(p) mean(p$sat_age), numeric(1)),
  sd   = vapply(particle_sets, function(p) sd(p$sat_age), numeric(1)),
  sens_fp = NA_real_
)
for (i in seq_len(nrow(sens_fp))) {
  pct_chg_mean <- abs((sens_fp$mean[i] - sens_fp$mean[1]) / sens_fp$mean[1])
  pct_chg_sd <- abs((sens_fp$sd[i] - sens_fp$sd[1]) / sens_fp$sd[1])
  sens_fp$sens_fp[i] <- pct_chg_mean + pct_chg_sd
}
|
871ddc803f9e42a09e39104d16a58a9e3f77366c
|
994012ddc5c85019df5101b7c4d4f824fb9d02af
|
/Ch.7/GelmanHill2007_Ch7Question1_IRC.R
|
b33421e4b79213550b6a733e2c35cb6ec6d1b0d1
|
[] |
no_license
|
donahuem/Gelman_Hill_readinggroup
|
655c9d7c54606286472d44cce9bd6a3777c728ca
|
8562c2b55071ca61e0623abc27a5d812d721b8ad
|
refs/heads/master
| 2020-12-12T22:57:12.594422
| 2017-02-10T22:43:43
| 2017-02-10T22:47:19
| 50,400,514
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,665
|
r
|
GelmanHill2007_Ch7Question1_IRC.R
|
#Gelman & Hill - Hierarchical Models
#Chapter 7 Questions
#Question 1
# Discrete probability simulation: suppose that a basketball player has a 60%
# chance of making a shot, and he keeps taking shots until he misses two in a
# row. Also assume his shots are independent (so that each shot has 60% probability
# of success, no matter what happened before).
# (a) Write an R function to simulate this process.
# Simulate independent shots with P(make) = probOfSuccess, stopping as soon as
# two consecutive misses occur. Returns the full 0/1 shot history; the last
# two entries are always misses. (Draw order matches the original: two shots
# up front, then one per continuing iteration.)
basketballShots <- function(probOfSuccess) {
  shots <- rbinom(2, 1, probOfSuccess)
  repeat {
    k <- length(shots)
    # Stop once the two most recent shots are both misses.
    if (shots[k] + shots[k - 1] == 0) {
      break
    }
    shots <- c(shots, rbinom(1, 1, probOfSuccess))
  }
  shots
}
# (b) Run the simulation 1000 times and use it to estimate the mean, standard
# deviation, and distribution of the total number of shots the player takes.
# Refactor: store each run once in a list instead of growing numShotsVect and
# numSuccesses with append() inside the loop (which copies the whole vector on
# every iteration, O(n^2)).
n_sims <- 1000
sims <- lapply(seq_len(n_sims), function(i) basketballShots(0.6))
numShotsVect <- vapply(sims, length, integer(1))
numSuccesses <- vapply(sims, sum, integer(1))
meanShots <- mean(numShotsVect)
stdevShots <- sd(numShotsVect)
hist(numShotsVect) # has a skewed distribution towards the low end.
# (c) Scatterplot of the number of shots taken vs the proportion of shots
# that were successes.
propSuccesses <- numSuccesses / numShotsVect
plot(numShotsVect, propSuccesses)
|
676b62b19e3afb457a934ee600becc764fa6c253
|
c8e71af48d925c34d1cb9f4dad262c970e8968d5
|
/man/InsuranceVote.Rd
|
2404cdba85b59c48ca6cb21fbfd33c18ea707f87
|
[
"MIT"
] |
permissive
|
tessington/qsci381
|
43c7cd323ab64cf28ba738be35779157c93e62cf
|
b981f0bd345b250d42ff5f1c0609e5e61f5911f7
|
refs/heads/master
| 2022-12-24T20:56:56.045374
| 2020-09-24T20:50:29
| 2020-09-24T20:50:29
| 284,817,926
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,589
|
rd
|
InsuranceVote.Rd
|
\name{InsuranceVote}
\alias{InsuranceVote}
\docType{data}
\title{Congressional Votes on a Health Insurance Bill}
\description{
Congressional votes on an ObamaCare health insurance bill in 2009
}
\format{
A dataset with 435 observations on the following 9 variables.
\tabular{rl}{
\code{Party} \tab {Party affiliation: \code{D}=Democrat or \code{R}=Republican}\cr
\code{Dist.} \tab {Congressional district (State-Number)}\cr
\code{InsVote} \tab {Vote on the health insurance bill: \code{1}=yes or \code{0}=no}\cr
\code{Rep} \tab {Indicator for Republicans}\cr
\code{Dem} \tab {Indicator for Democrats}\cr
\code{Private} \tab {Percentage of non-senior citizens in district with private health insurance}\cr
\code{Public} \tab {Percentage of non-senior citizens in district with public health insurance}\cr
\code{Uninsured} \tab {Percentage of non-senior citizens in district with no health insurance}\cr
\code{Obama} \tab {District winner in 2008 presidential election: \code{1}=Obama \code{0}=McCain}\cr
}
}
\details{
On 7 November 2009 the U.S. House of Representatives voted, by the
narrow margin of 220-215, for a bill to enact health insurance reform. Most Democrats voted yes
while almost all Republicans voted no. This dataset contains data for each of the 435
representatives.
}
\source{
Insurance data are from the American Community Survey\cr
(http://www.census.gov/acs/www/data_documentation/data_main/).
Roll call of congressional votes on this bill can be found
at \cr
http://clerk.house.gov/evs/2009/roll887.xml.
}
\keyword{datasets}
|
d53c5ebf92164ef6f9c33fd827905256ca94aec7
|
57641b8222ba9f6c4dad5080326630bfb85b592c
|
/man/SplitSentences.Rd
|
82a1a093598bff8a0656eeea24c9bcaddcba3b86
|
[] |
no_license
|
M3SOulu/EmoticonFindeR
|
28ac13272d7bf3298423b8462a22af117e3748e9
|
fb99960b6f2ddaab85b1ef7a2715a8c757658134
|
refs/heads/master
| 2022-06-28T07:36:24.370149
| 2022-06-20T10:45:55
| 2022-06-20T10:45:55
| 192,537,114
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 494
|
rd
|
SplitSentences.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sentences.R
\name{SplitSentences}
\alias{SplitSentences}
\title{Split sentences}
\usage{
SplitSentences(text, nclusters = parallel::detectCores())
}
\arguments{
\item{text}{Text to split into sentences.}
\item{nclusters}{Number of clusters to use for running openNLP}
}
\value{
List of same size as text with each element a vector of sentences.
}
\description{
Split text into individual sentences using openNLP.
}
|
427e3cbe2cf3a030ebec598681b81ad76c12a91a
|
45c1672d4885fb27d9210e688f1861ab5d7407fc
|
/randomize_network/load_links_file_format.R
|
dc2e9462831d346589700636cb90b05f85691bd5
|
[] |
no_license
|
seoanezonjic/sys_bio_lab_scripts
|
b518ff0a1b57fe3a4abd6b81192977d8a61fcfa0
|
77d57fd70deafb39d85576c2d9f7fc619fdd30eb
|
refs/heads/master
| 2023-05-30T00:16:54.032085
| 2023-05-22T12:32:40
| 2023-05-22T12:32:40
| 216,526,241
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,114
|
r
|
load_links_file_format.R
|
#' Load a relationships (links) table in one of the allowed formats
#'
#' Reads a headerless, tab-separated table of relationships from \code{file},
#' validates it and returns it with typed, named columns. The format is
#' detected from the column count (and, for two-column files, content):
#' \itemize{
#'   \item HPO-HPO: \code{<HPO> <HPO> [<Intensity>]} (intensity optional)
#'   \item HPO-Region: \code{<Chr> <Start> <End> <HPO> <Intensity> <NodeID>}
#'   \item Loci-HPO: \code{<NodeID> <HPO>}
#' }
#'
#' @author Fernando Moreno Jabato <jabato@uma.com>
#' @param file file with info to be loaded and parsed
#' @param sep columns separator used into file
#' @param header indicates if file has, or not, header
#' @return a dataframe with formated info or a string with an error
#'   description if any error occurs (errors are RETURNED as strings, not
#'   signalled with \code{stop()})
load_links_file_format <- function(file, sep = "\t", header = FALSE){
  # Expected column counts per format (A = HPO_HPO ; B = HPO_Region)
  ncol_formatA <- 3
  ncol_formatB <- 6
  ncol_specialFormat <- 2
  # ---- Check inputs: every failure is reported as a returned string ----
  if(is.null(file)){ # File
    return("ERROR [load_links_file_format]: Given file is NULL pointer")
  }else if(!is.character(file)){
    return("ERROR [load_links_file_format]: Given file is not a character value")
  }else if(!file.exists(file)){
    return("ERROR [load_links_file_format]: Given file does not exists")
  }else if(file.access(file,mode=4) != 0){ # Mode 4 = Read permission
    return("ERROR [load_links_file_format]: Given file can not be read")
  }else if(file.size(file) == 0){
    return("ERROR [load_links_file_format]: Given file is empty file")
  }
  if(is.null(sep)){
    return("ERROR [load_links_file_format]: Given separator is NULL pointer")
  }else if(!is.character(sep)){
    return("ERROR [load_links_file_format]: Given seperator is not a character value")
  }else if(nchar(sep) == 0){
    return("ERROR [load_links_file_format]: Given separator is an empty string")
  }
  if(is.null(header)){
    return("ERROR [load_links_file_format]: Given Header flag is NULL pointer")
  }else if(!is.logical(header)){
    return("ERROR [load_links_file_format]: Given Header flag is not a logical value")
  }
  # ---- Load file info ----
  info <- read.table(file, sep = sep, header = header)
  # Check the loaded object really is a non-empty table
  if(length(dim(info)) != 2){
    return("ERROR [load_links_file_format]: Read info is not a table")
  }else if(any(dim(info)==0)){
    return("ERROR [load_links_file_format]: Table is empty or is vector")
  }
  # ---- Detect format from column count (and content for 2 columns) ----
  if(ncol(info) == ncol_formatA | ncol(info) == ncol_specialFormat){ # FORMAT: HPO-HPO (weighted or not) or Loci-HPO
    # Loci-HPO: second column is all HPO terms, first column is not
    if(ncol(info) == ncol_specialFormat){
      if(all(grepl("^HP:[0-9]{7}$", info[,2])) & !all(grepl("^HP:[0-9]{7}$",info[,1]))){
        info <- as.data.frame(info, stringsAsFactors = FALSE)
        colnames(info) <- c("Loci","HPO")
        info[,1] <- as.character(info[,1])
        info[,2] <- as.character(info[,2])
        return(info)
      }
    }
    # HPO-HPO: first two columns are HPO terms; optional third is numeric
    if(!all(grepl("^HP:[0-9]{7}$",info[,1]))){ # Column 1: HP code
      return("ERROR [load_links_file_format]: Column 1 has not correct HPO-HPO format")
    }else if(!all(grepl("^HP:[0-9]{7}$",info[,2]))){ # Column 2: HP code
      return("ERROR [load_links_file_format]: Column 2 has not correct HPO-HPO format")
    }else if(ncol(info) == ncol_formatA){
      weighted <- TRUE
      if(any(is.na(as.numeric(info[,3])) | !is.double(as.numeric(info[,3])))){ # Column 3: Relationship weight
        return("ERROR [load_links_file_format]: Column 3 has not correct HPO-HPO format")
      }
    }else{ # Everything OK, two columns only
      weighted <- FALSE
    }
    # Parse to typed, named columns
    info <- as.data.frame(info)
    info[,1] <- as.character(info[,1])
    info[,2] <- as.character(info[,2])
    if(weighted){
      info[,3] <- as.double(info[,3])
      colnames(info) <- c("HPO_1","HPO_2","Value")
    }else{
      colnames(info) <- c("HPO_1","HPO_2")
    }
    return(info)
  }else if(ncol(info) == ncol_formatB){ # FORMAT: HPO-REGION
    # Validate each column against its expected pattern
    if(!all(grepl("(^[1-9]$)|(^1[0-9]$)|(^2[0-2]$)|(^[X,Y]$)",info[,1]))){ # Column 1: Chromosome
      return("ERROR [load_links_file_format]: Column 1 has not correct HPO-Region format")
    }else if(!all(grepl("^[0-9]*$",info[,2]))){ # Column 2: Start coord
      return("ERROR [load_links_file_format]: Column 2 has not correct HPO-Region format")
    }else if(!all(grepl("^[0-9]*$",info[,3]))){ # Column 3: End coord
      return("ERROR [load_links_file_format]: Column 3 has not correct HPO-Region format")
    }else if(!all(grepl("^HP:[0-9]{7}$",info[,4]))){ # Column 4: HP code
      return("ERROR [load_links_file_format]: Column 4 has not correct HPO-Region format")
    }else if(any(is.na(as.numeric(info[,5])) | !is.double(as.numeric(info[,5])))){ # Column 5: Relationship weight
      return("ERROR [load_links_file_format]: Column 5 has not correct HPO-Region format")
    }else if(!all(grepl("^(([1-9])|(1[0-9])|(2[0-2])|([X,Y]))\\.[0-9]{1,3}\\.[A-Z]\\.[0-9]{1,3}$",info[,6]))){
      return("ERROR [load_links_file_format]: Column 6 has not correct HPO-Region format")
    }else{ # Everything OK
      # Parse to typed, named columns
      info <- as.data.frame(info)
      info[,1] <- as.character(info[,1])
      info[,2] <- as.integer(info[,2])
      info[,3] <- as.integer(info[,3])
      info[,4] <- as.character(info[,4])
      info[,5] <- as.double(info[,5])
      info[,6] <- as.character(info[,6])
      colnames(info) <- c("Chr","Start","End","HPO","Value","Node")
      return(info)
    }
  }else{ # FORMAT: NOT ALLOWED
    return("ERROR [load_links_file_format]: Info format is not allowed")
  }
}
|
304883c63c7b33349b2ac0681c35e87242e2c1d2
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/gaussDiff/R/gaussDiff.R
|
c783b0d7203aef4ecb5db9f02d176c497976c27c
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,686
|
r
|
gaussDiff.R
|
######################################################
### gaussDiff.R
### implement multiple divergence measures to compare normal
### pdfs, i.e. similarity and dissimilarity measures
### Henning Rust Paris 18/02/09
### all the implemented similarity and dissimilarity measures
### are described in the chapter
### Dissimilarity Measures for Probability Distributions
### in the book "Analysis of Symbolic Data" by Hans-Hermann Bock
## visible functions
##--------------------------
### general wrapper function
## Divergence between two multivariate normal pdfs.
##
## General wrapper dispatching to the individual (dis)similarity measures.
##
## Args:
##   mu1, mu2       numeric mean vectors of the two distributions.
##   sigma1, sigma2 covariance matrices; may be NULL, in which case measures
##                  that need them return NA (sigma2 defaults to sigma1).
##   inv            logical; for "Hellinger" only, return 1 - d instead of d.
##   s              numeric mixing parameter of the Hellinger measure
##                  (symmetric for s = 0.5).
##   method         which measure to compute (partial matching via match.arg).
##
## Returns: a scalar of class "normdiff" carrying attributes "method",
##          "inverse" and "s" that record how it was computed.
##
## Fix: s and inv were previously recorded as attributes but never forwarded
## to normdiff.Hellinger(), so user-supplied values had no effect there.
normdiff <- function(mu1, sigma1 = NULL, mu2, sigma2 = sigma1, inv = FALSE, s = 0.5,
                     method = c("Mahalanobis", "KL", "J", "Chisq", "Hellinger", "L2", "Euclidean")) {
  ## TODO: could additionally check the sigmas for squareness and symmetry.
  method <- match.arg(method)
  d <- switch(method,
              Mahalanobis = normdiff.maha(mu1, sigma1, mu2),
              KL = normdiff.KL(mu1, sigma1, mu2, sigma2),
              J = normdiff.J(mu1, sigma1, mu2, sigma2),
              ## forward s/inv; fall back to the documented defaults when NULL
              ## so a NULL argument cannot crash normdiff.Hellinger()
              Hellinger = normdiff.Hellinger(mu1, sigma1, mu2, sigma2,
                                             s = if (is.null(s)) 0.5 else s,
                                             inv = isTRUE(inv)),
              Chisq = normdiff.Chisq(mu1, sigma1, mu2, sigma2),
              L2 = normdiff.L2(mu1, sigma1, mu2, sigma2),
              ## Euclidean distance of the means; kept in the wrapper for convenience
              Euclidean = sqrt(sum((mu1 - mu2)^2)))
  class(d) <- "normdiff"
  attr(d, "method") <- method
  if (!is.null(inv)) attr(d, "inverse") <- inv
  if (!is.null(s)) attr(d, "s") <- s
  return(d)
}
## internal functions
##--------------------------
### trace of a matrix
## Trace of a square matrix: the sum of its diagonal elements.
tt <- function(A) {
  sum(diag(A))
}
### Mahalanobis distance
## Mahalanobis-type quadratic form x' A^{-1} x.
##
## Args:
##   x      numeric vector.
##   A      square, invertible matrix.
##   factor if TRUE, the result is multiplied by 0.5.
##
## Returns: the quadratic form as a plain scalar (attributes dropped).
maha <- function(x, A, factor = FALSE) {
  quad.form <- t(x) %*% solve(A) %*% x
  if (factor) {
    quad.form <- 0.5 * quad.form
  }
  as.vector(quad.form)
}
### Mahalanobis Distance
## Mahalanobis distance between two normal pdfs:
## 0.5 * (mu1 - mu2)' sigma1^{-1} (mu1 - mu2).
## Returns NA when sigma1 is not supplied.
normdiff.maha <- function(mu1, sigma1, mu2) {
  if (is.null(sigma1)) {
    return(NA)
  }
  as.vector(maha(mu1 - mu2, sigma1, factor = TRUE))
}
### Kullback-Leibler divergence
## Kullback-Leibler divergence between two normal pdfs (direction as in the
## original formula: quadratic, trace and log-determinant terms, halved).
## Returns NA unless both covariance matrices are supplied.
normdiff.KL <- function(mu1, sigma1, mu2, sigma2) {
  if (is.null(sigma1) || is.null(sigma2)) {
    return(NA)
  }
  n.dim <- nrow(sigma1)
  sigma1.inv <- solve(sigma1)
  kl <- maha(mu1 - mu2, sigma1) +
    tt(sigma1.inv %*% sigma2 - diag(1, n.dim)) +
    log(det(sigma1) / det(sigma2))
  as.vector(0.5 * kl)
}
### J-coefficient, symmetric
### symmetrized Kullback-Leibler
## J-coefficient: the symmetrized Kullback-Leibler divergence (symmetric in
## the two distributions). Returns NA unless both covariance matrices are
## supplied.
normdiff.J <- function(mu1, sigma1, mu2, sigma2) {
  if (is.null(sigma1) || is.null(sigma2)) {
    return(NA)
  }
  n.dim <- nrow(sigma1)
  sigma1.inv <- solve(sigma1)
  sigma2.inv <- solve(sigma2)
  ident <- diag(1, n.dim)
  j.coef <- maha(mu1 - mu2, sigma1) + maha(mu1 - mu2, sigma2) +
    tt(sigma1.inv %*% sigma2 - ident) +
    tt(sigma2.inv %*% sigma1 - ident)
  as.vector(0.5 * j.coef)
}
### chi-square divergence for pdfs
normdiff.Chisq <- function(mu1,sigma1,mu2,sigma2){
# Chi-square divergence between two normal pdfs N(mu1, sigma1) and
# N(mu2, sigma2).  Returns NA when either covariance matrix is NULL.
# NOTE(review): there is no guard on the sqrt(det(...)) term below; when
# 2*sigma1*sigma2^{-1} - I has a negative determinant, sqrt() yields NaN.
d <- NA
if(!(is.null(sigma1)|is.null(sigma2))){
N <- nrow(sigma1)
inv.sigma1 <- solve(sigma1)
inv.sigma2 <- solve(sigma2)
sig1.invsig2 <- sigma1%*%inv.sigma2
# determinant ratio times an exponential of three quadratic-form terms
d <- det(sig1.invsig2)/sqrt(det(2*sig1.invsig2-diag(1,N)))*
exp(0.5*(maha(2*inv.sigma2%*%mu2-inv.sigma1%*%mu1,2*inv.sigma2-inv.sigma1)+
maha(mu1,sigma1)-2*maha(mu2,sigma2)))-1
}
return(as.vector(d))
}
### Hellinger distance,
### similarity measure
### 0<d<1,
### d=1, if P=Q,
### d=0, if P, Q have disjoint supports
### symmetric for s=0.5
normdiff.Hellinger<- function(mu1,sigma1,mu2,sigma2,s=0.5,inv=FALSE){
# Hellinger affinity between two normal pdfs N(mu1, sigma1) and
# N(mu2, sigma2).  Per the notes in this file: a similarity measure with
# d = 1 when the pdfs coincide and d = 0 for disjoint supports; symmetric
# for s = 0.5.
# Args:
#   s   mixing exponent weighting the two densities.
#   inv if TRUE, return 1 - d (i.e. turn the similarity into a dissimilarity).
# Returns NA when either covariance matrix is NULL.
d <- NA
if(!(is.null(sigma1)|is.null(sigma2))){
N <- nrow(sigma1)
I <- diag(1,N)
inv.sigma1 <- solve(sigma1)
inv.sigma2 <- solve(sigma2)
sig1inv.sig2 <- inv.sigma1%*%sigma2
sig2inv.sig1 <- inv.sigma2%*%sigma1
# two determinant factors times an exponential of quadratic-form terms
d <- det(s*I+(1-s)*sig1inv.sig2)**(-s/2)*
det((1-s)*I+s*sig2inv.sig1)**(-(1-s)/2)*
exp(0.5*(maha(s*inv.sigma2%*%mu2+(1-s)*inv.sigma1%*%mu1,s*inv.sigma2+(1-s)*inv.sigma1)-
s*maha(mu2,sigma2)-(1-s)*maha(mu1,sigma1)))
if(inv) d <- 1-d
}
return(as.vector(d))
}
### Minkowskis L2-distance
### symmetric dissimilarity coefficient
normdiff.L2<- function(mu1,sigma1,mu2,sigma2){
# Minkowski L2 distance between two normal pdfs (a symmetric dissimilarity
# coefficient, per the notes above).  Returns NA when either covariance
# matrix is NULL.
d <- NA
if(!(is.null(sigma1)|is.null(sigma2))){
N <- nrow(sigma1)
inv.sigma1 <- solve(sigma1)
inv.sigma2 <- solve(sigma2)
# two determinant-scaled self terms minus a cross term involving both pdfs
d <- 1/(2**N*pi**(N/2))*(1/sqrt(det(sigma1))+1/sqrt(det(sigma2)))-
2/((2*pi)**(N/2)*sqrt(det(sigma1+sigma2)))*
exp(0.5*(maha(inv.sigma1%*%mu1+inv.sigma2%*%mu2,inv.sigma1+inv.sigma2)-
maha(mu1,sigma1)-maha(mu2,sigma2)))
}
return(as.vector(d))
}
### a print function
## S3 print method for "normdiff" objects.
##
## Prints the measure used (plus, for the Hellinger measure, the s parameter
## and whether the value was inverted), followed by the numeric value.
##
## Fix: the original read attr(x, "inv"), which only worked via partial
## matching of the "inverse" attribute set by normdiff(); it also errored
## (if(logical(0))) when that attribute was absent because NULL == TRUE is
## logical(0).  isTRUE() on the exact attribute name handles both cases.
print.normdiff <- function(x, ...) {
  cat("Gaussian PDF distance:", attr(x, "method"))
  if (attr(x, "method") == "Hellinger") {
    cat(" with s=", attr(x, "s"), ", ")
    if (isTRUE(attr(x, "inverse")))
      cat("inverted")
  }
  cat("\n")
  cat(x, "\n")
}
|
25a332c666c406be64b96f846c523f30d32ac916
|
ca19c130932d2ff7f3680ec42bc23e830c0e5d8b
|
/examples/SVWQR.R
|
fe783671e999b1a0fbd865dca95b83510ee87ac3
|
[] |
no_license
|
PedroBSB/mlRFinance
|
1765a3c0993b06cb2f4248a6d2dbc6a236066b88
|
af5cc4c99f3ec7d90a123ce4c402f69b2a358fd8
|
refs/heads/master
| 2021-03-27T12:00:35.227497
| 2017-12-05T18:35:52
| 2017-12-05T18:35:52
| 71,817,274
| 8
| 21
| null | 2017-12-05T18:35:53
| 2016-10-24T18:09:36
|
C++
|
UTF-8
|
R
| false
| false
| 898
|
r
|
SVWQR.R
|
#Page 160 - Small
# Toy example: four one-dimensional training points A with labels d.
library(mlRFinance)
A<-matrix(c(1,2,5,6),nrow=4,ncol=1)
d<-c(-1,-1,+1,-1)
rank<-order(d)
# Fit a SVWQR model with a polynomial kernel (parameters c(2,1)).
# NOTE(review): this call passes 'rank' as the second argument while the
# second CSVWQR call below does not -- confirm the intended signature in the
# mlRFinance package.
svm2<- CSVWQR(d,rank, A, 50,0.5 ,0.5, "Polynomial", c(2,1))
svm2
PredictedCSVRL1(svm2, A, A)
R2PredictedCSVRL1(svm2, A)
# Load the quantmod package
library(quantmod)
# Create a new environment to store the data
stockData <- new.env()
# Specify the dates of interest
startDate = as.Date("2011-01-01")
endDate = as.Date("2011-12-31")
# Download the price data (the original comment mentioned PETR4/PETR3, but
# the symbol actually fetched is the ^BVSP index)
getSymbols("^BVSP", src="yahoo",from=startDate,to=endDate)
# Compute the log-returns of the closing prices
retorno<-na.omit(diff(log(Cl(BVSP))))
# Split into training (first 180 observations) and validation sets
train <- as.numeric(retorno[1:180])
valid <- as.numeric(retorno[181:216])
# Build the response (y) and one-step-lagged predictor (X) objects
y<-train[2:length(train)]
X<-matrix(train[1:(length(train)-1)],ncol=1)
#SVR
# Fit on the lagged-return regression with a Gaussian kernel
svm2<- CSVWQR(y, X, 1, 0.5, 1.0, "Gaussian", c(0.5))
svm2
PredictedCSVRL1(svm2, X, X)
R2PredictedCSVRL1(svm2, X)
|
07176d2fccf94d3c73a7bc69ae0bfbe33c4c5744
|
afa19b675487da30bd0f0d45d29d9b2d220f68b0
|
/man/genomic_ranges_reduce.Rd
|
f144293b8ea6cdfdd35644fe344960d589522ab3
|
[] |
no_license
|
rkirk822/rnaseq
|
fc930785525d0f02ed02cdebedd311afb31e2bbf
|
31b5a460058af5055a9cf2f63841e295805b992d
|
refs/heads/master
| 2021-09-17T09:21:48.623668
| 2018-06-29T21:13:20
| 2018-06-29T21:13:20
| 121,541,806
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 920
|
rd
|
genomic_ranges_reduce.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genomic_ranges_reduce.R
\name{genomic_ranges_reduce}
\alias{genomic_ranges_reduce}
\title{Reduce genomic ranges to set of non-overlapping ranges}
\usage{
genomic_ranges_reduce(inFile, outFile = NULL)
}
\arguments{
\item{inFile}{String - Name of bedfile containing genomic ranges}
\item{outFile}{String - Name to give output bedfile with non-overlapping genomic ranges}
}
\value{
reducedRanges
}
\description{
Given a bedfile containing genomic ranges, collapse overlapping ranges using the GenomicRanges package.
}
\details{
TIME: 10-20s, mostly to read the input file.
}
\examples{
bed1 = "/Users/nelsonlab/Documents/Results_temporarily_here/NucSeq_results/what_was_this/mm10_refSeq_exons_filtered_sorted.bed"
ranges = genomic_ranges_reduce(bed1, outFile="~/Documents/mm10_refSeq_exons_filtered_sorted_reduced.txt")
}
\author{
Emma Myers
}
|
495ad532f829e54eb519eed24b4189205a7e9436
|
496449f594ea62e002bd0b8d7d8e960d3a354e09
|
/Covid_analysis.R
|
9124159ef98aa1e95aa3208ccce177d56ecf9125
|
[] |
no_license
|
lanchett/Covid19
|
b12f965cc60aef7afddc19dd3f0bbd6be9a18407
|
fa4cb1389036cc59783059485c436639a1ab447c
|
refs/heads/master
| 2021-04-02T19:24:22.079848
| 2020-03-18T18:40:50
| 2020-03-18T18:40:50
| 248,311,798
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,192
|
r
|
Covid_analysis.R
|
library(tidyverse)
library(gganimate)
# Load data ---------------------------------------------------------------
# Full per-country COVID-19 time series, fetched over HTTP.
data <- read.csv("http://cowid.netlify.com/data/full_data.csv")
# Treat missing counts as zero, drop the 'Worldwide' aggregate row,
# parse the date column, and convert to a tibble.
data <-
data %>% replace_na(.,
list(
new_cases = 0,
new_deaths = 0,
total_cases = 0,
total_deaths = 0
)) %>%
filter(location != "Worldwide") %>%
mutate(date = as.Date(date)) %>% as_tibble()
# Prepare data ------------------------------------------------------------
# Map country names to the ISO-2 codes used by countryflags.io and build the
# flag-image URL.  NOTE(review): unmatched countries fall back to "BZ"
# (Belize) -- confirm this fallback is intentional.
data <- data %>% mutate(img = case_when(location == "Norway" ~ "NO",
location == "China" ~ "CN",
location == "Italy" ~ "IT",
location == "Thailand" ~ "TH",
location == "Australia" ~ "AU",
location == "Singapore" ~ "SG",
location == "France" ~ "FR",
location == "Japan"~ "JP",
location == "Malaysia" ~ "MY",
location == "South Korea" ~ "KR",
location == "United States" ~ "US",
location == "Vietnam" ~ "VN",
location == "Canada" ~ "CA",
location == "Germany" ~ "DE",
location == "Iran" ~ "IR",
location == "Bahrain" ~ "BH",
location == "Kuwait" ~ "KW",
location == "Spain" ~ "ES",
location == "United Kingdom" ~ "UK",
location == "Switzerland" ~ "CH",
location == "Netherlands" ~ "NL",
TRUE ~ "BZ"
),
link = paste0("https://www.countryflags.io/", img, "/shiny/64.png")
) %>%
filter(!location %in% c("China","World", "International"))
# Assign one sequential frame id per date for the animation.
# NOTE(review): group_indices() inside mutate() is superseded by
# cur_group_id() in newer dplyr versions -- left unchanged here.
data_agg <- data %>%
group_by(date) %>%
mutate(frame_id = group_indices()) %>%
ungroup()
# Wide (location x frame) table exported for a Flourish bar-chart race.
# NOTE(review): the chain ends in write.csv(), which returns NULL, so
# `flourish_data` does not hold the table -- only the CSV side effect
# remains.  Also values_fill uses the string "0" for what appears to be a
# numeric column; confirm against the tidyr version in use.
flourish_data <- data_agg %>% arrange(frame_id) %>%
select(location, frame_id, link, total_cases) %>%
pivot_wider(names_from = frame_id, values_from = total_cases, values_fill = list(total_cases = "0")) %>%
write.csv(file = "flourish.csv")
# Bar Chart Race (R-version) -----------------------------------------------------
# Not ready
# For every animation frame, rank countries by total cases and keep the
# top 10 (frames with 10 or fewer countries are dropped via `indicator`).
data_agg <-
map_df(
unique(data_agg$frame_id),
~ data_agg %>% filter(frame_id == .x &
!location %in% c("China", "International")) %>%
arrange(-total_cases) %>%
mutate(
ordering = 1:nrow(.),
indicator = case_when(max(ordering) <= 10 ~ 1,
TRUE ~ 0)
)
) %>%
filter(indicator == 0 & ordering <= 10) %>%
arrange(frame_id, ordering)
# `curr_year` holds the frame id shown as the big label on each frame.
data_use <- data_agg %>% mutate(curr_year = frame_id)
# Plot styling ------------------------------------------------------------
my_font <- 'Quicksand'
my_background <- 'antiquewhite'
my_pal <- c('#F8AFA8', '#74A089')
# NOTE(review): the double assignment `my_theme <- my_theme <- theme(` is
# redundant (both names end up identical) but harmless.
my_theme <- my_theme <- theme(
text = element_text(family = my_font),
rect = element_rect(fill = my_background),
plot.background = element_rect(fill = my_background, color = NA),
panel.background = element_rect(fill = my_background, color = NA),
panel.border = element_blank(),
plot.title = element_text(face = 'bold', size = 20),
plot.subtitle = element_text(size = 14),
panel.grid.major.y = element_blank(),
panel.grid.minor.y = element_blank(),
panel.grid.major.x = element_line(color = 'grey75'),
panel.grid.minor.x = element_line(color = 'grey75'),
legend.position = 'none',
plot.caption = element_text(size = 8),
axis.ticks = element_blank(),
axis.text.y = element_blank()
)
theme_set(theme_light() + my_theme)
# Animated bar chart: one tile per country per frame, animated over frame_id.
ggplot(aes(ordering, group = location), data = data_use) +
geom_tile(aes(
y = total_cases / 2,
height = total_cases,
width = 0.9
), alpha = 0.9) +
scale_fill_manual(values = my_pal) +
geom_text(
aes(y = total_cases, label = location),
family = my_font,
nudge_y = -2,
size = 3
) +
geom_text(aes(y = total_cases, label = total_cases),
family = my_font,
nudge_y = 0.5) +
geom_text(
aes(
x = 1,
y = 18.75,
label = paste0(curr_year)
),
family = my_font,
size = 8,
color = 'gray45'
) +
# NOTE(review): coord_flip() replaces the coord_cartesian() call above
# (ggplot keeps only one coordinate system), so clip = "off" and
# expand = FALSE are discarded -- confirm whether that is intended.
coord_cartesian(clip = "off", expand = FALSE) +
coord_flip() +
labs(
title = 'Covid-19 Development (Outside China)',
subtitle = 'Development day-by-day after outbreak',
caption = 'data source: ourworldindata.org',
x = '',
y = ''
) +
transition_states(frame_id,
transition_length = 7,
state_length = 3) +
ease_aes('cubic-in-out')
#labs(title = "Date: {frame_time}")
|
d158a20e81b7864cc2742506117294c5626f9340
|
aa95ae81839d50fed3bf1009d894a12387cb6dd9
|
/DES_Koen/function UDR.R
|
cdcea2e9461572c2a9943fbb4a0fcf8183b7b3dd
|
[] |
no_license
|
cfbalmaceda/2020-DARTH-Advanced-Workshop
|
5c05c3e077249fb04734480892fb3e9db6e50d24
|
802a14cc82fb545fc574cb02fdd794b62b9fd299
|
refs/heads/master
| 2021-05-19T12:29:56.982966
| 2020-04-03T13:47:49
| 2020-04-03T13:47:49
| 251,699,488
| 0
| 0
| null | 2020-03-31T18:43:55
| 2020-03-31T18:43:54
| null |
UTF-8
|
R
| false
| false
| 4,046
|
r
|
function UDR.R
|
#########################################################################################################
##
## UDR Function
##
## Supplementary to the manuscript:
##
## Comparing strategies for modeling competing risks in discrete event simulations:
## a simulation study and illustration in colorectal cancer
##
## by K. Degeling, H. Koffijberg, M.D. Franken, M. Koopman, M.J. IJzerman
##
## DOI:
##
## This code illustrates how the UDR approach, as defined in the paper, can be implemented in a general
## function in R Statistical Software. Depending on specific case study needs, modifications to the
## code may be required.
##
## The function uses the 'fitdist' function, which is part of the 'fitdistrplus' package, to fit a
## parametric distribution reflecting the time-to-event evidence.
##
## Function inputs
## - data matrix or data.frame including two columns:
## > time: containing the time observations for the individuals
## > event: containing the event corresponding to 'time' observations
## - events vector containing the events that should be included in the analysis
## - dist the distribution to be fitted to the time-to-event data, as supported by the
## 'fitdist' function
##
## Function outputs
## - the function returns the fitted logistic regression model and time-to-event distribution parameter
## values in two list items: 'regression.model' and 'tte.dist', respectively
##
#########################################################################################################
## Function ----
## UDR approach for modeling competing risks in a discrete event simulation.
##
## Fits (1) a logistic regression of event type on time and (2) a single
## parametric time-to-event distribution on the pooled event times.
##
## Args:
##   data   matrix or data.frame with a numeric, strictly positive "time"
##          column and an "event" column.  NOTE(review): a character matrix
##          fails the numeric 'time' check, so matrix input effectively
##          requires all-numeric data.
##   events vector of event labels to keep; all must occur in the data.
##   dist   name of a distribution supported by fitdistrplus::fitdist().
##
## Returns: list(regression.model = <glm fit>, tte.dist = <named parameter
##          vector of the fitted time-to-event distribution>).
##
## Fix: scalar if() conditions now use || instead of the elementwise |.
UDR <- function(data, events, dist) {
  ## ---- Input validation: fail fast with informative messages ----
  if (!(is.data.frame(data) || is.matrix(data))) stop("The 'data' object should be a matrix or a data.frame!")
  if (!("time" %in% colnames(data))) stop("There is no 'time' column in 'data'!")
  if (!is.numeric(data[, "time"])) stop("The provided values in the 'time' column are not numerical!")
  if (any(is.na(data[, "time"]))) stop("NA values are present in the 'time' column!")
  if (any(data[, "time"] <= 0)) stop("Negative values or zeros are present in the 'time' column!")
  if (!("event" %in% colnames(data))) stop("There is no 'event' column in 'data'!")
  if (any(is.na(data[, "event"]))) stop("NA values are present in the 'event' column!")
  if (!all(events %in% data[, "event"])) stop("Not all events are present in the data!")
  if (!is.character(dist)) stop("The 'dist' input should be a character!")
  ## Attach 'fitdistrplus' only after validation succeeded (provides fitdist)
  library(fitdistrplus)
  ## Convert 'data' into a data.frame if necessary
  if (is.matrix(data)) {
    data <- as.data.frame(data)
  }
  ## Keep only the times and events of the included events
  data <- subset(data, event %in% events)
  ## Events must be a factor for the logistic regression
  if (!is.factor(data$event)) {
    data$event <- as.factor(data$event)
  }
  ## ---- Estimate the parameter values ----
  ## Logistic regression: event type as a function of time
  regression.model <- glm(event ~ time, family = binomial(link = 'logit'), data = data)
  ## Parametric distribution of the pooled event times
  tte.dist <- fitdist(data$time, dist)$estimate
  return(list(regression.model = regression.model, tte.dist = tte.dist))
}
|
f8c89482da8f44788748445d9b3882f5805281c3
|
6d37064b9751e41020f1836b060054ce8b94bd56
|
/R/utils_text.R
|
bf9f84e16d20f9f283b4c4298d521d286c0dcb2f
|
[
"MIT"
] |
permissive
|
Chenyz03/pointblank
|
78c2a7fdbb48fba8798f6008fcd44a9301021cbc
|
51b4fd1a35b8d1368668171ef189e36d038096b5
|
refs/heads/master
| 2023-01-07T23:22:34.262264
| 2020-05-08T17:52:27
| 2020-05-08T17:52:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,471
|
r
|
utils_text.R
|
# Language codes for which report/autobrief translations are available.
reporting_languages <- c("en", "fr", "de", "it", "es")
#
# Text for autobriefs
#
# Each constant below is a character vector of translations keyed by the
# language codes in `reporting_languages`.  Brace-delimited tokens such as
# "{values_text}" are template placeholders, presumably interpolated with
# glue elsewhere in the package.
#
# Label shown when a precondition was applied to the table.
precondition_text <-
c(
"en" = "Precondition applied",
"fr" = "Condition pr\u00E9alable",
"de" = "Voraussetzung angewendet",
"it" = "Prerequisito applicato",
"es" = "Condici\u00F3n previa aplicada"
)
# Parenthetical label for a column that was computed rather than read.
column_computed_text <-
c(
"en" = "computed column",
"fr" = "colonne calcul\u00E9e",
"de" = "berechnete Spalte",
"it" = "colonna calcolata",
"es" = "columna calculada"
)
# Suffix used when a long list of values is truncated.
values_text <-
c(
"en" = "and {num_omitted} more",
"fr" = "et {num_omitted} de plus",
"de" = "und {num_omitted} mehr",
"it" = "e altri {num_omitted}",
"es" = "y {num_omitted} m\u00E1s"
)
# Autobrief for operator-based comparison validations.
compare_expectation_text <-
c(
"en" = "Expect that values in {column_text} {column_computed_text} should be {operator} {values_text}.",
"fr" = "Attendez-vous \u00E0 ce que les valeurs de {column_text} {column_computed_text} soient {operator} {values_text}.",
"de" = "Erwarten Sie, dass die Werte in {column_text} {column_computed_text} {operator} {values_text} sein sollten.",
"it" = "Aspettatevi che i valori in {column_text} {column_computed_text} dovrebbero essere {operator} {values_text}.",
"es" = "Espere que los valores en {column_text} {column_computed_text} sean {operator} {values_text}."
)
# Autobrief: values expected to be inside a given set.
in_set_expectation_text <-
c(
"en" = "Expect that values in {column_text} {column_computed_text} should be in the set of {values_text}.",
"fr" = "Attendez-vous \u00E0 ce que les valeurs de {column_text} {column_computed_text} soient dans l'ensemble de {values_text}.",
"de" = "Erwarten Sie, dass die Werte in {column_text} {column_computed_text} in der Menge von {values_text} enthalten sein sollten.",
"it" = "Aspettatevi che i valori in {column_text} {column_computed_text} siano nell'insieme di {values_text}.",
"es" = "Espere que los valores en {column_text} {column_computed_text} est\u00E9n en el conjunto de {values_text}."
)
# Autobrief: values expected to be outside a given set.
not_in_set_expectation_text <-
c(
"en" = "Expect that values in {column_text} {column_computed_text} should not be in the set of {values_text}.",
"fr" = "Attendez-vous \u00E0 ce que les valeurs de {column_text} {column_computed_text} ne soient pas dans l'ensemble de {values_text}.",
"de" = "Erwarten Sie, dass die Werte in {column_text} {column_computed_text} nicht in der Menge von {values_text} enthalten sein sollten.",
"it" = "Aspettatevi che i valori in {column_text} {column_computed_text} non debbano essere nel set di {values_text}.",
"es" = "Espere que los valores en {column_text} {column_computed_text} no est\u00E9n en el conjunto de {values_text}."
)
# Autobrief: values expected to fall within a range.
between_expectation_text <-
c(
"en" = "Expect that values in {column_text} {column_computed_text} should be between {value_1} and {value_2}.",
"fr" = "Attendez-vous \u00E0 ce que les valeurs de {column_text} {column_computed_text} soient comprises entre {value_1} et {value_2}.",
"de" = "Erwarten Sie, dass die Werte in {column_text} {column_computed_text} zwischen {value_1} und {value_2} liegen sollten.",
"it" = "Aspettati che i valori in {column_text} {column_computed_text} siano compresi tra {value_1} e {value_2}.",
"es" = "Espere que los valores en {column_text} {column_computed_text} est\u00E9n entre {value_1} y {value_2}."
)
# Autobrief: values expected to fall outside a range.
not_between_expectation_text <-
c(
"en" = "Expect that values in {column_text} {column_computed_text} should not be between {value_1} and {value_2}.",
"fr" = "Attendez-vous \u00E0 ce que les valeurs de {column_text} {column_computed_text} ne soient pas comprises entre {value_1} et {value_2}.",
"de" = "Erwarten Sie, dass die Werte in {column_text} {column_computed_text} nicht zwischen {value_1} und {value_2} liegen sollten.",
"it" = "Aspettatevi che i valori in {column_text} {column_computed_text} non debbano essere compresi tra {value_1} e {value_2}.",
"es" = "Espere que los valores en {column_text} {column_computed_text} no est\u00E9n entre {value_1} y {value_2}."
)
# Autobrief: all values expected to be NULL (missing).
null_expectation_text <-
c(
"en" = "Expect that all values in {column_text} {column_computed_text} should be NULL.",
"fr" = "Attendez-vous \u00E0 ce que toutes les valeurs de {column_text} {column_computed_text} soient NULL.",
"de" = "Erwarten Sie, dass alle Werte in {column_text} {column_computed_text} NULL sein sollten.",
"it" = "Aspettatevi che tutti i valori in {column_text} {column_computed_text} siano NULL.",
"es" = "Espere que todos los valores en {column_text} {column_computed_text} sean NULL."
)
# Autobrief: all values expected to be non-NULL.
not_null_expectation_text <-
c(
"en" = "Expect that all values in {column_text} {column_computed_text} should not be NULL.",
"fr" = "Attendez-vous \u00E0 ce que toutes les valeurs de {column_text} {column_computed_text} ne soient pas NULL.",
"de" = "Erwarten Sie, dass alle Werte in {column_text} {column_computed_text} nicht NULL sein sollten.",
"it" = "Aspettatevi che tutti i valori in {column_text} {column_computed_text} non debbano essere NULL.",
"es" = "Espere que todos los valores en {column_text} {column_computed_text} no sean NULL."
)
# Autobrief: values expected to match a regular expression.
regex_expectation_text <-
c(
"en" = "Expect that values in {column_text} {column_computed_text} should match the regular expression: {values_text}.",
"fr" = "Attendez-vous \u00E0 ce que les valeurs de {column_text} {column_computed_text} correspondent \u00E0 l'expression r\u00E9guli\u00E8re: {values_text}.",
"de" = "Erwarten Sie, dass die Werte in {column_text} {column_computed_text} mit dem regul\u00E4ren Ausdruck {values_text} \u00FCbereinstimmen.",
"it" = "Aspettati che i valori in {column_text} {column_computed_text} debbano corrispondere all'espressione regolare: {values_text}.",
"es" = "Espere que los valores en {column_text} {column_computed_text} coincidan con la expresi\u00F3n regular: {values_text}."
)
# Autobrief: conjoint validation across several expressions.
conjointly_expectation_text <-
c(
"en" = "Expect conjoint 'pass' units across the following expressions: {values_text}.",
"fr" = "Attendez-vous \u00E0 des unit\u00E9s de \u00ABpass\u00BB conjointes dans les expressions suivantes: {values_text}.",
"de" = "Erwarten Sie gemeinsame 'Pass'-Einheiten f\u00FCr die folgenden Ausdr\u00FCcke: {values_text}.",
"it" = "Aspettatevi unit\u00E1 'pass' congiunte tra le seguenti espressioni: {values_text}.",
"es" = "Espere unidades conjuntas de 'pass' en las siguientes expresiones: {values_text}."
)
# Autobrief: a column is expected to exist in the table.
col_exists_expectation_text <-
c(
"en" = "Expect that column {column_text} exists.",
"fr" = "Attendez-vous \u00E0 ce que la colonne {column_text} existe.",
"de" = "Erwarten Sie, dass die Spalte {column_text} vorhanden ist.",
"it" = "Aspettati che la colonna {column_text} esista.",
"es" = "Espere que exista la columna {column_text}."
)
# Autobrief: a column is expected to have a given type.
col_is_expectation_text <-
c(
"en" = "Expect that column {column_text} is of type: {col_type}.",
"fr" = "Attendez-vous \u00E0 ce que la colonne {column_text} soit de type: {col_type}.",
"de" = "Erwarten Sie, dass die Spalte {column_text} vom Typ {col_type} ist.",
"it" = "Aspettati che la colonna {column_text} sia di tipo: {col_type}.",
"es" = "Espere que la columna {column_text} sea del tipo: {col_type}."
)
# Autobrief: rows expected to be distinct across all columns.
all_row_distinct_expectation_text <-
c(
"en" = "Expect entirely distinct rows across all columns.",
"fr" = "Attendez-vous \u00E0 des lignes enti\u00E8rement distinctes dans toutes les colonnes.",
"de" = "Erwarten Sie in allen Spalten v\u00F6llig unterschiedliche Zeilen.",
"it" = "Aspettati righe completamente distinte su tutte le colonne.",
"es" = "Espere filas completamente distintas en todas las columnas."
)
# Autobrief: rows expected to be distinct across the selected columns.
across_row_distinct_expectation_text <-
c(
"en" = "Expect entirely distinct rows across {column_text}.",
"fr" = "Attendez-vous \u00E0 des lignes enti\u00E8rement distinctes sur {column_text}.",
"de" = "Erwarten Sie v\u00F6llig unterschiedliche Zeilen in {column_text}.",
"it" = "Aspettati righe completamente distinte su {column_text}.",
"es" = "Espere filas completamente distintas en {column_text}."
)
# Autobrief: the table's column schema is expected to match a given schema.
col_schema_match_expectation_text <-
c(
"en" = "Expect that column schemas match.",
"fr" = "Attendez-vous \u00E0 ce que les sch\u00E9mas de colonnes correspondent.",
"de" = "Erwarten Sie, dass die Spaltenschemata \u00FCbereinstimmen.",
"it" = "Aspettati che gli schemi di colonna corrispondano.",
"es" = "Espere que los esquemas di colonna corrispondano."
)
#
# Text for agent report
#
# Translations used in the rendered validation report, keyed by the language
# codes in `reporting_languages`.
#
# Title of the rendered validation report.
pointblank_validation_title_text <-
c(
"en" = "Pointblank Validation",
"fr" = "Validation Pointblank",
"de" = "Pointblank-Validierung",
"it" = "Convalida Pointblank",
"es" = "Validaci\u00F3n de Pointblank"
)
# Title used when only a validation plan (no interrogation) is shown.
pointblank_validation_plan_text <-
c(
"en" = "Pointblank Validation Plan",
"fr" = "Plan de validation de Pointblank",
"de" = "Pointblank-Validierungsplan",
"it" = "Piano di convalida Pointblank",
"es" = "Plan de validaci\u00F3n de Pointblank"
)
# Notice shown when the agent has not performed an interrogation yet.
no_interrogation_performed_text <-
c(
"en" = "No Interrogation Performed",
"fr" = "Aucune interrogation effectu\u00E9e",
"de" = "Keine Abfrage durchgef\u00FChrt",
"it" = "Nessuna interrogazione eseguita",
"es" = "No se realizan interrogatorios"
)
# Column-header label: single validation step.
report_col_step <-
c(
"en" = "STEP",
"fr" = "\u00C9TAPE",
"de" = "SCHRITT",
"it" = "INDICE",
"es" = "\u00CDNDICE"
)
# Column-header label: multiple validation steps.
report_col_steps <-
c(
"en" = "STEPS",
"fr" = "\u00C9TAPES",
"de" = "SCHRITTE",
"it" = "INDICI",
"es" = "\u00CDNDICES"
)
# Column-header label: columns involved in a step.
report_col_columns <-
c(
"en" = "COLUMNS",
"fr" = "COLONNES",
"de" = "SPALTEN",
"it" = "COLONNE",
"es" = "COLUMNAS"
)
# Column-header label: values involved in a step.
report_col_values <-
c(
"en" = "VALUES",
"fr" = "VALEURS",
"de" = "WERTE",
"it" = "VALORI",
"es" = "VALORES"
)
# Column-header label: number of test units.
report_col_units <-
c(
"en" = "UNITS",
"fr" = "UNIT\u00C9S",
"de" = "EINH.",
"it" = "UNIT\u00C0",
"es" = "UNIDADES"
)
# Column-header label: the column schema.
report_column_schema <-
c(
"en" = "SCHEMA",
"fr" = "SCH\u00C9MA",
"de" = "SCHEMA",
"it" = "SCHEMA",
"es" = "ESQUEMA"
)
# Column-header label: R column types.
report_r_col_types <-
c(
"en" = "R TYPES",
"fr" = "TYPES R",
"de" = "R-TYPEN",
"it" = "TIPI R",
"es" = "TIPOS R"
)
# Column-header label: SQL column types.
report_r_sql_types <-
c(
"en" = "SQL TYPES",
"fr" = "TYPES SQL",
"de" = "SQL-TYPEN",
"it" = "TIPI SQL",
"es" = "TIPOS SQL"
)
|
922d14cdd04f2239bd12bc91f52f0b4bdeff09b9
|
4fa10361f4cb3a7e01618acd898db278ae9d3546
|
/METABRIC/METABRIC.Heatmap.CC.RNASeq.WHX.R
|
7252528636e55fb22f22d4438dd6e600fefa01d8
|
[] |
no_license
|
dudious/QCRI-SIDRA-ICR
|
2e8005e518a51ffba8a7c02adbf4ceb3206af3a2
|
c38d69b7eb523cb6f5e869d8a2220a13abeb4944
|
refs/heads/master
| 2021-04-18T23:35:44.960473
| 2019-05-22T09:04:38
| 2019-05-22T09:04:38
| 32,503,747
| 7
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,113
|
r
|
METABRIC.Heatmap.CC.RNASeq.WHX.R
|
#################################################################
###
### This Script Plots Heatmaps based on
### Consensus Clustering grouping of RNASeq Data
### from METABRIC
###
### Input data :
### ./3 ANALISYS/CLUSTERING/RNAseq/...
### Data is saved :
### NO DATA
### Figures are saved :
### ./4 FIGURES/Heatmaps
###
#################################################################
# Setup environment
rm(list=ls())
setwd("~/Dropbox/BREAST_QATAR/")
# Dependencies: install 'gplots' if it is missing, then load it
required.packages <- c("gplots")
missing.packages <- required.packages[!(required.packages %in% installed.packages()[,"Package"])]
if(length(missing.packages)) install.packages(missing.packages)
library("gplots")
# Load Data
Geneset <- "16G.I" # SET GENESET HERE !!!!!!!!!!!!!!
K <- 4 # SET K here
# Consensus-clustering class assignments (PatientID -> cluster group)
Consensus.class <- read.csv(paste0("./3 ANALISYS/CLUSTERING/RNAseq/BRCA.METABRIC.DATA.1.k4.",Geneset,".reps1000/BRCA.METABRIC.DATA.1.k4.",Geneset,".reps1000.k=",K,".consensusClass.csv"),header=FALSE) # select source data
colnames (Consensus.class) <- c("PatientID","Group")
rownames(Consensus.class) <- Consensus.class[,1]
# Loads the RNASEQ.DATA.1.subset object from the .RData file
load (paste0("./2 DATA/SUBSETS/METABRIC.RNASEQ.DATA.1.genesubset.",Geneset,".RData"))
RNASeq.subset <- as.matrix(RNASEQ.DATA.1.subset)
# ordering: append the per-patient mean expression, join the cluster labels,
# then sort by group (custom order 2,1,3,4) and by mean expression
RNASeq.subset <- cbind(RNASEQ.DATA.1.subset,rowMeans(RNASEQ.DATA.1.subset[, -ncol(RNASEQ.DATA.1.subset)]))
colnames(RNASeq.subset)[ncol(RNASeq.subset)] <- c("avg")
RNASeq.subset <- merge (RNASeq.subset,Consensus.class,by="row.names")
row.names(RNASeq.subset) <- RNASeq.subset$Row.names
RNASeq.subset$Row.names <- NULL
RNASeq.subset$PatientID <- NULL
RNASeq.subset <- RNASeq.subset[order(factor(RNASeq.subset$Group,levels = c(2,1,3,4)),RNASeq.subset$avg),] #order the Classification table by group then by average expression
RNASeq.subset$avg <- NULL
RNASeq.subset$Group <- NULL
Consensus.class<-Consensus.class[rownames(RNASeq.subset),]
rm(RNASEQ.DATA.1.subset)
# Heatmap
# Column side-bar colors: red where Group == "4", blue otherwise.
# NOTE(review): the legend below labels these "group 3" / "other groups" --
# confirm which cluster number is actually intended.
color.map <- function(Consensus.class) { if (Consensus.class=="4") "#FF0000" else "#0000FF" } #Set color scheme
patientcolors <- unlist(lapply(Consensus.class$Group, color.map)) # Apply color scheme to patients
my.palette <- colorRampPalette(c("blue", "white", "orange"))(n = 299)
my.colors = c(seq(-4,-0.5,length=100),seq(-0.5,1,length=100),seq(1,4,length=100))
png(paste0("./4 FIGURES/Heatmaps/METABRIC.DATA1.Heatmap.RNASeq.R1000.",Geneset,".k=",K,".png"),res=600,height=6,width=6,unit="in") # set filename
heatmap.2(t(RNASeq.subset),
main = paste0("Heatm.MBD1.RNASeq-",Geneset,".sel.K=",K),
col=my.palette,breaks=my.colors, # color scheme: orange = high, blue = low
ColSideColors=patientcolors, # set group colors
key=TRUE,symm=FALSE,symkey=FALSE,symbreaks=TRUE,
scale="row", density.info="none", trace="none",
labCol=FALSE,cexRow=1.3,cexCol=0.1,margins=c(2,7),
Colv=FALSE)
par(lend = 1)
legend("topright",legend = c("group 3", "other groups"),
col = c("red", "blue"),lty= 1,lwd = 5)
dev.off()
|
60c15c0bcc58a444b8861f07e595af74abdc9d3c
|
34914a35bd83b5a587bab647e6e0fc892a119dd5
|
/Gillespie_Algorithm.R
|
26e8949913a73094813d97f03b390292ecf4f9ef
|
[] |
no_license
|
martind-hub/Gillespie-Algorithm
|
293f0e06243e048d2a268332865435d332393071
|
a3e8e1b8044e4a1e263bc2a2c89e8c5b07643b56
|
refs/heads/main
| 2023-07-14T08:39:46.069253
| 2021-08-30T18:08:38
| 2021-08-30T18:08:38
| 401,440,365
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,917
|
r
|
Gillespie_Algorithm.R
|
# import the necessarily libraries
library(ggplot2)
#################################################################################################
#### Gillespie algorithm ####
#################################################################################################
# define the function to output the results from gillespie algorithm
# Stochastic SIR simulation via the Gillespie algorithm.
#
# S, I, R  : initial susceptible / infected / recovered counts
# t        : start time
# beta     : infection rate (event rate beta * S * I)
# r        : recovery rate (event rate r * I)
# endsim   : if "yes", the run restarts from the initial state whenever the
#            epidemic dies out (I reaches 0) and stops once `stoptime`
#            events have been recorded; any other value runs to extinction
# stoptime : maximum event count (only honoured when endsim == "yes")
#
# Returns a list with the S/I/R trajectories and the matching event times
# (element 1 of each vector is the initial state).
#
# The two near-identical while-loops of the original implementation have
# been merged; the sequence of rexp()/sample() draws is unchanged, so runs
# under a fixed seed reproduce the original results exactly.
gillespie <- function(S, I, R, t, beta, r, endsim, stoptime) {
  # possible transition types
  states <- c("infected", "recovered")
  # trajectory accumulators
  Susceptible <- S
  Infected <- I
  Recovered <- R
  transition_times <- t
  counter <- 1
  limited <- endsim == "yes"  # TRUE -> honour stoptime and restart on die-out
  # total event rate beta*S*I + r*I; zero rate means the epidemic is over
  while ((beta*S*I + r*I) > 0 && (!limited || counter < stoptime)) {
    if (limited) {
      counter <- counter + 1
    }
    # exponential waiting time until the next event
    time_step <- rexp(1, rate=(beta*S*I + r*I))
    transition_times <- c(transition_times, transition_times[length(transition_times)] + time_step)
    # probability that the next event is an infection (vs a recovery)
    p <- beta*S*I/(beta*S*I + r*I)
    q <- 1-p
    transition <- sample(states, 1, prob = c(p,q))
    if (transition == "infected") {
      S <- S - 1
      I <- I + 1
    } else {
      I <- I - 1
      if (limited && I == 0) {
        # epidemic died out: reset everything to the initial state and retry
        S <- Susceptible[1]
        Susceptible <- S
        I <- Infected[1]
        Infected <- I
        R <- Recovered[1]
        Recovered <- R
        transition_times <- t
        counter <- 1
        next
      }
      R <- R + 1
    }
    # record the post-event state
    Susceptible <- c(Susceptible, S)
    Infected <- c(Infected, I)
    Recovered <- c(Recovered, R)
  }
  # return a list of values
  return(list("Susceptible" = Susceptible, "Infected" = Infected, "Recovered" = Recovered, "time" = transition_times))
}
# Run the algorithm.
# Fix the RNG seed so the stochastic trajectory is reproducible.
set.seed(1234)
gil_output_fixed <- gillespie(S = 999, I = 1, R = 0, t = 0, beta = 0.05, r = 10, endsim = "no", stoptime = Inf)
# Collect the trajectories in a data frame for ggplot.
gil_results <- data.frame(T = gil_output_fixed$time, S = gil_output_fixed$Susceptible, I = gil_output_fixed$Infected, R = gil_output_fixed$Recovered)
# Plot S/I/R against time on a single set of axes.
# NOTE(review): unnamed `values` in scale_colour_manual() are matched to the
# scale's limits (level order), not to `breaks` -- verify the intended
# line colours. Also, the column name T shadows the base alias for TRUE.
ggplot(data = gil_results, aes(x=T)) +
  geom_line(aes(y=S, colour = "Susceptible")) +
  geom_line(aes(y=I, colour = "Infectious")) +
  geom_line(aes(y=R, colour = "Recovered")) +
  scale_colour_manual("",
                      breaks = c("Susceptible", "Infectious", "Recovered"),
                      values = c("blue", "orange", "green")) +
  theme(plot.title = element_text(hjust = 0.5)) +
  labs(x = "Time", y = "Number of people")
#################################################################################################
####                              Noisy observations                                         ####
#################################################################################################
# Re-run the Gillespie algorithm with the same seed so the "true" trajectory
# matches the one plotted above.
set.seed(1234)
gil_output_fixed <- gillespie(S = 999, I = 1, R = 0, t = 0, beta = 0.05, r = 10, endsim = "no", stoptime = Inf)
# Define a function that generates noisy observations of the trajectory.
# Generate noisy (Poisson-distributed) observations of the infected
# trajectory at (roughly) regularly spaced observation times.
#
# gen_data : list with components $time and $Infected, as returned by
#            gillespie()
# steps    : nominal number of observation intervals; the simulated time span
#            is divided into slices of width t_final/steps
#
# Returns a list with the noisy counts ($noisy), their observation times
# ($time), the step settings ($steps, $step) and the index of the last
# sampled transition ($last_index).
#
# Fixed from the original: a leftover debug print(i) inside the search loop
# has been removed; the computation is unchanged.
noise_gen <- function(gen_data, steps) {
  # final simulated time
  t_final <- gen_data$time[length(gen_data$time)]
  # width of one observation interval
  time_step <- t_final/steps
  # Build the observation-time grid.
  # NOTE(review): the loop stops at (steps - 20), so only the first
  # steps - 19 grid points are generated -- presumably to drop the flat tail
  # of the epidemic, but it silently misbehaves for steps <= 20. Confirm the
  # intent before reusing with small `steps`.
  time_line <- time_step
  for (i in 1:(steps-20)) {
    time_line <- c(time_line, time_line[i]+time_step)
  }
  # For each observation time, find the index of the transition event
  # immediately preceding it (linear scan; strict inequalities skip ties).
  indices <- NULL
  for (i in 1:length(time_line)) {
    for (j in 1:length(gen_data$time)) {
      if (gen_data$time[j] < time_line[i] && time_line[i] < gen_data$time[j+1]) {
        indices <- c(indices, j)
        break
      }
    }
  }
  # true infected counts and exact event times at the sampled indices
  infected <- gen_data$Infected[indices]
  inf_times <- gen_data$time[indices]
  # Poisson noise centred on the true counts
  noisy_obs <- NULL
  for (i in 1:length(infected)) {
    noisy_obs <- c(noisy_obs, rpois(1, lambda = infected[i]))
  }
  # return the noisy observations and the time axis
  return(list("noisy" = noisy_obs, "time" = inf_times, "steps" = steps, "step" = time_step, "last_index" = indices[length(indices)]))
}
# Generate noisy observations of the simulated trajectory.
noise <- noise_gen(gil_output_fixed, 50)
# Put the noisy points in a data frame for plotting.
noise_frame <- data.frame(t = noise$time, n = noise$noisy)
# Truncate the true trajectory at the last noisy observation.
noise_results <- data.frame(T = gil_output_fixed$time[1:noise$last_index], I = gil_output_fixed$Infected[1:noise$last_index])
# Plot the true infected curve (orange line) with the noisy observations
# overlaid as red crosses.
ggplot(data = noise_results, aes(x=T)) +
  geom_line(aes(y=I), col = "orange") +
  geom_point(data=noise_frame, aes(x=t, y=n), col = "red", shape = 4, size = 2) +
  labs(x = "Time", y = "Number of people")
|
aa5ef65937ec825285e36f901f6e3de86c29758e
|
266bca1ee343ddc3870dfd80a3acd12be6c6fc6d
|
/Summary.R
|
37e8c77d01ff7d401ca77c897d8e837fae3da0d8
|
[] |
no_license
|
MichaelStickels/Educational_Justice_Visualization_Project
|
be184a26995a1f929dd10674409d38ec452596b6
|
74a12339e8f8b190ba0f9c057845b44c0f2d7ab4
|
refs/heads/main
| 2023-03-25T01:54:52.862810
| 2021-03-14T20:21:00
| 2021-03-14T20:21:00
| 332,898,047
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,598
|
r
|
Summary.R
|
# Compute headline summary statistics for the project's data frames.
# Load in libraries
library(stringr)
library(tidyr)
library(dplyr)
# Load in tables (read directly from the project's GitHub raw URLs;
# requires network access)
neighborhoods <- read.csv("https://raw.githubusercontent.com/MichaelStickels/Educational_Justice_Visualization_Project/main/Data/NCES%20Data/School%20Neighborhood%20Poverty%20Estimates%20(2015-2016)/EDGE_SIDE1216_PUBSCHS1516.csv")
economic <- read.csv("https://raw.githubusercontent.com/MichaelStickels/Educational_Justice_Visualization_Project/main/Data/NCES%20Data/Selected%20Economic%20Characteristics%20of%20Relevant%20Children%20Enrolled%20(Public%20and%20Private)%20(2014-2018)/CDP03_104_USSchoolDistrictAll.csv")
comparablewageindex <- read.csv("https://raw.githubusercontent.com/MichaelStickels/Educational_Justice_Visualization_Project/main/Data/NCES%20Data/Comparable%20Wage%20Index%20for%20Teachers%20(2016)/EDGE_ACS_CWIFT2016_County.csv")
yaledata <- read.csv("https://raw.githubusercontent.com/MichaelStickels/Educational_Justice_Visualization_Project/main/Data/Yale%20Climate%20Opintion%20Data/YCOM_2020_Data.csv")
# NOTE(review): summary_info is created but never populated or used below --
# probably a leftover from an earlier "return a list of info" design.
summary_info <- list()
# Number of counties in the Comparable Wage Index for Teachers
total_counties <- nrow(comparablewageindex)
# Number of neighborhoods in the poverty-estimate table
num_neighborhoods <- nrow(neighborhoods)
# Number of school districts in the economic-characteristics table
num_school_districts <- nrow(economic)
# Number of locations (rows) in the Yale climate-opinion data
num_locations <- nrow(yaledata)
# Mean % of people who support CO2 emission limits, formatted as "xx.x%"
support_co2_limits <- sprintf("%0.1f%%", yaledata %>%
  summarize(CO2limits = mean(CO2limits)) %>%
  pull(CO2limits))
# Mean % of citizens who want global-warming education in schools,
# formatted as "xx.x%"
for_global_warming_edu <- sprintf("%0.1f%%", yaledata %>%
  summarize(teachGW = mean(teachGW)) %>%
  pull(teachGW))
# NOTE(review): IPR_EST is an income-to-poverty *ratio*, so max(IPR_EST)
# selects the least-poor neighborhood; the variable name
# (max_pov_neighborhood) and the original comment ("lowest poverty ratio")
# point in opposite directions. Confirm the intended interpretation.
max_pov_neighborhood <- neighborhoods %>%
  filter(IPR_EST == max(IPR_EST)) %>%
  head(1) %>%
  pull(IPR_EST)
# Neighborhood with the minimum income-to-poverty ratio (see note above)
min_pov_neighborhood <- neighborhoods %>%
  filter(IPR_EST == min(IPR_EST)) %>%
  pull(IPR_EST)
# Mean income-to-poverty ratio across neighborhoods
mean_pov_rate <- neighborhoods %>%
  summarize(IPR_EST = mean(IPR_EST)) %>%
  pull(IPR_EST)
# Average county-level Comparable Wage Index for Teachers
mean_comp_wage <- comparablewageindex %>%
  summarize(CNTY_CWIFTEST = mean(CNTY_CWIFTEST)) %>%
  pull(CNTY_CWIFTEST)
|
cbdbb3d8f9e7a1b9e6cc2665a27b727d1e706297
|
3d1a05335aa4c19461591d7d5895a17c43124208
|
/ANOVA/boxplot_haricot_dbca.R
|
67c1f1ee1b45467f83a770a4c18fbbfc180316f3
|
[] |
no_license
|
rodney-davermann/FDSEG-CH-Statistiques-g-n-rales-
|
8cc0f3b58fa40b9df98924a1230b3b48fb2dc14c
|
f718b10f7afc4a8ca6e57e6c31b3aaaac3955e3d
|
refs/heads/master
| 2021-01-01T06:50:44.409330
| 2015-10-02T03:13:07
| 2015-10-02T03:13:07
| 42,944,380
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 253
|
r
|
boxplot_haricot_dbca.R
|
# Read the trial data from the Excel workbook (first sheet, with header row)
library(xlsx)
haricot <- read.xlsx("C://R_Training//anova_haricot_dbca.xls", sheetIndex = 1, header = TRUE)

# Boxplot of yield (Rendement) by variety (Variete), all blocks combined.
# Fix: the formula variables live in `haricot`, so the formula interface
# needs `data = haricot` -- the original call would fail with
# "object 'Rendement' not found" unless the data frame had been attach()ed.
boxplot(Rendement ~ Variete, data = haricot,
        col = c("green", "yellow", "red"), vertical = TRUE,
        main = "Rendement par Variete")
|
ddbc98a9bb4dc6f1470e907c53d4dc56803954e2
|
506ba5bd7966e619d0c8df79cc640375b1c6c96f
|
/FAOSTAT/R/FAOSTAT-package.R
|
2602754f1c7e2d4f9385826cc3a6e2120b0ca44c
|
[] |
no_license
|
sebastian-c/FAOSTATpackage
|
7627982608001a43eb5f0faf6a5d25235292047e
|
c4dd5f4a7c38eced35fbb94580b0aba6921dd809
|
refs/heads/master
| 2023-04-09T00:52:13.050968
| 2017-01-30T16:20:02
| 2017-01-30T16:20:02
| 96,107,563
| 0
| 0
| null | 2017-07-03T12:17:42
| 2017-07-03T12:17:42
| null |
UTF-8
|
R
| false
| false
| 442
|
r
|
FAOSTAT-package.R
|
# Package-level roxygen2 documentation stub for the FAOSTAT package.
# NOTE(review): documenting NULL with @docType package is the legacy
# pattern; current roxygen2 prefers documenting the sentinel string
# "_PACKAGE" -- confirm the roxygen2 version in use before changing.
##' Search and extract data from FAOSTAT database of the Food and
##' Agricultural Organization of the United Nations.
##'
##' @name FAOSTAT-package
##' @docType package
##' @title Search and extract data from FAOSTAT database of the Food and
##' Agricultural Organization of the United Nations.
##' @author Michael. C. J. Kao \email{michael.kao@@fao.org}
##' @keywords package
##' @import plyr
##' @import data.table
##'
NULL
|
1d1c0dc50abbd87740722e294540e2587aba31ef
|
fac1fe26e6b58e32c3797ef206d803a301403f2b
|
/inst/run_analysis.R
|
9c673ec581e58d9654db17dbbb4e5760409291be
|
[
"WTFPL"
] |
permissive
|
n0542344/coolmlproject
|
ded3afc3f27e7fdbc4b0d0597b411fb00120712b
|
2a903cdff8699ea8123b73b36cadd6cdcdf24b2c
|
refs/heads/master
| 2022-06-28T08:02:15.255459
| 2020-05-13T07:40:34
| 2020-05-13T07:40:34
| 263,478,738
| 0
| 0
|
WTFPL
| 2020-05-12T23:50:01
| 2020-05-12T23:50:00
| null |
UTF-8
|
R
| false
| false
| 1,306
|
r
|
run_analysis.R
|
## Load (and, if not present, install) the packages this pipeline needs.
if (!require("pacman")) install.packages("pacman"); library(pacman)
pacman::p_load(coolmlproject,
               dplyr,
               drake,
               parsnip,
               dials,
               tune)
# pacman::p_load(tidymodels)

# drake plan: data prep -> CV splits -> preprocessing -> model spec ->
# tuning grid -> workflow -> grid tuning.
plan <- drake_plan(
  datasets = get_data(),
  training_set = datasets$data_train,
  testing_set = datasets$data_test,
  # 3-fold cross-validation splits of the training data
  training_splits = rsample::vfold_cv(training_set, v = 3),
  preprocessed = preprocess(training_set),
  # NOTE: parsnip::boost_tree() has no `tree` argument; the tunable
  # number-of-trees parameter is `trees` (fixed from the original
  # `tree = tune()`, which would be rejected as an unknown argument).
  boosted_trees_model = define_model(boost_tree,
                                     "xgboost",
                                     "classification",
                                     mtry = tune(),
                                     trees = tune(),
                                     tree_depth = tune()),
  boost_grid = boosted_trees_model %>%
    define_grid(predictor_data = select(training_set,
                                        -target),
                grid_max_entropy,
                size = 5),
  boost_wflow = define_wflow(preprocessed,
                             boosted_trees_model),
  tuned_boosted_trees = tune_grid(boost_wflow,
                                  training_splits,
                                  boost_grid)
)

make(plan)
|
17076b981f8acc01f4d667a3ae3c54bd051f77ac
|
56b32941415e9abe063d6e52754b665bf95c8d6a
|
/R-Portable/App/R-Portable/library/igraph/tests/test_count.multiple.R
|
e1bbf372265452aad0d0af357138ee207f1a3a64
|
[
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-newlib-historical",
"GPL-2.0-or-later",
"MIT"
] |
permissive
|
voltek62/seo-viz-install
|
37ed82a014fc36e192d9a5e5aed7bd45327c8ff3
|
e7c63f4e2e4acebc1556912887ecd6a12b4458a0
|
refs/heads/master
| 2020-05-23T08:59:32.933837
| 2017-03-12T22:00:01
| 2017-03-12T22:00:01
| 84,758,190
| 1
| 0
|
MIT
| 2019-10-13T20:51:49
| 2017-03-12T21:20:14
|
C++
|
UTF-8
|
R
| false
| false
| 1,285
|
r
|
test_count.multiple.R
|
# testthat tests for igraph's edge-multiplicity helpers:
# which_multiple() (flags repeated edges) and count_multiple()
# (counts each edge's multiplicity).
context("count_multiple")

test_that("count_multiple works", {
  library(igraph)

  # Fixed seed: the literal expectations below are tied to this exact
  # random preferential-attachment graph.
  set.seed(42)
  g <- barabasi.game(10, m=3, algorithm="bag")
  im <- which_multiple(g)
  cm <- count_multiple(g)
  expect_that(im, equals(c(FALSE, TRUE, TRUE, FALSE, TRUE, TRUE,
                           FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
                           FALSE, FALSE, TRUE, FALSE, FALSE, TRUE,
                           FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
                           FALSE, FALSE, TRUE)))
  expect_that(cm, equals(c(3, 3, 3, 3, 3, 3, 1, 1, 1, 2, 1, 2, 1, 2,
                           2, 2, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 2)))
  # After simplify() every remaining edge has multiplicity 1
  expect_that(count_multiple(simplify(g)),
              equals(rep(1, ecount(simplify(g)))))

  ## Direction of the edge is important: 1->2 and 2->1 are distinct in a
  ## directed graph but duplicates in an undirected one.
  expect_that(which_multiple(graph( c(1,2, 2,1) )), equals(c(FALSE, FALSE)))
  expect_that(which_multiple(graph( c(1,2, 2,1), dir=FALSE )),
              equals(c(FALSE, TRUE)))

  ## Remove multiple edges but keep multiplicity: with E(g)$weight set to 1,
  ## simplify() sums the weights, turning them into multiplicities.
  g <- barabasi.game(10, m=3, algorithm="bag")
  E(g)$weight <- 1
  g <- simplify(g)
  expect_that(any(which_multiple(g)), is_false())
  expect_that(E(g)$weight, equals(c(3, 2, 1, 2, 1, 3, 2, 1, 2, 1, 2,
                                    1, 1, 1, 1, 1, 1, 1)))
})
|
98139761f6d82853684155f899d14b9ff650c0d9
|
37ac806c710d7d9c81e5d55900c65bd11283407f
|
/man/get_head.Rd
|
a2ebb2f6f4bbd4ed74fc74b2727bd0d8b7941abe
|
[
"MIT"
] |
permissive
|
dirkschumacher/transduceR
|
14dc05afb931041b9819cf99dceb7cdb760378f7
|
3ac0bc2b3e119bda274f36f7c8318fe4f987b14a
|
refs/heads/master
| 2020-09-12T22:21:36.069370
| 2016-09-19T17:29:19
| 2016-09-19T17:29:19
| 35,349,106
| 35
| 0
| null | 2016-09-19T17:29:20
| 2015-05-09T23:17:38
|
R
|
UTF-8
|
R
| false
| true
| 304
|
rd
|
get_head.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sequence.R
\name{get_head}
\alias{get_head}
\title{Returns the first element of a collection.}
\usage{
get_head(sequence)
}
\arguments{
\item{sequence}{a sequence}
}
\description{
Returns the first element of a collection.
}
|
be2b2a9fd64d7bbef9fdac288028ff1b9e4b6f7d
|
33f510378be81a8840c5ad39d20456454d98386c
|
/pkg/randtoolbox/tests/test-runifInterface.R
|
d39374f27fc8425bf33efcb8a8b0491be521dd8a
|
[] |
no_license
|
sstoeckl/rmetrics
|
41ebe1d0be6ec19cac02dbe2501551f0e1416698
|
dd766277b5891415c514039ac2da0351d86b7c8b
|
refs/heads/master
| 2020-03-31T17:26:51.225928
| 2018-10-10T12:57:30
| 2018-10-10T12:57:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,386
|
r
|
test-runifInterface.R
|
# Exercises randtoolbox's runif() interface for several classic linear
# congruential generators (LCGs): after set.generator(), base runif() must
# reproduce the explicit congruRand() stream from the same seed. The bare
# `== 2^k` expressions print TRUE to document which power of two each
# modulus is.
library(randtoolbox)
RNGkind()
# See e.g. https://en.wikipedia.org/wiki/Linear_congruential_generator
# Park-Miller ("minimal standard") congruential generator
set.generator(name="congruRand", mod=2^31-1, mult=16807, incr=0, seed=12345)
get.description()
runif(5)
setSeed(12345)
congruRand(5, dim=1, mod=2^31-1, mult=16807, incr=0)
RNGkind()
# The Knuth-Lewis RNG (modulus 2^32; parameters passed as strings to avoid
# precision loss)
4294967296 == 2^32
set.generator(name="congruRand", mod="4294967296", mult="1664525", incr="1013904223", seed=1)
runif(5)
setSeed(1)
congruRand(5, dim=1, mod=4294967296, mult=1664525, incr=1013904223)
# The POSIX rand48 generator (modulus 2^48)
281474976710656 == 2^48
set.generator(name="congruRand", mod="281474976710656", mult="25214903917", incr="11", seed=1)
runif(5)
setSeed(1)
congruRand(5, dim=1, mod=281474976710656, mult=25214903917, incr=11)
# The MMIX RNG by Donald Knuth (modulus 2^64)
18446744073709551616 == 2^64
set.generator(name="congruRand", mod="18446744073709551616", mult="1442695040888963407", incr="1013904223", seed=1)
runif(5)
# The first value should equal this hand-computed ratio
(1442695040888963407 * 1 + 1013904223) / 2^64
setSeed(1)
congruRand(5, dim=1, mod=18446744073709551616, mult=1442695040888963407, incr=1013904223)
# Haynes RNG (same modulus, different multiplier); echo = TRUE prints the
# internal state transitions
set.generator(name="congruRand", mod="18446744073709551616", mult="636412233846793005", incr="1", seed=1)
res <- get.description()
runif(1)
setSeed(1)
congruRand(2, dim=1, mod=18446744073709551616, mult=636412233846793005, incr=1, echo = TRUE)
|
9234532fa4f8118004d5baaa5933703095d6441e
|
b786c156f778c28a2f78ed979923d144390beedd
|
/figure scripts/trophicTransferFAbarplot.R
|
a6378b7562b9281fc4b012cdb83334111056c08f
|
[] |
no_license
|
pkelly13/bugEmergenceProject
|
fc905b521b1a7d7ee3f4e7c702d5eb8d773f2970
|
57facae093a5aba1f5a2ddca7647113306e4341f
|
refs/heads/master
| 2020-04-16T12:54:33.023047
| 2014-08-26T13:16:12
| 2014-08-26T13:16:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,563
|
r
|
trophicTransferFAbarplot.R
|
# Bar-plot figure of fatty-acid trophic transfer. Depending on how much of
# the tsGrowth_and_PCAplots script is run, the values are either absolute FA
# concentrations or % of total FAs.
# PTK 11 June 2014

# Set working directory and source the script that builds the *.table /
# se.*.table matrices and defines error.bar().
setwd('~/bugEmergenceProject')
source('tsGrowth_and_PCAplots.R')

# Save the plot as a pdf in the potential-figures folder.
setwd('~/bugEmergenceProject/potential figures')

# Draw one error bar per cell of a (group x site) table. With beside = TRUE
# and 4 bars per group, bar centres sit at x = (col - 1) * 5 + row + 0.5,
# which reproduces the 96 hand-written coordinates of the original script
# (1.5..4.5, 6.5..9.5, ..., 26.5..29.5) in the same order
# (column-major: all rows of column 1, then column 2, ...).
add_error_bars <- function(tab, se.tab) {
  for (j in seq_len(ncol(tab))) {
    for (i in seq_len(nrow(tab))) {
      error.bar((j - 1) * 5 + i + 0.5, tab[i, j],
                upper = se.tab[i, j], lower = 0, length = 0.04)
    }
  }
}

pdf('fattyAcidTrophicTransfer_barplot.pdf', width = 11, height = 8.5)
par(mfrow = c(2, 2))

# EPA panel (carries the legend for all four panels)
barplot(epa.table, beside = TRUE, ylim = c(0, max(se.epa.table + epa.table)),
        main = 'EPA', legend = c('Seston', 'Insects', 'Bolus', 'Nestlings'),
        args.legend = list(x = 6.5, y = 14, cex = 1, bty = 'n'))
add_error_bars(epa.table, se.epa.table)

# DHA panel
barplot(dha.table, beside = TRUE, ylim = c(0, max(se.dha.table + dha.table)),
        main = 'DHA')
add_error_bars(dha.table, se.dha.table)

# Omega-3 panel
barplot(w3.table, beside = TRUE, ylim = c(0, max(se.w3.table + w3.table)),
        main = expression(paste(omega, '3')))
add_error_bars(w3.table, se.w3.table)

# PUFA panel
barplot(pufa.table, beside = TRUE, ylim = c(0, max(se.pufa.table + pufa.table)),
        main = 'PUFA')
add_error_bars(pufa.table, se.pufa.table)

# Shared axis labels for the 2x2 layout
mtext('Site', side = 1, cex = 1.4, line = -1.5, outer = TRUE)
mtext('% of total FAs', side = 2, cex = 1.4, line = -1.5, outer = TRUE)
dev.off()
|
7e8b536564b72dffb41312013b53f9fecc36778d
|
69630a75fb71b75a1abd21f74b8a811533f7cab4
|
/man/total_sales.Rd
|
d5c9ec7d1b4a7cb56faf1672d3ae8a7f3a867f60
|
[] |
no_license
|
codeclan/CodeClanData
|
aa0cd21aea3390d3629ab7ebbd53543623d92941
|
e3965c596a0439e125b4643412bd434a54266da8
|
refs/heads/master
| 2023-03-16T06:52:38.769843
| 2023-03-10T09:02:43
| 2023-03-10T09:02:43
| 190,196,169
| 5
| 8
| null | 2019-08-03T10:46:43
| 2019-06-04T12:24:19
|
R
|
UTF-8
|
R
| false
| true
| 386
|
rd
|
total_sales.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/documentation.R
\docType{data}
\name{total_sales}
\alias{total_sales}
\title{Total sales}
\format{
A data frame
}
\usage{
total_sales
}
\description{
An invented dataset with total sales across 7 branches.
}
\details{
A monthly breakdown of this dataset can be found in `monthly_sales`.
}
\keyword{datasets}
|
8675eccb8e38598ba64781cbcb49ef793f0a22a4
|
0a4a2054c304ac1aa473a0c29c6eb9df7fc26275
|
/scripts/locus-multispecies-ch-tables.R
|
2f00b676447c07bbde057768fbec90bdd55dfe75
|
[] |
no_license
|
willbradshaw/thesis
|
1039cb1cee95b4159de2cea502a4f2130a91c626
|
ad7e65ec88bdb72f2f560b0f409cf8c38d8239c6
|
refs/heads/master
| 2022-11-17T08:20:04.157046
| 2020-06-27T17:55:59
| 2020-06-27T17:55:59
| 250,043,773
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,485
|
r
|
locus-multispecies-ch-tables.R
|
###############################################################################
##                                  FIGURE                                   ##
##                        Multispecies C-region maps                         ##
###############################################################################

aux_dir <- snakemake@params[["aux"]]
source(file.path(aux_dir, "aux.R"))
parse_snakemake(snakemake)
write_log("Parsed global Snakemake properties.")
write_log("Loaded packages and auxiliary functions.")

#------------------------------------------------------------------------------
# READ TABLE
#------------------------------------------------------------------------------

# Escape literal ampersands for LaTeX output.
# Fix: in a gsub() replacement string, the R literal "\\&" is the two
# characters \& which gsub interprets as an escaped literal "&" -- i.e. the
# original call was a no-op. Emitting a real backslash before the ampersand
# ("\&" in the output text) requires "\\\\&".
tab <- suppressMessages(read_csv(inpath)) %>%
    mutate(`\\textbf{Comments}` = gsub("&", "\\\\&", `\\textbf{Comments}`))

#------------------------------------------------------------------------------
# WRITE TABLES
#------------------------------------------------------------------------------

# Left-align every column, italicised
align_string <- paste0(">{\\italic}",
                       paste(rep("l", ncol(tab)),collapse = ""))

# Split the table into page-sized chunks and write each as its own output.
rows_per_tab <- 25
tab_split_at <- seq(0, ceiling(nrow(tab)/rows_per_tab)*rows_per_tab,
                    rows_per_tab)
row_n_tab <- length(tab_split_at) - 1

for (n in seq_len(row_n_tab)){
  assign(paste0("tab_", n), tab[(tab_split_at[n]+1):tab_split_at[n+1],])
  savetab(get(paste0("tab_", n)), get(paste0("outpath_ch", n)),
          align = align_string)
}
|
eb63df9ced6058d61409966dc22ee993971f5f4d
|
5c618b59cc2ac45e48c05bb24d2e56be4e27077c
|
/models/contamination/paper/code/Step4_mask_contamination_hindcasts.R
|
a15eb0d4e33b5aaa5b0bfd362a508642a529ab0f
|
[] |
no_license
|
cfree14/dungeness
|
fefcd5e256e0f8fe4721fbd1b627942e74704b5b
|
76054741b1209078e92ce2cc543620023900ab6d
|
refs/heads/master
| 2023-08-29T04:43:00.107409
| 2023-08-08T18:21:41
| 2023-08-08T18:21:41
| 189,051,316
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,549
|
r
|
Step4_mask_contamination_hindcasts.R
|
# Clear workspace
# NOTE(review): rm(list = ls()) wipes the caller's session; kept because the
# original pipeline relies on starting from a clean environment.
rm(list = ls())

# Setup
################################################################################

# Packages
library(sf)
library(zoo)
library(caret)
library(raster)
library(tidyverse)
library(tidymodels)
library(lubridate)

# Directories
datadir <- "models/contamination/paper/data"
outputdir <- "models/contamination/paper/output"

# Read species range polygons
ranges <- readRDS(file.path(datadir, "species_ranges.Rds"))

# List prediction rasters (auto-printed for inspection when run via Rscript)
list.files(outputdir, "predictions.gri")


# Mask predictions
################################################################################

# Best model per species (stems of the prediction-raster file names)
best_models <- c("dungeness_crab_model_rf_cda.Rds",
                 "rock_crab_model_rf_pda.Rds",
                 "spiny_lobster_model_rf_pda.Rds",
                 "razor_clam_model_rf_cda.Rds")

# Loop through models: load each species' prediction brick, mask it to the
# species' range polygon, and write the masked raster back out.
# (Fixed: removed the stray debug assignment `i <- 1` that preceded the
# loop, and replaced 1:length() with seq_along().)
for(i in seq_along(best_models)){

  # Load predictions (progress marker printed per iteration)
  print(i)
  infile <- best_models[i] %>% gsub(".Rds", "", .) %>% paste0(., "_predictions.grd")
  preds <- brick(file.path(outputdir, infile))

  # Derive the species name from the file name, then pull its range polygon
  spp <- gsub("_model_rf_cda_predictions.grd|_model_rf_pda_predictions.grd", "", infile) %>% gsub("_", " ", .) %>% stringr::str_to_sentence()
  srange <- ranges %>%
    filter(species==spp)

  # Mask the predictions to the species range.
  # NOTE(review): the original comment said "Mask by 100 fathoms", but the
  # code masks by the range polygon only -- confirm whether a separate depth
  # mask is still required.
  preds_mask <- raster::mask(x=preds, mask=srange)

  # Export the masked raster
  outfile_preds_masked <- paste0(gsub(".Rds", "", best_models[i]), "_predictions_range_mask.grd")
  writeRaster(preds_mask, file.path(outputdir, outfile_preds_masked), overwrite=T)

}
|
02e0de6b737d2f5448612c1fcf44b27572efef0c
|
cc2e4b5a9a57396a5b3d32100c92dbc630c59dde
|
/Part1/4회/분석 결과 정리와 공유.R
|
e985f5dd6c5051a7f53a543cebbcaeb6daa803a5
|
[] |
no_license
|
hellogurney/RTong
|
d63530f0fe282ae86276cf399dd4182240e4f0f1
|
2ccbb5740326d85616f4c2216b023fe4e059cf8b
|
refs/heads/master
| 2021-04-09T16:34:42.622912
| 2018-09-14T07:40:01
| 2018-09-14T07:40:01
| 125,618,783
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 441
|
r
|
분석 결과 정리와 공유.R
|
#### Organizing and sharing analysis results ####
# "Data Science by Doing It" (따라하며 배우는 데이터 과학), chapter 12 slides: https://goo.gl/mzAGqT

#### Markdown ####
# What is Markdown?: https://goo.gl/fD10fA
# Markdown syntax: https://goo.gl/hz7Dt1
# Markdown practice: http://markdownlivepreview.com/

#### R Markdown ####
# Cheat sheet: https://goo.gl/uUHJq6
# Example: https://rpubs.com/jmhome/datatype_analysis
# RPubs: https://goo.gl/6oQRJa
|
c7cc1db3d0120d034f7889f9f869db714e461cef
|
09c564464713ca4de570a098810f197a0da0edd8
|
/man/option_list.Rd
|
a6e0f9fcd1cc5789d55e5e7f0adbf85258d93d45
|
[] |
no_license
|
thigm85/RBi
|
40ccc7caed9cc1ac70e8591f58a626504b2691e8
|
d4781208f109fb6792c37627df9cc61da258602d
|
refs/heads/master
| 2020-03-15T07:54:37.032268
| 2018-06-11T19:03:54
| 2018-06-11T19:03:54
| 132,039,973
| 0
| 0
| null | 2018-06-11T18:55:52
| 2018-05-03T19:21:50
|
R
|
UTF-8
|
R
| false
| true
| 414
|
rd
|
option_list.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util_option_list.R
\name{option_list}
\alias{option_list}
\title{Convert string to option list}
\usage{
option_list(...)
}
\arguments{
\item{...}{any number of strings to convert}
}
\value{
option list
}
\description{
This function is used to convert an option string into a list of options. If a list is given, it will be kept as is
}
|
933f1fffe4d4733eeebc5378b898f189b0d7a14a
|
8cdda276bac8b6681385b03fb902f115ad4acae2
|
/R/hypoexpRuinprob.R
|
bb51ff3395eb3b36736c55ff0cad8340cedaad8d
|
[] |
no_license
|
cran/sdprisk
|
4fd9eb860a6f07ed4772af181915836b35f8fea8
|
8a30ea3963f7ab74ac21fbe3a2c152f9e805839c
|
refs/heads/master
| 2021-05-15T02:17:04.336187
| 2019-04-29T19:50:03
| 2019-04-29T19:50:03
| 17,699,519
| 0
| 1
| null | 2015-04-23T18:15:01
| 2014-03-13T06:15:04
|
R
|
UTF-8
|
R
| false
| false
| 2,139
|
r
|
hypoexpRuinprob.R
|
# Ruin-probability functionals for a risk process with hypoexponential
# (generalized Erlang) claim amounts.
#
# process: a risk-process object whose $claims component is hypoexponential
#          (checked below); the computation reads its stage rates and
#          mixture coefficients plus the process parameters p, q and zeta.
#
# Returns a list of vectorised functions (psi = ruin probability, psi.1 and
# psi.2 = its two components, dens = associated density), with the solved
# constants and exponents attached as a "diagnostics" attribute.
#
# NOTE(review): the code solves a polynomial root problem derived from the
# model; exact statement order matters, so it is documented rather than
# restructured.
hypoexpRuinprob <- function(process) {
    stopifnot(is.hypoexp(process[['claims']]))
    # One linear polynomial factor (rate - x) per hypoexponential stage.
    mypoly.factors <- PolynomF::as_polylist(lapply(X = process[[c('claims', 'hypoexp', 'rates')]],
                                                   FUN = function(arg) {
                                                       c(arg, -1.0)
                                                   }))
    # Right-hand side: mean claim size times (zeta - x) times the product
    # of all stage factors.
    mypoly.rhs <- mean(process[['claims']]) * PolynomF::polynom(c(process[['zeta']], -1.0)) * prod(mypoly.factors)
    # Left-hand side: zeta * q times the coefficient-weighted sum of the
    # products of all factors but one (leave-one-out products).
    mypoly.lhs <- process[['zeta']] * process[['q']] * sum(PolynomF::as_polylist(
        lapply(X = seq_along(mypoly.factors),
               FUN = function(index) {
                   process[[c('claims', 'hypoexp', 'coef')]][index] * prod(mypoly.factors[-index])
               })
    ))
    # Exponents r: roots of lhs - rhs (solve() on a polynom finds roots).
    r <- solve(mypoly.lhs - mypoly.rhs)
    # Mixing constants: linear system combining the rate/root ratios with a
    # normalisation row (the constants must sum to one).
    const <- solve(a = rbind(outer(X = process[[c('claims', 'hypoexp', 'rates')]],
                                   Y = r,
                                   FUN = function(.rates, .r) {
                                       .rates / (.rates - .r)
                                   }),
                             rep.int(1.0, length(r))),
                   b = rep.int(1.0, length(r)))
    # Split the constants into the two ruin-probability components.
    const1 <- r * const / (process[['p']] * process[['zeta']])
    const2 <- const - const1
    # Factory for functions of the form x -> min(cutoff, sum_k m_k * exp(-e_k x));
    # Re() discards negligible imaginary parts from complex-conjugate roots.
    genexp <- function(multarg, exparg, cutoff) {
        function(x) {
            pmin.int(cutoff, Re(drop(crossprod(exp(outer(-exparg, x)), multarg))))
        }
    }
    return(structure(.Data = list(psi = genexp(const, r, 1.0),
                                  psi.1 = genexp(const1, r, 1.0),
                                  psi.2 = genexp(const2, r, 1.0),
                                  dens = genexp(const * r, r, Inf)),
                     compmethod = 'hypoexp',
                     riskproc = process,
                     parameters = list(NULL),
                     diagnostics = list(C = const,
                                        C1 = const1,
                                        C2 = const2,
                                        r = r)))
}
|
63e490da4190647319e71eee967b70e2b522d8f8
|
12127d8a553d4b8fbe638c2297420d5995bb7d51
|
/R/data.R
|
60fe2eca8d3702b1014b0478269878c024af33b7
|
[
"CC-BY-4.0"
] |
permissive
|
stefanocoretta/coretta2019eng
|
01b55ab6efc9a44adda59f4ee95d632ec7b3b508
|
39c5eda16b2419040d79f1419b3a4321531075a1
|
refs/heads/master
| 2021-07-20T08:05:38.688629
| 2020-05-29T07:32:38
| 2020-05-29T07:32:38
| 175,707,282
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,780
|
r
|
data.R
|
#' Data on vowel, closure, and release to release duration in English.
#'
#' A dataset with durational measurements from 15 speakers of Mancunian English.
#'
#' @docType data
#' @format A tibble with 1800 observations and 21 variables.
#' \describe{
#' \item{\code{speaker}}{The speaker's ID.}
#' \item{\code{sentence}}{The sentence stimuli.}
#' \item{\code{sentence_ons}}{The onset time of the sentence stimuli (seconds).}
#' \item{\code{sentence_off}}{The offset time of the sentence stimuli (seconds).}
#' \item{\code{c1_rel}}{The release of C1 (seconds).}
#' \item{\code{c2_rel}}{The release of C2 (seconds).}
#' \item{\code{v1_ons}}{The onset of V1 (seconds).}
#' \item{\code{v1_off}}{The offset of V1 (seconds).}
#' \item{\code{frame}}{The frame sentence.}
#' \item{\code{word}}{The target word.}
#' \item{\code{vowel}}{The target vowel V1 (\code{ar, er, ee}).}
#' \item{\code{vowel_ipa}}{The target vowel (V1) in IPA.}
#' \item{\code{c2}}{The second consonant of the target word (C2).}
#' \item{\code{place}}{The place of articulation of C2 (velar, labial).}
#' \item{\code{voicing}}{The voicing of C2 (voiceless, voiced).}
#' \item{\code{num_syl}}{The number of syllables of the target word (disyllabic, monosyllabic).}
#' \item{\code{sentence_duration}}{The duration of the sentence stimuli (milliseconds).}
#' \item{\code{rel_rel}}{The duration of the release-to-release interval (milliseconds).}
#' \item{\code{v1_duration}}{The duration of V1 (milliseconds).}
#' \item{\code{c2_clos_duration}}{The duration of the closure of C2 (milliseconds).}
#' \item{\code{speech_rate}}{The speech rate (number of syllables in the sentence / sentence duration).}
#' \item{\code{speech_rate_c}}{Centred speech rate.}
#' }
"eng_durations"
|
7b599346ed7bda3c2ced61f9379f14a471d30af0
|
3b4b536a3b002e9f6db2e4e98edaa0e7bc699ce5
|
/_site/R/blue_lightnings.r
|
b334d9af92db6be859647bbe588c91c909876262
|
[] |
no_license
|
verticales/verticales.github.io
|
7b81480998e7bd3e3d73a87489bf07cb4d89d363
|
005f34ccd164879055dc9c2f8bf0401f0765e68e
|
refs/heads/master
| 2021-01-10T02:00:16.008585
| 2015-11-03T23:41:47
| 2015-11-03T23:41:47
| 45,355,332
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,213
|
r
|
blue_lightnings.r
|
# ========================================================================
# Blue Lightnings
# ========================================================================
# Draws 25 genetic-drift trajectories (binomial sampling of an allele
# frequency starting at 0.5) as glowing blue lines on a black background.
#
# BUG FIX: the original generated only 20 blue/white colors but drew 25
# trajectories, so paths 21-25 received NA colors and were invisible; the
# color vectors are now sized by the number of paths. The hardcoded
# rep(0, 50) is now tied to `gens`.

# population size per generation
n <- 500
# number of generations per trajectory
gens <- 50
# number of trajectories drawn
n_paths <- 25
# colors: one translucent blue/white pair per trajectory
blues <- hsv(h = 0.65, s = runif(n_paths, 0.8, 1), v = 1,
             alpha = runif(n_paths, 0.6, 0.9))
whites <- hsv(h = 0, s = 0, v = 1, alpha = runif(n_paths, 0.1, 0.7))

png("blue_lightnings.png", width = 700, height = 400)
# set graphical parameters (restored at the end)
op <- par(bty = "n", mar = c(0, 0.2, 0, 0.2), bg = 'black')
# open an empty plot region
plot(seq_len(gens), rep(0, gens), type = 'l', xlim = c(1, gens),
     ylim = c(0.15, 0.85), axes = FALSE)
for (j in seq_len(n_paths))
{
  # initial allele frequency
  p <- 0.5
  # trajectory of frequencies across generations
  freq <- c(p, rep(NA, gens - 1))
  for (i in 2:gens)
  {
    p <- rbinom(1, n, p) / n
    freq[i] <- p
  }
  # draw a white "glow" underneath the blue line, same random width
  lwds <- sample(seq(0.5, 2.5, by = 0.15), 1)
  lines(seq_len(gens), freq, col = whites[j], lwd = lwds)
  lines(seq_len(gens), freq, col = blues[j], lwd = lwds)
}
# signature
legend("bottomleft", legend = "© Gaston Sanchez", bty = "n",
       text.col = "gray70")
# reset par
par(op)
dev.off()
|
a253cc27d2c3db13dfdf69c28ad249c0af0eb0bc
|
ff467a97a5465d23b6abb8ec5ff088a231e5e288
|
/exercises/01-flights.R
|
dc8ac2af289e3ae7a66f09a72863fbc66ec6c392
|
[] |
no_license
|
qualityland/Grolemund_Reproducible_Research
|
68b07b6bdeca006b398b31f13b5363cdc9dce3fe
|
75c3706820d2f95ce4be01bcfd8089a990a1851d
|
refs/heads/master
| 2023-04-09T23:45:36.517006
| 2021-04-26T06:28:57
| 2021-04-26T06:28:57
| 257,597,478
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,691
|
r
|
01-flights.R
|
# QUESTION: Which NYC airport has the longest delays for a given day of the week?
#
# Missing packages can be installed from an R session with:
#   install.packages(c("nycflights13", "dplyr", "ggplot2", "lubridate"))
library(nycflights13) # flights data
library(dplyr)        # data manipulation verbs
library(ggplot2)      # plotting
library(lubridate)    # date-time processing

# Per-flight departure delays annotated with the calendar date and the
# day of the week; flights without a recorded delay are dropped.
delays <- flights %>%
  mutate(date = ymd(paste(year, month, day)),
         weekday = wday(date, label = TRUE, abbr = FALSE)) %>%
  select(origin, date, weekday, dep_delay) %>%
  filter(!is.na(dep_delay))

# MOTIVATION: delays fluctuate throughout the year.
# Mean departure delay per airport and day.
daily_delays <- delays %>%
  group_by(origin, date) %>%
  summarise(mean_delay = mean(dep_delay))

# Smoothed daily mean delay over time, one curve per airport.
ggplot(daily_delays, aes(x = date, y = mean_delay, color = origin)) +
  geom_point(alpha = 0.2) +
  geom_smooth(se = FALSE) +
  ggtitle("Smoothed daily mean delays") +
  ylab("Mean delay (m)") +
  theme_bw()

# METHOD: pick a day of the week and aggregate delays by airport.
dow <- "Saturday"

# Mean delay per airport, restricted to the chosen weekday.
airport_delays <- delays %>%
  filter(weekday == dow) %>%
  group_by(origin) %>%
  summarise(mean_delay = mean(dep_delay))

# RESULTS: bar chart of mean delay by airport for the chosen day.
ggplot(airport_delays, aes(x = origin)) +
  geom_bar(aes(weight = mean_delay)) +
  ggtitle(paste("Expected", dow, "departure delay", sep = " ")) +
  ylab("Mean delay (m)") +
  xlab("")

# A table of mean delays
airport_delays

# Airport with the shortest mean departure delay on the chosen day...
c("EWR" = "Newark", "JFK" = "JFK", "LGA" = "LaGuardia")[[airport_delays$origin[which.min(airport_delays$mean_delay)]]]

# ...and the length of that mean delay (minutes, rounded).
round(min(airport_delays$mean_delay), 2)
|
48717fbc9ada9c13f05ca934b32a593809af7839
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/EMbC/R/functions.R
|
1f5517d2010c9a48329af1bd3626051bb3b14724
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,004
|
r
|
functions.R
|
# The EMbC Package for R
#
# Copyright 2013, 2014, 2015 Joan Garriga <jgarriga@ceab.csic.es>, Aitana Oltra <aoltra@ceab.csic.es>, John R.B. Palmer <johnrbpalmer@gmail.com>, Frederic Bartumeus <fbartu@ceab.csic.es>
#
# EMbC is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
#
# EMbC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this program. If not, see http://www.gnu.org/licenses.
# binClst auxiliary functions
# ---------------------------
getColors <- function(k, fam = 'RdYlBu') {
  # Palette of k cluster colors from a ColorBrewer family, followed by a
  # dark grey for non-classified points; beyond 8 clusters fall back to
  # rainbow(). For k == 1 only the grey is returned.
  dark_grey <- brewer.pal(4, 'Greys')[4]
  if (k == 1) {
    dark_grey
  } else if (k <= 4) {
    c(brewer.pal(4, fam)[c(2, 1, 3, 4)], dark_grey)
  } else if (k <= 8) {
    c(brewer.pal(8, fam), dark_grey)
  } else {
    c(rainbow(k), dark_grey)
  }
}
parSet <- function(mtx = NULL, widths = NULL, heights = NULL, bg = NULL,
                   oma = c(1, 1, 1, 1), mar = c(3, 3, 0.5, 0.5),
                   mgp = c(1.5, 0.4, 0), cex.main = 1.0, cex.lab = 1.0,
                   cex.axis = 0.8) {
  # Apply common graphical parameters (and an optional layout matrix),
  # returning the previous settings so the caller can restore them with
  # par() afterwards.
  parDef <- par(no.readonly = TRUE)
  par(oma = oma, mar = mar, mgp = mgp,
      cex.main = cex.main, cex.lab = cex.lab, cex.axis = cex.axis)
  if (!is.null(mtx)) {
    # default to equal relative widths/heights when not specified
    layout(mtx,
           widths = if (is.null(widths)) rep(1, ncol(mtx)) else widths,
           heights = if (is.null(heights)) rep(1, nrow(mtx)) else heights)
  }
  if (!is.null(bg)) {
    par(bg = bg)
  }
  parDef
}
# binClstPath auxiliary functions
# -------------------------------
# Earth radius in meters used for loxodromic computations.
earthR <- function() {
  6378137
}
spanTime <- function(pth) {
  # Elapsed seconds between consecutive fixes of a path data frame with
  # a POSIXct 'dTm' column; returns nrow(pth) - 1 values.
  n <- nrow(pth)
  as.numeric(difftime(pth$dTm[-1], pth$dTm[-n], units = "secs"))
}
# Loxodromic (rhumb-line) distance in meters between consecutive points
# of a path data frame with 'lon'/'lat' columns in degrees; returns
# nrow(pth) - 1 values. Uses the spherical Earth radius from earthR().
loxDst <- function(pth){
  n <- nrow(pth)
  # coordinate increments converted to radians
  dltLon <- (pth$lon[2:n]-pth$lon[1:(n-1)])*pi/180
  dltLat <- (pth$lat[2:n]-pth$lat[1:(n-1)])*pi/180
  dltRad <- numeric((n-1))
  for (i in 1:(n-1)) {
    if (dltLat[i] == 0){
      # constant latitude: longitude increment scaled by cos(lat)
      q <- cos(pth$lat[i]*pi/180)
    } else {
      # general case: q = dlat / d(isometric latitude)
      dltPhi <- log(tan(pi/4+pth$lat[i+1]*pi/360))-log(tan(pi/4+pth$lat[i]*pi/360))
      q <- dltLat[i]/dltPhi
    }
    # angular rhumb-line length of segment i
    dltRad[i] = sqrt(dltLat[i]^2+(q*dltLon[i])^2)
  }
  return(dltRad*earthR())
}
# Loxodromic heading (radians, clockwise from north) of each consecutive
# segment of a path data frame with 'lon'/'lat' columns in degrees.
# A segment with no displacement gets the sentinel value 2*pi; returns
# nrow(pth) - 1 values.
loxTht <-function(pth){
  n <- nrow(pth)
  dltLon <- (pth$lon[2:n]-pth$lon[1:(n-1)])
  dltLat <- (pth$lat[2:n]-pth$lat[1:(n-1)])
  lclTht <- numeric((n-1))
  for (i in 1:(n-1)){
    # tangent of the heading from the isometric-latitude increment
    # (0 placeholder when the meridian case applies below)
    tngTht <- ifelse (dltLon[i]==0,0,
                      log(tan(pi/4+pth$lat[i+1]*pi/360)/tan(pi/4+pth$lat[i]*pi/360))/(dltLon[i]*pi/180))
    # degenerate cases first: no move -> sentinel 2*pi; due N/S -> 0/pi;
    # due E/W -> pi/2 / 3*pi/2
    if (dltLon[i]==0 && dltLat[i]==0) lclTht[i] <- 2*pi
    else if (dltLon[i] == 0) lclTht[i] <- ifelse (dltLat[i]>0,0,pi)
    else if (dltLat[i] == 0) lclTht[i] <- ifelse (dltLon[i]>0,pi/2,3*pi/2)
    else {
      # general case: quadrant-aware atan2, then map onto [0, 2*pi)
      # measured clockwise from north
      lclTht[i] <- ifelse (dltLon[i]>0,atan2(tngTht,1),atan2(-tngTht,-1))
      if (dltLat[i] > 0) {
        lclTht[i] <- 5*pi/2-lclTht[i]
        while (lclTht[i] > 2*pi)lclTht[i] <- lclTht[i]-2*pi
      } else {
        lclTht[i] <- pi/2-lclTht[i]
      }
    }
  }
  return(lclTht)
}
getSpeed <- function(bCP) {
  # Per-segment speed (distance / time span); segments with a
  # non-positive time span get speed 0.
  mapply(function(d, s) if (s > 0) d / s else 0, bCP@dst, bCP@spn)
}
# Absolute turning angle (radians, folded into [0, pi]) between
# consecutive segment headings of a binClstPath object.
#
# A heading equal to 2*pi is the "no displacement" sentinel (see loxTht)
# and contributes a zero turn; the first and last values are 0 by
# construction, giving one value per path point.
#
# BUG FIX: the original looped over 2:length(Z), which for a 2-point
# path (length(Z) == 1) expands to c(2, 1) and indexes out of bounds;
# seq_len(m)[-1] yields an empty sequence in that case.
getTurns <- function(bCP){
  hdg <- bCP@hdg
  m <- nrow(bCP@pth) - 1
  turns <- numeric(m)
  for (i in seq_len(m)[-1]) {
    if (hdg[i] == 2 * pi || hdg[i - 1] == 2 * pi) {
      # at least one heading undefined: no measurable turn
      turns[i] <- 0
    } else {
      d <- abs(hdg[i] - hdg[i - 1])
      # take the smaller of the two arcs between the headings
      turns[i] <- if (d > pi) 2 * pi - d else d
    }
  }
  return(c(turns, 0))
}
# Per-location certainty weights in [0, 1] derived from the sampling
# time spans: spans no longer than the reference span get weight 1,
# longer spans are down-weighted proportionally (uRef / span), and
# non-positive spans get weight 0.
stCertainty <- function(bCP){
  uRef <- median(bCP@spn)
  if (uRef<=60){
    # sub-minute median: walk the span frequency table (sorted by
    # increasing frequency) from the most frequent span downward until
    # one exceeds 60 s -- presumably to skip sub-minute burst sampling;
    # confirm against the EMbC package documentation.
    Ttbl <- sort(table(bCP@spn))
    iRef <- length(Ttbl)
    while (iRef>1 && uRef<=60){
      uRef <- as.numeric(names(Ttbl[iRef]))
      iRef <- iRef -1
    }
  }
  U <- sapply(bCP@spn,function(x) if (x>0) min(uRef/x,1) else 0)
  # same certainty column for both clustering variables
  return(cbind(U,U))
}
bCPStd <- function(pth) {
  # Standardize a path data frame: the first three columns are taken to
  # be timestamp, longitude and latitude and renamed accordingly.
  colnames(pth)[1:3] <- c('dTm', 'lon', 'lat')
  pth
}
# Compute a circle radius (in meters) for each midpoint marker of a
# binClstPath object, scaled by the duration spent at the midpoint.
#
# Args:
#   bCP: object exposing bCP@midPoints@data$duration.
#   nMarkerSizeClasses: 0 for continuous scaling over the radius range,
#     1 for a single mid-range size, an integer > 1 for that many equally
#     spaced size classes, or a numeric vector of duration breakpoints.
#   minMarkerRadius, maxMarkerRadius: radius range in meters.
#   logDurations: scale by log(duration) instead of the raw duration.
#
# Returns a numeric vector of radii, or an error-message string on
# invalid input (string returns kept for backward compatibility).
#
# BUG FIX: the original dispatched on length(nMarkerSizeClasses == 1)
# and length(nMarkerSizeClasses > 1) -- the length of a logical vector,
# which is always >= 1 and hence always truthy -- so a breakpoint vector
# was routed into the scalar branch (where the vectorized `if` condition
# then fails). The dispatch now compares the length itself.
setMarkerSizes <- function(bCP, nMarkerSizeClasses, minMarkerRadius, maxMarkerRadius, logDurations = TRUE){
  if (logDurations)
    durationVariable = log(bCP@midPoints@data$duration)
  else
    durationVariable = bCP@midPoints@data$duration
  range = maxMarkerRadius - minMarkerRadius
  if (range < 0) return("Error: minmarkerRadius must be smaller than maxMarkerRadius. Please use new values.")
  if (length(nMarkerSizeClasses) == 1){
    if (nMarkerSizeClasses == 0){
      # continuous linear scaling over the full radius range
      durationScaleFactor = range / (max(durationVariable) - min(durationVariable))
      circleRadiiMeters = (minMarkerRadius + durationScaleFactor * (durationVariable - min(durationVariable)))
    } else if (nMarkerSizeClasses == 1){
      # single class: mid-range radius for every marker
      circleRadiiMeters = rep(minMarkerRadius + (maxMarkerRadius - minMarkerRadius) / 2, length(durationVariable))
    } else if (nMarkerSizeClasses > 1){
      # equally spaced radii over equal-width duration bins
      markerSizeClasses = minMarkerRadius + (0:(nMarkerSizeClasses - 1)) * range / (nMarkerSizeClasses - 1)
      circleRadiiMeters = markerSizeClasses[unclass(cut(durationVariable, nMarkerSizeClasses))]
    } else if (nMarkerSizeClasses < 0) return("Error: nMarkerSizeClasses must be >= 0. Please specify a new value.")
  } else if (length(nMarkerSizeClasses) > 1){
    # user-supplied breakpoints; extend one side to cover the data range
    if (min(nMarkerSizeClasses) > min(durationVariable))
      nMarkerSizeClasses = c(min(durationVariable), nMarkerSizeClasses)
    else if (max(nMarkerSizeClasses) < max(durationVariable))
      nMarkerSizeClasses = c(nMarkerSizeClasses, max(durationVariable))
    markerSizeClasses = minMarkerRadius + (0:(length(nMarkerSizeClasses) - 2)) * range / (length(nMarkerSizeClasses) - 2)
    circleRadiiMeters = markerSizeClasses[unclass(cut(durationVariable, nMarkerSizeClasses))]
  }
  return(circleRadiiMeters)
}
getSolarPos <- function(pth, scv) {
  # Solar position at each path point (lon/lat in degrees, POSIXct dTm).
  # scv selects the returned quantity: the full azimuth/height matrix,
  # one of the two columns, or a rescaled height ("rheight" applies
  # sin(height), "rheight2"/"rheight3" iterate the sin(. * pi/2) squash).
  solP <- solarpos(cbind(pth$lon, pth$lat), pth$dTm,
                   proj4string = CRS("+proj=longlat +datum=WGS84"))
  h <- solP[, 2]
  switch(scv,
         both = solP,
         azimuth = solP[, 1],
         height = h,
         rheight = sin(h * pi / 180),
         rheight2 = sin(sin(h * pi / 180) * pi / 2),
         rheight3 = sin(sin(sin(h * pi / 180) * pi / 2) * pi / 2))
}
# Auxiliar Non-export functions
# -----------------------------
# binClstpath auxiliary functions
formatSecs <- function(secs) {
  # Render a number of seconds as "h:m:s" (seconds kept to 2 decimals,
  # no zero padding).
  h <- secs %/% 3600
  m <- (secs - h * 3600) %/% 60
  s <- round(secs - h * 3600 - m * 60, digits = 2)
  paste(h, m, s, sep = ':')
}
formatMeters <- function(meters) {
  # Human-readable distance: whole metres below 1 km, otherwise
  # kilometres with two decimals.
  if (meters >= 1000) {
    paste(round(meters / 1000, 2), "km")
  } else {
    paste(round(meters, 0), "m")
  }
}
# format Tht parameters to given length and decimals.
frmTht <- function(Tht, d, w) {
  # Format Tht parameters: one "mean sd" pair per component of Tht$M
  # (sd taken from the diagonal of Tht$S), fixed-point with d decimals
  # in fields of width w, all joined into one space-separated string.
  pairs <- vapply(seq_along(Tht$M), function(m) {
    paste(formatC(Tht$M[m], format = 'f', digits = d, width = w),
          formatC(sqrt(Tht$S[m, m]), format = 'f', digits = d, width = w),
          sep = " ")
  }, character(1))
  paste(pairs, collapse = " ")
}
# clustering binary labels.
getkLbls <- function(bC,kNmbrs=FALSE)
return(lapply(1:bC@k, function(k){
bk <- paste(as.integer(rev(intToBits(k-1)[1:bC@m])),collapse="")
if (kNmbrs) {
if (bC@k<9) paste(k,'.',gsub('1','H',gsub('0','L',bk)),sep="")
else paste(formatC(k,width=2,flag='0'),'.',gsub('1','H',gsub('0','L',bk)),sep="")
}
else gsub('1','H',gsub('0','L',bk))
}))
# max/min scaler
maxminScale <- function(x) {
  # Linearly rescale x onto the unit interval [0, 1].
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
# get subPth with selected clusters
getSubPth <- function(bCP, showClst) {
  # Restrict a binClstPath object to the locations whose cluster
  # assignment (slot A) is among showClst; all parallel slots are
  # subset with the same index.
  keep <- which(bCP@A %in% showClst)
  bCP@A <- bCP@A[keep]
  bCP@spn <- bCP@spn[keep]
  bCP@dst <- bCP@dst[keep]
  bCP@pth <- bCP@pth[keep, ]
  bCP@X <- bCP@X[keep, ]
  bCP
}
# compute proportional limits for path view
getPropLims <- function(pth, a, b) {
  # Lon/lat limits of the window spanned by path points a..b, with the
  # shorter axis widened symmetrically (in 1e-4 degree steps) until both
  # spans are at least equal, so the view keeps its proportions.
  lims <- list(x = range(pth$lon[a:b]), y = range(pth$lat[a:b]))
  spans <- abs(vapply(lims, diff, numeric(1)))
  short_ax <- which.min(spans)
  long_ax <- which.max(spans)
  while (abs(diff(lims[[short_ax]])) < abs(diff(lims[[long_ax]]))) {
    lims[[short_ax]] <- lims[[short_ax]] + c(-0.0001, +0.0001)
  }
  lims
}
# bivariate binary clustering scatterplot with reference lines and legend
# Bivariate binary-clustering scatterplot: points colored by cluster
# assignment (grey-scale plain plot when no assignment exists yet),
# reference segments taken from rows 1 and 4 of bC@R, and a legend with
# the binary L/H cluster labels plus 'NC' (not classified).
sctr2D <- function(bC){
  if (length(bC@A)==0){
    # no cluster assignment yet: plain scatterplot
    plot(bC@X[,1],bC@X[,2],pch=20,xlab=colnames(bC@X)[1],ylab=colnames(bC@X)[2])
  } else {
    plot(bC@X[,1],bC@X[,2],col=bC@C[bC@A],pch=20,xlab=colnames(bC@X)[1],ylab=colnames(bC@X)[2])
  }
  # grey reference segments; values come from bC@R rows 1 and 4 --
  # presumably the low/high delimiters of the two variables (confirm
  # against the binClst class documentation)
  lines(c(bC@R[1,3],bC@R[1,3]),c(bC@R[1,2],bC@R[1,4]),col='grey')
  lines(c(bC@R[1,1],bC@R[1,3]),c(bC@R[1,4],bC@R[1,4]),col='grey')
  lines(c(bC@R[4,1],bC@R[4,1]),c(bC@R[4,2],bC@R[4,4]),col='grey')
  lines(c(bC@R[4,1],bC@R[4,3]),c(bC@R[4,2],bC@R[4,2]),col='grey')
  legend("topright",legend=c(getkLbls(bC),'NC'),col=bC@C,cex=0.8,lwd=3,text.font=1,bty='n')
}
# multivariate binary clustering scatterplot
# Multivariate binary-clustering scatterplot: the first variable in
# showVars splits the data into LOW/HIGH panels, each plotted against
# the other two selected variables, followed by a legend panel.
# showClst optionally restricts the plotted clusters.
sctr3D <- function(obj,showVars=numeric(),showClst=numeric()){
  # select the three variables to show and restrict to chosen clusters
  lims <- apply(obj@X,2,range)
  if (length(showVars)>=3) m <- showVars[1:3]
  else m <- c(1,2,3)
  X <- obj@X
  A <- obj@A
  if (length(showClst)>0) {
    X <- X[which(A %in% showClst),]
    A <- A[which(A %in% showClst)]
  }
  # panel titles and axis labels
  par(mar=c(4,4,2,0.5))
  mttl <- paste(colnames(obj@X)[m[1]],c(":LOW",":HIGH"),sep='')
  labs <- paste(colnames(obj@X)[m[2:3]],sep='')
  # scatterplot for LOW values of the splitting variable (clusters whose
  # label has 'L' at position m[1])
  klow <- which(substr(getkLbls(obj),m[1],m[1])=='L')
  Xlow <- X[which(A %in% klow),]
  plot(Xlow[,m[2]],Xlow[,m[3]],col=obj@C[A[which(A %in% klow)]],pch=20,xlim=lims[,m[2]],ylim=lims[,m[3]],main=mttl[1],xlab=labs[1],ylab=labs[2],cex.main=1.0,font.main=1)
  # scatterplot for HIGH values of the splitting variable
  khgh <- which(substr(getkLbls(obj),m[1],m[1])=='H')
  Xhgh <- X[which(A %in% khgh),]
  plot(Xhgh[,m[2]],Xhgh[,m[3]],col=obj@C[A[which(A %in% khgh)]],pch=20,xlim=lims[,m[2]],ylim=lims[,m[3]],main=mttl[2],xlab=labs[1],ylab=labs[2],cex.main=1.0,font.main=1)
  # legend panel: empty clusters and clusters hidden via showClst are
  # greyed out in the legend colors
  par(mar=c(4,1,0.5,0.5))
  plot(0,axes=FALSE,xlab="",ylab="",pch='')
  obj@C[which(lapply(1:obj@k,function(k) length(obj@A[which(obj@A==k)])==0)==TRUE)] <- brewer.pal(4,'Greys')[3]
  if (length(showClst)>0) obj@C[which(lapply(1:obj@k,function(k) !(k%in%showClst))==TRUE)] <- brewer.pal(4,'Greys')[3]
  legend("center",legend=c(getkLbls(obj,kNmbrs=TRUE),'NC'),col=obj@C,cex=0.8,lwd=3,text.font=1,bty='n')
}
|
719ae9797fe4ff776fe9a699a776762101a2836f
|
0afc025181a55a24b1bf22c9a950f5332dba7864
|
/R/mpersonalized_cv.R
|
d814bfcfba39c99474e01af5a7411b5d97026bbc
|
[] |
no_license
|
chenshengkuang/mpersonalized
|
10a9de0ccc37dc3e50395d8b2cc5dd5be6c153fb
|
949b5a69370df42541edc2e089db1825e4e77a7f
|
refs/heads/master
| 2021-10-25T14:24:07.586521
| 2019-04-04T16:19:59
| 2019-04-04T16:19:59
| 106,764,230
| 2
| 1
| null | 2019-04-04T16:20:00
| 2017-10-13T01:42:51
|
R
|
UTF-8
|
R
| false
| false
| 29,325
|
r
|
mpersonalized_cv.R
|
#' @title Cross Validation for \code{mpersonalized}
#'
#' @description This function implments \code{mpersonalized} and use cross validatation to tune penalty parameter.
#' The optimal penalty parameter is selected by minimizing \deqn{\sum_{i=1}^{n_k}\frac{|\hat{C}_k(X_{i})|}{\sum_{i=1}^{n_k}|\hat{C}_k(X_{i})|}\bigl [1\{\hat{C}_k(X_{i})>0\}-g_k(X_{i})\bigr]^2}
#' in the leave-out fold, where \eqn{\hat{C}_k(X_{i})} in the leave-out fold is independently estimated from the training set.
#'
#' @param problem A character string specifying whether the user wants to solve a "meta-analysis" or
#' "multiple outcomes" problem. For \code{problem = "meta-analysis"}, user should also supply
#' \code{Xlist}, \code{Ylist}, \code{Trtlist}. For \code{problem = "multiple outcomes"},
#' user should supply \code{X}, \code{Ylist}, \code{Trt}.
#' @param X Covariate matrix that should be supplied when \code{problem = "multiple outcomes"}
#' with rows indicating subjects and columns indicating covariates.
#' @param Trt Treatment vector that should be supplied when \code{problem = "multiple outcomes"},
#' which should be coded as 0 or 1.
#' @param P Propensity score vector when \code{problem = "multiple outcomes"}. If not supplied,
#' then study is treated as randomzied trial and the propensity score is estimated as the proportion
#' of 1's in \code{Trt} for every subject.
#' @param Xlist A list object that should be supplied when \code{problem = "meta-analysis"},
#' with \eqn{k}th element denoting the covariate matrix of study \eqn{k}.
#' @param Ylist When \code{problem = "meta-analysis"}, \code{Ylist} should be a list object with \eqn{k}th element
#' denoting the response vector of study \eqn{k}. When \code{problem = "multiple outcomes"}, \code{Ylist} should
#' be a list object with \eqn{k}th element denoting the \eqn{k}th outcome.
#' @param Trtlist A list object that should be supplied when \code{problem = "meta-analysis"},
#' with \eqn{k}th element denoting the treatment vector of study \eqn{k} (coded as 0 or 1).
#' @param Plist A list object that should be supplied when \code{problem = "meta-analysis"},
#' with the \eqn{k}th element denoting the propensity score vector of study \eqn{k}.
#' If not supplied, then each study is treated as randomized trial and the corresponding propensity score
#' is estimated as the proportion of 1's in the \eqn{k}th element of \code{Trtlist} for all subjects.
#' @param typelist A list object with \eqn{k}th element denoting the type of outcome corresponding
#' to the \eqn{k}th element in \code{Ylist}. Each element could be "continuous" or "binary".
#' @param penalty For different rules, the penalty could be "lasso", "GL", "SGL", "fused",
#' "lasso+fused", "GL+fused", "SGL+fused", or "SGL+SL". For single rule, the penalty could only be "lasso".
#' For \code{penalty = "none"}, use function \code{mpersonalized} instead.
#' The user should always input \code{penalty} and then supply the corresponding penalty parameter sequences
#' if needed.
#' @param lambda1 \eqn{\lambda_1} in the framework of different rules. If not supplied, a default
#' sequence will be computed.
#' @param lambda2 \eqn{\lambda_2} in the framework of different rules. If not supplied, a default
#' sequence will be computed.
#' @param tau0 Parameter \eqn{\tau_0} for the \code{"SGL+SL"} penalty in the framework of different rules.
#' If not supplied, a default sequence will be computed.
#' @param alpha \eqn{\alpha} in the framework of different rules. If not supplied, a default value
#' will be used depending on \code{penalty}.
#' @param single_rule_lambda \eqn{\lambda_{single}} in the framework of single rule.
#' @param single_rule A logical value, whether the single treatment framework is used. Default is \code{FALSE}.
#' @param cv_folds Number of folds needed for cross-validation. Default is 5
#' @param admm_control A list of parameters which user can specify to control the admm algorithm.
#' In \code{admm_control}, the following parameters can be supplied:
#' \code{abs.tol}, absolute tolerance; \code{rel.tol}, relative tolerance; \code{maxit}, maximum number of iterations;
#' \code{rho}, Lagrangian parameter.
#' @param contrast_builder_control A list of parameters which user can specify to control estimation of
#' contrast function. In \code{contrast_builder_control},
#' the following parameters could be supplied: \code{eff_aug}, a logical value whether efficiency augmentation
#' should be implemented; \code{response_model}, a character string specify what outcome model to use
#' if \code{eff_aug = TRUE}, \code{response_model} could be "lasso" or "linear";
#' \code{contrast_builder_folds}, the number of folds used in cross validation when \code{response_model = "lasso"}.
#' @param num_lambda1 If \code{lambda1} is not specified by user, user could still specify the length of the
#' \code{lambda1} sequence. The default length is 10.
#' @param num_lambda2 If \code{lambda2} is not specified by user, user could still specify the length of the
#' \code{lambda2} sequence. The default length is 10.
#' @param num_tau0 If \code{tau0} is not specified by user, the user can still specify the length of the
#' \code{tau0} sequence. The default length is 11.
#' @param min_tau If \code{tau0} is not specified by user, \code{min_tau} specifies the minimum value
#' for \eqn{\tau_0}. The largest value for \eqn{\tau_0} will be \code{1 / min_tau}.
#' @param num_single_rule_lambda If \code{single_rule_lambda} is not specified, user could still specify the length
#' of the \code{single_rule_lambda} sequence. The default length is 50.
#'
#' @import glmnet SGL caret Matrix
#'
#' @return An S3 object of class "mp_cv", which contains the information of the model with the optimal lambda. It can be supplied
#' to some other functions in the mpersonalized package for further analysis or prediction.
#'
#' \item{penalty_parameter_sequence}{A matrix object with each row denoting a configuration of the penalty parameters.}
#' \item{opt_penalty_parameter}{Optimal penalty parameter chosen by minimizing the cross validation error.}
#' \item{intercept}{The vector of intercepts corresponding to the optimal penalty parameter.}
#' \item{beta}{The coefficient matrix corresponding to the optimal penalty parameter.}
#' \item{number_covariates}{Number of candidate covariates considered.}
#' \item{number_studies_or_outcomes}{Number of studies if \code{problem = "meta-analysis"} or number of outcomes if \code{problem = "multiple outcomes"}.}
#'
#' @examples
#' set.seed(123)
#' sim_dat = simulated_dataset(n = 200, problem = "meta-analysis")
#' Xlist = sim_dat$Xlist; Ylist = sim_dat$Ylist; Trtlist = sim_dat$Trtlist
#'
#' # fit different rules with group lasso penalty
#' mp_cvmod_diff = mpersonalized_cv(problem = "meta-analysis",
#' Xlist = Xlist, Ylist = Ylist, Trtlist = Trtlist,
#' penalty = "GL", single_rule = FALSE)
#'
#' mp_cvmod_diff$intercept
#' mp_cvmod_diff$beta
#'
#' # fit a single rule with lasso penalty
#' mp_cvmod_single = mpersonalized_cv(problem = "meta-analysis",
#' Xlist = Xlist, Ylist = Ylist, Trtlist = Trtlist,
#' penalty = "lasso", single_rule = TRUE)
#'
#' mp_cvmod_single$intercept
#' mp_cvmod_single$beta
#' set.seed(NULL)
#' @export
mpersonalized_cv = function(problem = c("meta-analysis", "multiple outcomes"),
X, Trt, P = NULL,
Xlist, Ylist, Trtlist, Plist = replicate(length(Xlist), NULL, simplify = FALSE),
typelist = replicate(length(Xlist), "continuous", simplify = FALSE),
penalty = c("lasso", "GL", "SGL", "fused",
"lasso+fused", "GL+fused", "SGL+fused",
"SGL+SL"),
lambda1 = NULL, lambda2 = NULL, tau0 = NULL,
single_rule_lambda = NULL,
num_lambda1 = ifelse(!is.null(lambda1), length(lambda1),10),
num_lambda2 = ifelse(!is.null(lambda2), length(lambda2),10),
num_tau0 = ifelse(!is.null(tau0), length(tau0), 11),
min_tau = 1e-2,
num_single_rule_lambda = ifelse(!is.null(single_rule_lambda), length(single_rule_lambda), 50),
alpha = NULL, single_rule = FALSE, cv_folds = 5,
admm_control = NULL,
contrast_builder_control = NULL){
penalty = match.arg(penalty)
problem = match.arg(problem)
if (problem == "multiple outcomes"){
if (missing(X) | missing(Ylist) | missing(Trt))
stop("For multiple outcomes, X, Ylist, Trt need to be supplied!")
q = length(Ylist)
Xlist = replicate(q, X, simplify = FALSE)
Trtlist = replicate(q, Trt, simplify = FALSE)
#a default estimate for the propensity score
if (is.null(P)){
P = rep(sum(Trt) / length(Trt), length(Trt))
} else {
if (length(P) == 1)
P = rep(P, length(Trt))
}
Plist = replicate(q, P, simplify = FALSE)
} else if (problem == "meta-analysis"){
if (missing(Xlist) | missing(Ylist) | missing(Trtlist))
stop("For meta-analysis, Xlist, Ylist, Trtlist need to be supplied!")
#a default estimate for the propensity score
Plist = mapply(
function(P, Trt){
if (is.null(P)){
P = rep(sum(Trt) / length(Trt), length(Trt))
} else {
if (length(P) == 1)
P = rep(P, length(Trt))
}
return(P)
},
P = Plist,
Trt = Trtlist,
SIMPLIFY = FALSE)
}
q = length(Xlist)
p = dim(Xlist[[1]])[2]
#construct contrast for the data
Conlist = vector("list", q)
for (j in 1:q){
Conlist[[j]] = do.call(contrast_builder, c(list(X = Xlist[[j]],
Y = Ylist[[j]],
ori_Trt = Trtlist[[j]],
P = Plist[[j]],
type = typelist[[j]]),
contrast_builder_control))
}
standardized_data = contrast_standardize(Conlist = Conlist, Xlist = Xlist,
single_rule = single_rule)
modelYlist = standardized_data$modelYlist
modelXlist = standardized_data$modelXlist
#check whether the information provided is correct and set up the value for lambdas if not provided
if (single_rule == TRUE){
Ybar = standardized_data$Ybar
Xbar = standardized_data$Xbar
Xsd = standardized_data$Xsd
if (penalty != "lasso")
stop("When single rule is required, the penalty must be lasso!(for penalty = none, use function 'mpersonalized' instead.")
if (!is.null(lambda1) | !is.null(lambda2) | !is.null(alpha))
warning("When single rule = TRUE, the value for lambda1, lambda2, alpha are ignored!")
if (is.null(single_rule_lambda)){
lambda_default = lambda_estimate(modelXlist = modelXlist, modelYlist = modelYlist,
penalty = penalty, single_rule = single_rule,
num_single_rule_lambda = num_single_rule_lambda)
single_rule_lambda = lambda_default$single_rule_lambda
}
} else {
Ybarlist = standardized_data$Ybarlist
Xbarlist = standardized_data$Xbarlist
Xsdlist = standardized_data$Xsdlist
if (!is.null(single_rule_lambda))
warning("When single rule = FALSE, the value for single_rule_lambda is ignored!")
if (penalty == "none")
stop("For penalty = none, use function 'mpersonalized' instead.")
if (penalty %in% c("fused", "lasso+fused", "GL+fused", "SGL+fused")) {
if (penalty == "fused"){
if (!is.null(alpha)){
warning("When penalty = fused, values of alpha is ignored!")
alpha = NULL
}
if (!is.null(lambda1))
if (sum(lambda1 != 0) > 0)
warning("When penalty = fused, value of lambda1 is automatically set to be 0!")
lambda1 = 0
}
if (penalty == "lasso+fused"){
if (!is.null(alpha))
if(alpha != 1)
warning("When penalty = lasso+fused, alpha is automatically set to be 1!")
alpha = 1
} else if (penalty == "GL+fused"){
if (!is.null(alpha))
if(alpha != 0)
warning("When penalty = GL+fused, alpha is automatically set to be 0!")
alpha = 0
} else if (penalty == "SGL+fused"){
if (!is.null(alpha)){
if (alpha == 0 | alpha == 1){
warning("When penalty = SGL+fused, alpha cannot be set as 0 or 1, and default is 0.95!")
alpha = 0.95
}
} else alpha = 0.95
}
if (is.null(lambda1) | is.null(lambda2)){
lambda_default = lambda_estimate(modelXlist = modelXlist, modelYlist = modelYlist,
penalty = penalty, single_rule = single_rule, alpha = alpha,
num_lambda1 = num_lambda1, num_lambda2 = num_lambda2,
lambda1 = lambda1, lambda2 = lambda2)
if (is.null(lambda1))
lambda1 = lambda_default$lambda1
if (is.null(lambda2))
lambda2 = lambda_default$lambda2
}
} else if (penalty %in% c("lasso", "GL", "SGL", "SGL+SL")){
if (!is.null(lambda2)){
if (sum(lambda2 != 0) > 0){
warning("When penalty = lasso/GL/SGL/SGL+SL, the value for lambda2 is ignored and automatically set to be 0!")
}
}
lambda2 = 0
if (penalty == "lasso"){
if (!is.null(alpha))
if (alpha != 1)
warning("When penalty = lasso, alpha is automatically set to be 1!")
alpha = 1
} else if (penalty == "GL"){
if (!is.null(alpha))
if (alpha != 0)
warning("When penalty = GL, alpha is automatically set to be 0!")
alpha = 0
} else if (penalty == "SGL"){
if (!is.null(alpha)){
if (alpha == 0 | alpha == 1){
warning("When penalty = SGL, alpha cannot be set as 0 or 1, and default is 0.95!")
alpha = 0.95
}
} else alpha = 0.95
} else if (penalty == "SGL+SL"){
if (!is.null(alpha)){
if (alpha < 0 | alpha > 1){
warning("When penalty = SGL+SL, alpha must be between 0 and 1. The default is 0.95!")
alpha = 0.95
}
} else alpha = 0.95
}
if (is.null(lambda1)){ # & penalty != "SGL+SL"){
lambda_default = lambda_estimate(modelXlist = modelXlist, modelYlist = modelYlist,
penalty = penalty, single_rule = single_rule, alpha = alpha,
num_lambda1 = num_lambda1, lambda1 = lambda1)
lambda1 = lambda_default$lambda1
}
if (penalty == "SGL+SL")
{
if (is.null(tau0))
{
tau0 <- gen_tau0(num_tau0, min_tau)
}
}
}
}
#build cross-validation folds
folds_index = lapply(Xlist, function(x) createFolds(1:dim(x)[1], k = cv_folds))
#determine the dimension for the cost for tuning penalty parameters
if (single_rule == TRUE){
tune_cost = numeric(length(single_rule_lambda))
} else {
if (penalty %in% c("lasso", "GL", "SGL")){
tune_cost = numeric(length(lambda1))
names(tune_cost) <- paste0("lam=", round(lambda1, 2))
} else if (penalty %in% c("fused", "lasso+fused", "GL+fused", "SGL+fused")){
tune_cost = matrix(0, nrow = length(lambda1), ncol = length(lambda2))
rownames(tune_cost) <- paste0("lam1=", round(lambda1, 2))
colnames(tune_cost) <- paste0("lam2=", round(lambda2, 2))
} else if (penalty %in% c("SGL+SL")){
tune_cost = matrix(0, nrow = if(is.null(lambda1)){num_lambda1}else{length(lambda1)}, ncol = length(tau0))
rownames(tune_cost) <- paste0("lam=", round(lambda1, 2))
colnames(tune_cost) <- paste0("tau0=", round(tau0, 2))
}
}
#carry out method for each cross validation fold
for (k in 1:cv_folds){
cv_Xlist = vector("list", q); left_Xlist = vector("list", q)
cv_Ylist = vector("list", q); left_Ylist = vector("list", q)
cv_Trtlist = vector("list", q); left_Trtlist = vector("list", q)
cv_Plist = vector("list", q); left_Plist = vector("list", q)
for (j in 1:q){
cv_Xlist[[j]] = Xlist[[j]][-folds_index[[j]][[k]],]
cv_Ylist[[j]] = Ylist[[j]][-folds_index[[j]][[k]]]
cv_Trtlist[[j]] = Trtlist[[j]][-folds_index[[j]][[k]]]
cv_Plist[[j]] = Plist[[j]][-folds_index[[j]][[k]]]
left_Xlist[[j]] = Xlist[[j]][folds_index[[j]][[k]],]
left_Ylist[[j]] = Ylist[[j]][folds_index[[j]][[k]]]
left_Trtlist[[j]] = Trtlist[[j]][folds_index[[j]][[k]]]
left_Plist[[j]] = Plist[[j]][folds_index[[j]][[k]]]
}
cv_Conlist = vector("list", q)
left_Conlist = vector("list", q)
for (j in 1:q){
cv_Conlist[[j]] = do.call(contrast_builder, c(list(X = cv_Xlist[[j]],
Y = cv_Ylist[[j]],
ori_Trt = cv_Trtlist[[j]],
P = cv_Plist[[j]],
type = typelist[[j]]),
contrast_builder_control))
left_Conlist[[j]] = do.call(contrast_builder, c(list(X = left_Xlist[[j]],
Y = left_Ylist[[j]],
ori_Trt = left_Trtlist[[j]],
P = left_Plist[[j]],
type = typelist[[j]]),
contrast_builder_control))
}
cv_standardized_data = contrast_standardize(Conlist = cv_Conlist, Xlist = cv_Xlist, single_rule = single_rule)
cv_modelYlist = cv_standardized_data$modelYlist
cv_modelXlist = cv_standardized_data$modelXlist
#transform contrast into binary data for the left out fold
left_sConlist = lapply(left_Conlist, function(y) as.numeric(y > 0))
left_Wlist = lapply(left_Conlist, abs)
left_dataWlist = lapply(left_Wlist, sum)
left_adj_Wlist = mapply(function(w, dataw) w / dataw, w = left_Wlist,
dataw = left_dataWlist, SIMPLIFY = FALSE)
if (single_rule == TRUE){
cv_Ybar = cv_standardized_data$Ybar
cv_Xbar = cv_standardized_data$Xbar
cv_Xsd = cv_standardized_data$Xsd
cv_model = single_rule_lasso_method(modelYlist = cv_modelYlist, modelXlist = cv_modelXlist,
Ybar = cv_Ybar, Xbar = cv_Xbar, Xsd = cv_Xsd, lambda = single_rule_lambda)
cv_interceptlist = cv_model$interceptlist
cv_betalist = cv_model$betalist
for (ind in 1:length(single_rule_lambda))
tune_cost[ind] = tune_cost[ind] + sum(unlist(mapply(function(w, y, x) sum(w * (y - cv_interceptlist[[ind]] - x %*% cv_betalist[[ind]]) ^ 2),
w = left_adj_Wlist, y = left_sConlist, x = left_Xlist, SIMPLIFY = FALSE)))
} else {
cv_Ybarlist = cv_standardized_data$Ybarlist
cv_Xbarlist = cv_standardized_data$Xbarlist
cv_Xsdlist = cv_standardized_data$Xsdlist
if (penalty %in% c("fused", "lasso+fused", "GL+fused", "SGL+fused")){
cv_model = meta_method(modelYlist = cv_modelYlist, modelXlist = cv_modelXlist,
Ybarlist = cv_Ybarlist, Xbarlist = cv_Xbarlist, Xsdlist = cv_Xsdlist,
lambda1 = lambda1, lambda2 = lambda2, alpha = alpha, admm_control = admm_control)
cv_interceptlist = cv_model$interceptlist
cv_betalist = cv_model$betalist
for (ind1 in 1:length(lambda1))
for (ind2 in 1:length(lambda2))
tune_cost[ind1, ind2] = tune_cost[ind1, ind2] + sum(unlist(mapply(function(w, y, x, intercept, beta) sum(w * (y - intercept - x %*% beta) ^ 2),
w = left_adj_Wlist, y = left_sConlist, x = left_Xlist,
intercept = as.list(cv_interceptlist[[(ind1 - 1) * length(lambda2) + ind2]]),
beta = split(cv_betalist[[(ind1 - 1) * length(lambda2) + ind2]], row(cv_betalist[[(ind1 - 1) * length(lambda2) + ind2]])), SIMPLIFY = FALSE)))
} else if (penalty %in% c("lasso", "SGL", "GL")) {
cv_model = sparse_group_lasso_method(modelYlist = cv_modelYlist, modelXlist = cv_modelXlist,
Ybarlist = cv_Ybarlist, Xbarlist = cv_Xbarlist, Xsdlist = cv_Xsdlist,
lambda = lambda1, alpha = alpha)
cv_interceptlist = cv_model$interceptlist
cv_betalist = cv_model$betalist
for (ind in 1:length(lambda1))
tune_cost[ind] = tune_cost[ind] + sum(unlist(mapply(function(w, y, x, intercept, beta) sum(w * (y - intercept - x %*% beta) ^ 2),
w = left_adj_Wlist, y = left_sConlist, x = left_Xlist,
intercept = as.list(cv_interceptlist[[ind]]),
beta = split(cv_betalist[[ind]], row(cv_betalist[[ind]])), SIMPLIFY = FALSE)))
} else if (penalty %in% c("SGL+SL")) {
cv_model = sparse_group_fused_lasso_method(modelYlist = cv_modelYlist, modelXlist = cv_modelXlist,
Ybarlist = cv_Ybarlist, Xbarlist = cv_Xbarlist, Xsdlist = cv_Xsdlist,
lambda = lambda1, alpha = alpha, tau0 = tau0, nlambda = num_lambda1)
cv_interceptlist = cv_model$interceptlist
cv_betalist = cv_model$betalist
penalty_parameter_sequence_all <- cv_model$penalty_parameter_sequence
if (is.null(lambda1))
{
nlam1 <- num_lambda1
} else
{
nlam1 <- length(lambda1)
}
for (ind1 in 1:nlam1)
for (ind2 in 1:length(tau0))
tune_cost[ind1, ind2] = tune_cost[ind1, ind2] + sum(unlist(mapply(function(w, y, x, intercept, beta) sum(w * (y - intercept - x %*% beta) ^ 2),
w = left_adj_Wlist, y = left_sConlist, x = left_Xlist,
intercept = as.list(cv_interceptlist[[(ind1 - 1) * length(tau0) + ind2]]),
beta = split(cv_betalist[[(ind1 - 1) * length(tau0) + ind2]], row(cv_betalist[[(ind1 - 1) * length(tau0) + ind2]])), SIMPLIFY = FALSE)))
}
}
}
#for the complete data set
if (single_rule == TRUE){
Ybar = standardized_data$Ybar
Xbar = standardized_data$Xbar
Xsd = standardized_data$Xsd
full_model = single_rule_lasso_method(modelYlist = modelYlist, modelXlist = modelXlist,
Ybar = Ybar, Xbar = Xbar, Xsd = Xsd, lambda = single_rule_lambda)
opt_ind = which.min(tune_cost)
penalty_parameter_sequence = as.matrix(single_rule_lambda)
colnames(penalty_parameter_sequence) = "single_rule_lambda"
penalty_parameter_sequence_all = penalty_parameter_sequence
model_info = list(intercept = full_model$interceptlist[[opt_ind]], beta = full_model$betalist[[opt_ind]],
penalty_parameter_sequence = penalty_parameter_sequence,
opt_penalty_parameter= penalty_parameter_sequence[opt_ind,],
cv_error = tune_cost,
penalty = "lasso", single_rule = TRUE,
number_covariates = p, number_studies_or_outcomes = q,
Xlist = Xlist, Ylist = Ylist, Trtlist = Trtlist, Plist = Plist,
Ybar = Ybar, Xbar = Xbar, Xsd = Xsd,
problem = problem)
} else {
Ybarlist = standardized_data$Ybarlist
Xbarlist = standardized_data$Xbarlist
Xsdlist = standardized_data$Xsdlist
if (penalty %in% c("fused", "lasso+fused", "GL+fused", "SGL+fused")){
opt_ind = which(tune_cost == min(tune_cost), arr.ind = TRUE)
opt_ind1 = opt_ind[1]; opt_ind2 = opt_ind[2]
full_model = meta_method(modelYlist = modelYlist, modelXlist = modelXlist,
Ybarlist = Ybarlist, Xbarlist = Xbarlist, Xsdlist = Xsdlist,
lambda1 = lambda1[opt_ind1], lambda2 = lambda2[opt_ind2], alpha = alpha, admm_control = admm_control)
penalty_parameter_sequence = matrix(0, ncol = 2, nrow = length(lambda1) * length(lambda2))
colnames(penalty_parameter_sequence) = c("lambda1", "lambda2")
for (ind1 in 1:length(lambda1)){
for (ind2 in 1:length(lambda2)){
penalty_parameter_sequence[(ind1 - 1) * length(lambda2) + ind2,] = c(lambda1[ind1], lambda2[ind2])
}
}
penalty_parameter_sequence_all = penalty_parameter_sequence
opt_penalty_parameter = penalty_parameter_sequence[(opt_ind1 - 1) * length(lambda2) + opt_ind2,]
model_info = list(intercept = full_model$interceptlist[[1]], beta = full_model$betalist[[1]],
penalty_parameter_sequence = penalty_parameter_sequence,
opt_penalty_parameter = opt_penalty_parameter,
cv_error = tune_cost,
alpha = alpha, penalty = penalty, single_rule = FALSE,
number_covariates = p, number_studies_or_outcomes = q,
Xlist = Xlist, Ylist = Ylist, Trtlist = Trtlist, Plist = Plist,
Ybar = Ybarlist, Xbar = Xbarlist, Xsd = Xsdlist,
problem = problem)
} else if (penalty %in% c("lasso", "SGL", "GL")){
full_model = sparse_group_lasso_method(modelYlist = modelYlist, modelXlist = modelXlist,
Ybarlist = Ybarlist, Xbarlist = Xbarlist, Xsdlist = Xsdlist,
lambda = lambda1, alpha = alpha)
opt_ind = which.min(tune_cost)
penalty_parameter_sequence = as.matrix(lambda1)
colnames(penalty_parameter_sequence) = "lambda1"
penalty_parameter_sequence_all = penalty_parameter_sequence
model_info = list(intercept = full_model$interceptlist[[opt_ind]], beta = full_model$betalist[[opt_ind]],
penalty_parameter_sequence = penalty_parameter_sequence,
opt_penalty_parameter = penalty_parameter_sequence[opt_ind,],
cv_error = tune_cost,
alpha = alpha, penalty = penalty, single_rule = FALSE,
number_covariates = p, number_studies_or_outcomes = q,
Xlist = Xlist, Ylist = Ylist, Trtlist = Trtlist, Plist = Plist,
Ybar = Ybarlist, Xbar = Xbarlist, Xsd = Xsdlist,
problem = problem)
} else if (penalty %in% c("SGL+SL")){
opt_ind = which(tune_cost == min(tune_cost), arr.ind = TRUE)
opt_ind1 = opt_ind[1]; opt_ind2 = opt_ind[2]
full_model = sparse_group_fused_lasso_method(modelYlist = modelYlist, modelXlist = modelXlist,
Ybarlist = Ybarlist, Xbarlist = Xbarlist, Xsdlist = Xsdlist,
lambda = lambda1, alpha = alpha, tau0 = tau0[opt_ind2],
nlambda = num_lambda1)
penalty_parameter_sequence = full_model$penalty_parameter_sequence
#opt_penalty_parameter = penalty_parameter_sequence[(opt_ind1 - 1) * length(lambda2) + opt_ind2,]
#opt_penalty_parameter = penalty_parameter_sequence[opt_ind1, ]
opt_penalty_parameter = penalty_parameter_sequence[opt_ind1,]
model_info = list(intercept = full_model$interceptlist[[opt_ind1]],
beta = full_model$betalist[[opt_ind1]],
penalty_parameter_sequence = penalty_parameter_sequence_all,
opt_penalty_parameter = opt_penalty_parameter,
cv_error = tune_cost,
alpha = alpha, penalty = penalty, single_rule = FALSE,
number_covariates = p, number_studies_or_outcomes = q,
Xlist = Xlist, Ylist = Ylist, Trtlist = Trtlist, Plist = Plist,
Ybar = Ybarlist, Xbar = Xbarlist, Xsd = Xsdlist,
problem = problem)
}
}
class(model_info) = "mp_cv"
return(model_info)
}
|
a3e9a7f46b7614d5ad4ebe110367a3531a45f715
|
c9555ae7694cf75e459a4b7026c1a003033dd83b
|
/wareHouse/website_warehouse/pruet/hydro/global.R
|
4b33437a85caa6535e04be1a778d06c9db9110de
|
[
"MIT"
] |
permissive
|
YutingYao/Ag
|
4285fa9d1c942448ed072200e798dc5f51fd9f29
|
fe73916f6e549ff6cc20bfed96a583e3919ac115
|
refs/heads/master
| 2023-06-17T21:58:14.849291
| 2021-07-20T17:28:30
| 2021-07-20T17:28:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,083
|
r
|
global.R
|
### Packages ###
# Use library() rather than require(): library() stops with an error when a
# package is missing, whereas require() merely returns FALSE and lets the app
# continue into confusing downstream failures (e.g. readOGR not found).
library(shiny)
library(shinydashboard)
library(purrr)
library(tidyr)
library(dplyr)
library(tibble)
library(ggplot2)
library(leaflet)
library(stringr)
library(lubridate)
library(rgdal)
### User Defined Functions ###
source("functions/read_RDS.R") ## read RDS data files
source("functions/multiplot.R") ## function to plot
source("functions/calc.R") ## calculates return intensity
### Data frame of file names and lat, longs ###
map_df <- readRDS("RDS/spatial.rds")
### Import Spatial Data ###
# Server
skagit <- readOGR("geo/Skagit.geo.json", "OGRGeoJSON") # Skagit County
snohomish <- readOGR("geo/Snohomish.geo.json", "OGRGeoJSON") # Snohomish County
whatcom <- readOGR("geo/Whatcom.geo.json", "OGRGeoJSON") # Whatcom County
# Desktop
# skagit <- readOGR("geo/Skagit.geo.json") # Skagit County
# snohomish <- readOGR("geo/Snohomish.geo.json") # Snohomish County
# whatcom <- readOGR("geo/Whatcom.geo.json") # Whatcom County
# Bind all counties data
counties <- rbind(skagit, snohomish, whatcom, makeUniqueIDs = TRUE)
|
b35d05d21b17d375489ea7ee796da32ac5b8f5d4
|
22903e55d4c63712f23e345604df7629865d7671
|
/data-raw/catmap164/run.R
|
606c74cee420c7a6332a33090db2da9a7d45848d
|
[] |
no_license
|
tpq/catmap
|
864954b17f691e684e85ff9a7b214d27974a3b8f
|
79462202636c209a00efbebedb318a3b96ce6d0a
|
refs/heads/master
| 2021-01-20T00:16:25.473873
| 2017-06-29T00:24:16
| 2017-06-29T00:24:16
| 89,104,691
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 367
|
r
|
run.R
|
# Name the working directory where your file is located:
# NOTE(review): setwd() inside a script is fragile -- this assumes the R
# session was started at the package root; prefer launching R from there.
setwd("./data-raw/catmap164")
library(catmap)
data(catmapdata)
# Build cm.obj from file location
# catmap(data, 0.95, TRUE): presumably fits the meta-analysis at a 0.95
# confidence level with printout enabled -- confirm arguments in ?catmap.
cm.obj <- catmap(cm.obj <- catmapdata, 0.95, TRUE)
|
6f1717845bf0ff594ef763c9af9d5c1ead40ab67
|
156811aac95d26f45fa74d249f416e32254fb4eb
|
/Machine learning KNN.R
|
36e6e14abc61d5874356204d34266f7387f0f69e
|
[] |
no_license
|
wangrenfeng0/SMU-Data-Science
|
652fe6e37541cd9ab42881229bcfb1aeab3bbb03
|
cc7ff4b0ee248dae7e515af4f59390ba6297d7d5
|
refs/heads/master
| 2023-01-07T11:40:30.836318
| 2020-10-21T03:26:04
| 2020-10-21T03:26:04
| 288,824,372
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,709
|
r
|
Machine learning KNN.R
|
# Unit 6 KNN and K-Means
# NOTE(review): this is R Markdown content saved with a .R extension -- the
# ```{r} fences below are not valid R syntax, so the file cannot be source()d
# as-is; treat each fenced chunk as a unit to run interactively (or rename
# the file to .Rmd).
```{r}
#install package and load library
#install.packages("class")
#install.packages("caret")
#install.packages("e1071")
library(class)
library(caret)
library(e1071)
library(ggplot2)
library(dplyr)
library(magrittr)
# Simple Example Credit Rating as a Function of income and debt
dfTrain = data.frame(income = c(34,67,70,110,89,90,102,104,110,120,170),
                     CreditRating = c(750,680,670,675,710,690,715,720,650,710,720),
                     Qualify = c("Yes","No","No","Yes","No","No","Yes","Yes","No","Yes","Yes"))
dfTrain %>% ggplot(aes(x = CreditRating, y = income, color = Qualify)) + geom_point()
dfTest = data.frame(income = 92, CreditRating = 694)
knn(dfTrain[,1:2], dfTest, dfTrain$Qualify, k = 5, prob = TRUE)
```
#Iris Example Classification
```{r}
irisVersVirg = iris %>% filter(Species == "versicolor" | Species == "virginica")
df = data.frame(Sepal.Length = 6.20 , Sepal.Width = 2.80 )
knn(irisVersVirg[,c(1,2)], df, irisVersVirg$Species, k = 5, prob = TRUE)
knn(irisVersVirg[,c(1,2)], df, irisVersVirg$Species, k = 15, prob = TRUE)
```
# NOTE(review): $Specie below relies on data.frame partial matching to find
# the Species column; prob=T also uses the reassignable alias T for TRUE.
df1= data.frame(Sepal.Length=6.1, Sepal.Width=2.5)
knn(irisVersVirg[,c(1,2)], df1,irisVersVirg$Specie, k=5, prob=T)
knn(irisVersVirg[,c(1,2)], df1,irisVersVirg$Specie, k=15, prob=T)
df2=data.frame(Sepal.Width=6.2,Petal.Length=4.9)
knn(irisVersVirg[,2:3], df2,irisVersVirg$Species, k=5, prob=T)
knn(irisVersVirg[,2:3], df2,irisVersVirg$Species, k=15, prob=T)
# Hand-built predicted/actual labels to illustrate a confusion matrix.
Emails = data.frame(Predicted = c("Spam","Ham","Ham", "Ham", "Ham", "Spam", "Ham", "Spam", "Ham", "Spam"), Actual = c("Spam", "Spam", "Ham", "Ham", "Spam", "Ham", "Spam","Ham","Spam","Spam" ))
table(Emails)
confusionMatrix(table(Emails))
#Iris Example Cross Validation
#Virginica v. Versicolor
set.seed(6)
splitPerc = .75
irisVersVirg = iris %>% filter(Species == "versicolor" | Species == "virginica")
summary(irisVersVirg)
irisVersVirg = droplevels(irisVersVirg,exclude = "setosa")
summary(irisVersVirg)
trainIndices = sample(1:dim(irisVersVirg)[1],round(splitPerc * dim(irisVersVirg)[1]))
train = irisVersVirg[trainIndices,]
test = irisVersVirg[-trainIndices,]
irisVersVirg %>% ggplot(aes(x = Sepal.Length,Sepal.Width,color = Species)) + geom_point()
# k = 3
classifications = knn(train[,c(1,2)],test[,c(1,2)],train$Species, prob = TRUE, k = 3)
table(classifications,test$Species)
confusionMatrix(table(classifications,test$Species))
# k = 5
classifications = knn(train[,c(1,2)],test[,c(1,2)],train$Species, prob = TRUE, k = 5)
table(test$Species,classifications)
confusionMatrix(table(test$Species,classifications))
# k = 10
classifications = knn(train[,c(1,2)],test[,c(1,2)],train$Species, prob = TRUE, k = 10)
table(test$Species,classifications)
confusionMatrix(table(test$Species,classifications))
# k = 20
classifications = knn(train[,c(1,2)],test[,c(1,2)],train$Species, prob = TRUE, k = 20)
table(test$Species,classifications)
CM = confusionMatrix(table(test$Species,classifications))
CM$overall[1]
```
## Loop for many k and one training / test partition
```{r}
# Accuracy for k = 1..30 on a single fixed train/test split.
accs = data.frame(accuracy = numeric(30), k = numeric(30))
for(i in 1:30)
{
  classifications = knn(train[,c(1,2)],test[,c(1,2)],train$Species, prob = TRUE, k = i)
  table(test$Species,classifications)
  CM = confusionMatrix(table(test$Species,classifications))
  accs$accuracy[i] = CM$overall[1]
  accs$k[i] = i
}
plot(accs$k,accs$accuracy, type = "l", xlab = "k")
```
# Loop for many k and the average of many training / test partition
```{r}
iterations = 500
numks = 30
masterAcc = matrix(nrow = iterations, ncol = numks)
# Average accuracy over 500 random splits to smooth split-to-split noise.
for(j in 1:iterations)
{
  accs = data.frame(accuracy = numeric(30), k = numeric(30))
  trainIndices = sample(1:dim(irisVersVirg)[1],round(splitPerc * dim(irisVersVirg)[1]))
  train = irisVersVirg[trainIndices,]
  test = irisVersVirg[-trainIndices,]
  for(i in 1:numks)
  {
    classifications = knn(train[,c(1,3)],test[,c(1,3)],train$Species, prob = TRUE, k = i)
    table(classifications,test$Species)
    CM = confusionMatrix(table(classifications,test$Species))
    masterAcc[j,i] = CM$overall[1]
  }
}
MeanAcc = colMeans(masterAcc)
plot(seq(1,numks,1),MeanAcc, type = "l")
```
#Internal Cross Validation
```{r}
# Simple Example Credit Rating as a Function of income and debt
df = data.frame(income = c(34,67,70,110,89,90,102,104,110,120,170),
                CreditRating = c(750,680,670,675,710,690,715,720,650,710,720),
                Qualify = c("Yes","No","No","Yes","No","No","Yes","Yes","No","Yes","Yes"))
knn.cv(df[,1:2], df$Qualify, k = 3)
```
irisclassified=knn.cv(iris[,1:4],iris$Species,k=10)
confusionMatrix(table(iris[,5], irisclassified))
#Standardization
```{r}
# Simple Example Credit Rating as a Function of income and debt...Not standardized
dfTrain = data.frame(income = c(34000,67000,70000,110000,89000,90000,102000,104000,110000,120000,170000),
                     CreditRating = c(750,680,670,675,710,690,715,720,650,710,720),
                     Qualify = c("Yes","No","No","Yes","No","No","Yes","Yes","No","Yes","Yes"))
classifications = knn.cv(dfTrain[,1:2],dfTrain$Qualify, k = 3)
confusionMatrix(classifications,dfTrain$Qualify)
# Simple Example Credit Rating as a Function of income and debt ... Standardized
dfTrain = data.frame(income = c(34,67,70,110,89,90,102,104,110,120,170),
                     CreditRating = c(750,680,670,675,710,690,715,720,650,710,720),
                     Qualify = c("Yes","No","No","Yes","No","No","Yes","Yes","No","Yes","Yes"))
dfZTrain = data.frame(Zincome = scale(dfTrain$income), ZCreditRating = scale(dfTrain$CreditRating), Qualify = dfTrain$Qualify)
classifications = knn.cv(dfZTrain[,1:2],dfZTrain$Qualify, k = 3)
confusionMatrix(classifications,dfTrain$Qualify)
# Simple Example Credit Rating as a Function of income and debt ... Similar Scale
dfTrain = data.frame(income = c(34,67,70,110,89,90,102,104,110,120,170),
                     CreditRating = c(750,680,670,675,710,690,715,720,650,710,720),
                     Qualify = c("Yes","No","No","Yes","No","No","Yes","Yes","No","Yes","Yes"))
dfTest = data.frame(income = 92, CreditRating = 694)
knn(dfTrain[,1:2], dfTest, dfTrain$Qualify, k = 5, prob = TRUE)
classifications = knn.cv(dfTrain[,1:2],dfTrain$Qualify, k = 3)
confusionMatrix(classifications,dfTrain$Qualify)
```
#Example Default
```{r}
#read in data Credit Default.csv
# NOTE(review): file.choose() opens an interactive picker; this chunk cannot
# run non-interactively.
credit = read.csv(file.choose(),header = TRUE)
#make resposnse a factor rather than 0,1
credit$default.payment.next.month = factor(credit$default.payment.next.month,labels = c("NoDefault","Default"))
summary(credit)
#plot the data
credit %>% ggplot(aes(x = AGE, y = LIMIT_BAL,color = default.payment.next.month)) + geom_point()
#Create standardized variables for later.
#credit$Z_Lim = (credit$LIMIT_BAL-mean(credit$LIMIT_BAL))/sd(credit$LIMIT_BAL)
#credit$Z_AGE = (credit$AGE-mean(credit$AGE))/sd(credit$AGE)
credit$Z_Lim = scale(credit$LIMIT_BAL)
credit$Z_AGE = scale(credit$AGE)
#create training and test sets
trainInd = sample(seq(1,30000,1), .8*30000)
train = credit[trainInd,]
test = credit[-trainInd,]
#External CV
#Raw Limit and AGE
classifications = knn(train[,c(2,6)],test[,c(2,6)],train$default.payment.next.month,prob = TRUE, k = 5)
confusionMatrix(table(classifications,test$default.payment.next.month))
#Standardized
classifications = knn(train[,c(15,16)],test[,c(15,16)],train$default.payment.next.month,prob = TRUE, k = 5)
confusionMatrix(table(classifications,test$default.payment.next.month))
#Internal CV
#Raw Limit and AGE
classifications = knn.cv(credit[,c(2,6)],credit$default.payment.next.month,prob = TRUE, k = 5)
confusionMatrix(table(classifications,credit$default.payment.next.month))
#Standardized
classifications = knn.cv(credit[,c(15,16)],credit$default.payment.next.month,prob = TRUE, k = 5)
confusionMatrix(table(classifications,credit$default.payment.next.month))
```
#Multinomial Example: Iris Data
```{r}
#Iris Example Classification
#Plot
df = data.frame(Sepal.Length = 6.20 , Sepal.Width = 2.80 )
knn(iris[,c(1,2)], df, iris$Species, k = 3, prob = TRUE)
knn(iris[,c(1,2)], df, iris$Species, k = 15, prob = TRUE)
df = data.frame(Sepal.Length = 5.02 , Sepal.Width = 4.02 )
knn(iris[,c(1,2)], df, iris$Species, k = 3, prob = TRUE)
knn(iris[,c(1,2)], df, iris$Species, k = 15, prob = TRUE)
df = data.frame(Sepal.Length = 5.5 , Sepal.Width = 3.25 )
knn(iris[,c(1,2)], df, iris$Species, k = 3, prob = TRUE)
knn(iris[,c(1,2)], df, iris$Species, k = 15, prob = TRUE)
knn(iris[,c(1,2)], df, iris$Species, k = 50, prob = TRUE)
knn(iris[,c(1,2)], df, iris$Species, k = 90, prob = TRUE)
# Archeology
```{r}
pottery = read.csv(file.choose(),header = TRUE)
pottery
confusionMatrix(table(knn.cv(pottery[,1:5],pottery$Site, k = 3), pottery$Site))
QOI = data.frame(Al = 21, Fe = 6.7, Mg = 4.9, Ca = 0.10, Na = 0.11)
knn(pottery[,1:5],QOI,pottery$Site, prob = TRUE, k = 3)
knn(pottery[,1:5],QOI,pottery$Site, prob = TRUE, k = 5)
```
#For Live Session
```{r}
#Use a 70 - 30 train/test split to use cross validation to
#tune the hyperparameter k
# Loop for many k and the average of many training / test partition
set.seed(1)
iterations = 500
numks = 60
splitPerc = .95
masterAcc = matrix(nrow = iterations, ncol = numks)
for(j in 1:iterations)
{
  trainIndices = sample(1:dim(iris)[1],round(splitPerc * dim(iris)[1]))
  train = iris[trainIndices,]
  test = iris[-trainIndices,]
  for(i in 1:numks)
  {
    classifications = knn(train[,c(1,3)],test[,c(1,3)],train$Species, prob = TRUE, k = i)
    table(classifications,test$Species)
    CM = confusionMatrix(table(classifications,test$Species))
    masterAcc[j,i] = CM$overall[1]
  }
}
MeanAcc = colMeans(masterAcc)
plot(seq(1,numks,1),MeanAcc, type = "l")
which.max(MeanAcc)
max(MeanAcc)
# FOR LIVE SESSION LEAVE 1 OUT KNN IRIS
set.seed(1)
iterations = 500
numks = 90
masterAcc = matrix(nrow = iterations, ncol = numks)
for(j in 1:iterations)
{
  for(i in 1:numks)
  {
    CM = confusionMatrix(table(iris[,5],knn.cv(iris[,c(1,2)],iris[,5],k = i)))
    masterAcc[j,i] = CM$overall[1]
  }
}
MeanAcc = colMeans(masterAcc)
plot(seq(1,numks,1),MeanAcc, type = "l")
which.max(MeanAcc)
max(MeanAcc)
```
#Extras
##tune k (hyperparameter)
```{r}
iterations = 20
numks = 30
masterAcc = matrix(nrow = iterations, ncol = numks)
for(j in 1:iterations)
{
  trainInd = sample(seq(1,30000,1), .8*30000)
  train = credit[trainInd,]
  test = credit[-trainInd,]
  for(i in 1:numks)
  {
    classifications = knn(train[,c(2,6)],test[,c(2,6)],train$default.payment.next.month,prob = TRUE, k = i)
    CM = confusionMatrix(table(classifications,test$default.payment.next.month))
    masterAcc[j,i] = CM$overall[1]
  }
}
MeanAcc = colMeans(masterAcc)
plot(seq(1,numks,1),MeanAcc, type = "l")
|
eb4cd8181a91e975d82114eec6336eab31a400b5
|
787ca972a55f78a582999cc016f61275d225b701
|
/plot4.R
|
9d5f32a08aaa8ac83a6200ed17ba3bf1a55d193b
|
[] |
no_license
|
qcbit/ExData_Plotting1
|
2397ee5b8a4defb30c1df7c0e63c3356d56eccef
|
4e3884c696034597d31126a0d9e6aee99686d98b
|
refs/heads/master
| 2021-01-18T02:42:25.661909
| 2014-10-08T21:46:08
| 2014-10-08T21:46:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,015
|
r
|
plot4.R
|
# plot4.R: draws a 2x2 panel of household power-consumption plots for the
# two days 2007-02-01 and 2007-02-02 and saves the result to plot4.png.
library(lubridate)
# "?" encodes missing values in the raw semicolon-separated file
d1<-read.table("household_power_consumption.txt", na.strings = "?", header=TRUE, sep=";")
# Combine the separate Date and Time columns into one POSIXct timestamp
d2<-within(d1, Datetime<-as.POSIXct(paste(Date, Time), format="%d/%m/%Y %T"))
# Restrict to the two target days (1-2 Feb 2007)
d3<-subset(d2, year(Datetime) == 2007 & month(Datetime) == 2 & day(Datetime) >= 1 & day(Datetime) <= 2)
# mfcol fills the 2x2 grid column-first: plots 1,2 in column 1; 3,4 in column 2
par(mfcol=c(2,2))
#plot 1
plot(x=d3$Datetime, y=d3$Global_active_power, type = "l", lab=c(3,5,7), xlab="", ylab="Global Active Power (kilowatts)")
#plot 2
plot(x=d3$Datetime, y=d3$Sub_metering_1, type = "l", xlab="", ylab="Energy sub metering")
lines(x=d3$Datetime, y=d3$Sub_metering_2, col="Red")
lines(x=d3$Datetime, y=d3$Sub_metering_3, col="Blue")
legend("topright", lty=1, col = c("Black","Red","Blue"), legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
#plot 3
plot(x=d3$Datetime, y=d3$Voltage, type = "l", xlab="datetime", ylab="Voltage")
#plot 4
plot(x=d3$Datetime, y=d3$Global_reactive_power, type = "l", xlab="datetime", ylab="Global_reactive_power")
# NOTE(review): dev.copy() snapshots the current screen device; opening
# png("plot4.png") before plotting and closing with dev.off() would give more
# predictable output sizing.
dev.copy(png, file = "plot4.png")
dev.off()
|
29e195227840f8124ddc09b783c530c927e96a84
|
2a002aa01c0bfa6041298b5f8b7fb017c6277501
|
/man/uhi_stats.Rd
|
01e83a72ff1710f89f262bdbf9efc394db2ff2fc
|
[] |
no_license
|
RichardLemoine/LSTtools
|
185fdd168eb0ccf80fb28a7de4894e6d926d1dda
|
4cd5a3b90d954c7f3b7dc99449268f7d4e94f6f7
|
refs/heads/master
| 2022-12-29T02:40:06.582598
| 2020-10-15T14:13:24
| 2020-10-15T14:13:24
| 285,254,084
| 7
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,888
|
rd
|
uhi_stats.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/UHI_indicators.R
\name{uhi_stats}
\alias{uhi_stats}
\title{uhi_stats}
\usage{
uhi_stats(x, y, id = NULL)
}
\arguments{
\item{x}{LST raster layer.}
\item{y}{Raster indicating spatial limits (e.g., land cover).}
\item{id}{Numeric: ID of the spatial limit of the urban area (e.g., urban class).}
}
\value{
A data frame with descriptive statistics and difference
and magnitude UHI indicators.
}
\description{
Computes Urban Heat Island (UHI) indicators based
on a LST raster and spatial limits defined by a raster layer.
}
\details{
Computes LST descriptive statistics (i.e., min, mean,
max and sd) together with the difference and magnitude UHI indicators.
The difference indicator is the mean urban LST minus the mean LST of
each of the other classes, and the magnitude indicator is the maximum
LST minus the mean LST within each class. x and y must share the same
projection, and id must be a numeric value.
}
\examples{
\dontrun{# For urban land use class with id = 2
uhiind <- uhi_stats(lstL8, landuse, id = 2)}
}
\references{
Dousset, B. and Gourmelon, F. (2003). Satellite
multi-sensor data analysis of urban surface temperatures and
landcover. ISPRS Journal of Photogrammetry and Remote
Sensing. 58(1-2), 43-54.
Chen, X. L., Zhao, H. M., Li, P. X. and Yin, Z. Y. (2006).
Remote sensing image-based analysis of the relationship
between urban heat island and land use/cover changes. Remote
Sensing of Environment. 104(2), 133-146.
Rajasekar, U. and Weng, Q. H. (2009). Urban heat island monitoring
and analysis using a non-parametric model: A case study of
Indianapolis. ISPRS Journal of Photogrammetry and Remote
Sensing. 64(1), 86-96.
Zhou, J., Li, J. and Yue, J. (2010). Analysis of urban heat
island (UHI) in the Beijing metropolitan area by time-series
MODIS data. IEEE International Geoscience and Remote Sensing
Symposium (IGARSS 2010) (pp. 3327-3330).
}
|
6c3c40980fe4bb2331f43a984642c2d05f4a2dad
|
151afcb214c56140170f56a6a956912b38173cb0
|
/Challenge.R
|
ffeb4e5e21765cdf5b8d629de0a2f2ab97df978b
|
[] |
no_license
|
alblaine/text-analysis-using-r
|
69066b866a3db60587678c5b30590921b3ebe80d
|
af0c64e0b7488aa1b0697a4df296c985e124a86f
|
refs/heads/main
| 2023-08-02T20:45:26.984805
| 2021-09-12T17:34:06
| 2021-09-12T17:34:06
| 405,696,766
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 659
|
r
|
Challenge.R
|
# Challenge Activity:
# Use the Restaurant_Reviews.tsv dataset and do some analysis
# NOTE(review): readtext() comes from the readtext package -- it must be
# loaded (presumably by an earlier activity script; confirm) and
# Restaurant_Reviews.tsv must be in the working directory.
rest_reviews <- readtext('Restaurant_Reviews.tsv')
# I. Sentiment Analysis (see Activity 3)
# 1. What review has the most positive terms?
# 2. What review has the most negative terms?
# II. Collocations (see Activity 6)
# 3. What terms tend to occur together? Find term collocations of two terms (see Activity 6, section 2)
# III. Topic Modeling
# 4. Generate 10 topics from this dataset (see Activity 7)
# IV. Naive Bayes Classification (see Activity 10)
# 5. Train and test the accuracy of a model that classifies whether a review gets a 0 or 1 'Liked' value.
|
4700772d16cf19f7df0987dca3d94aaa4825bbd0
|
0a99dd8ff6bcec8f5735fac4922d55cf24444902
|
/cachematrix.R
|
450d4f72f7a4b827b9bf4b44d87bf743d81012ac
|
[] |
no_license
|
leabuton/ProgrammingAssignment2
|
917863afb6fdebe41eec18c898c0f49308744776
|
add4c3dd85744e40fed7e757b2bc93e479a99c66
|
refs/heads/master
| 2020-05-29T11:07:35.140073
| 2015-07-25T11:33:51
| 2015-07-25T11:33:51
| 39,681,998
| 0
| 0
| null | 2015-07-25T11:22:22
| 2015-07-25T11:22:22
| null |
UTF-8
|
R
| false
| false
| 4,891
|
r
|
cachematrix.R
|
## This script is part of a Coursera course assignment. The course is
## "R Programming", the assignment is "Programming Assignment 2".
## In this script:
## makeCacheMatrix : A function that allows to cache the inverse of a matrix.
## cacheSolve: A function that allows to compare a matrix to a cached one and
## either retrieve the inverse of the matrix from the cache (if possible) or
## compute it.
## makeCacheMatrix(x)
## - x is a square matrix
## - returns a list with four elements, each of which is again a function:
## set(y) takes the matrix y and assigns it to x
## get() returns x
## setinverse(y) takes the matrix y and stores it in the variable
## inverseofx (internal variable of makeCacheMatrix(x))
## getinverse() returns inverseofx
## cacheSolve(x,y=makeCacheMatrix(x), overwrite=TRUE)
## - x is a non-singular(!) square matrix
## - y is a list as returned by makeCacheMatrix(x')
## - overwrite is a logical variable: It specifies if in the case that x!=x'
## the list y should be updated with x and the inverse of x
## (and hence x' and its inverse are lost) or if a new list
## z <- makeCacheMatrix(x) should be created with inverse of x set.
## - returns the inverse of x
## - if overwrite=FALSE and x!=x', then the list z is created in the defining
## environment of cacheSolve
## NOTE: The possibility that the matrix x is singular is not treated in the
## code below. Although there is a is.singular.matrix function in R it belongs
## to the package matrixcalc, which does not belong to base. And I didn't want
## to code the whole process of guiding a user through installing matrixcalc if
## it isn't already installed.
## makeCacheMatrix(x)
## Creates a caching wrapper around a square matrix: returns a list of four
## accessor functions that share the matrix `x` and its cached inverse via
## the closure environment of this call.
##
## Returned elements:
##   set(y)          - replace the stored matrix (invalidates the cache)
##   get()           - return the stored matrix
##   setinverse(inv) - store `inv` as the cached inverse (not validated!)
##   getinverse()    - return the cached inverse, or NULL if none is stored
makeCacheMatrix <- function(x = matrix()) {
    # NULL marks "inverse not yet computed"
    inverseofx <- NULL
    # `<<-` assigns into the enclosing (makeCacheMatrix) environment so the
    # cached state is shared by all four accessors.
    set <- function(y) {
        x <<- y
        inverseofx <<- NULL  # the old inverse no longer matches x
    }
    get <- function() {
        x
    }
    # Stores whatever the caller supplies; it does not verify that
    # `computedinverse` really is the inverse of x.
    setinverse <- function(computedinverse) {
        inverseofx <<- computedinverse
    }
    getinverse <- function() {
        inverseofx
    }
    # Return the accessors without printing them at the console.
    invisible(list(set = set, get = get,
                   setinverse = setinverse, getinverse = getinverse))
}

## cacheSolve(x, y, overwrite)
## Returns the inverse of the non-singular square matrix `x`, reusing the
## inverse cached in `y` (a makeCacheMatrix() list) when `y` already holds
## the same matrix.
##
## Fixes relative to the original:
##   1. `all(x == y$get())` raised "non-conformable arrays" when the
##      dimensions differed and returned NA when entries were NA; the
##      comparison is now guarded by a dimension check and isTRUE().
##   2. With overwrite = TRUE and a different matrix, the original stored
##      the new inverse into `y` WITHOUT replacing y's matrix, leaving the
##      cache inconsistent (a later query for y's old matrix would return
##      the wrong inverse). y$set(x) is now called first.
cacheSolve <- function(x, y = makeCacheMatrix(x), overwrite = TRUE) {
    cached <- y$get()
    # && short-circuits, so the elementwise test only runs on same-shape
    # matrices; isTRUE() turns an NA comparison result into "not equal".
    same_matrix <- identical(dim(x), dim(cached)) && isTRUE(all(x == cached))
    if (same_matrix) {
        inverseofxprime <- y$getinverse()
        if (!is.null(inverseofxprime)) {
            message("The inverse of your input matrix was already in the cache. Retrieving it from there.")
            return(inverseofxprime)
        }
        # Same matrix but nothing cached yet: compute and remember it.
        inverseofx <- solve(x)
        y$setinverse(inverseofx)
        return(inverseofx)
    }
    # x differs from the cached matrix: compute from scratch.
    inverseofx <- solve(x)
    if (overwrite) {
        # Replace the cached matrix BEFORE storing its inverse (set()
        # clears the cached inverse as a side effect).
        y$set(x)
        y$setinverse(inverseofx)
    } else {
        # Preserve y; create a separate cache object. `<<-` puts `z` in a
        # parent environment of cacheSolve, mirroring the original
        # behaviour (and its caveat about clobbering an unrelated `z`).
        z <<- makeCacheMatrix(x)
        z$setinverse(inverseofx)
    }
    inverseofx
}
|
a21d30fb230a1ca10e7530b31c029434662f4fb1
|
0078cb5b8c3a36e68f1801c1f6e129d149935ad8
|
/2016 fraud score script_update.R
|
77094836c1df7c2ed1f57e080d0da013061c045f
|
[] |
no_license
|
colejharvey/mexico_russia_fraud
|
90f6a4ccdb1b64e522bd8499f51d6cf70992e663
|
f46cf051cc3c2d7e9a85a39229f20ef6e949e2ea
|
refs/heads/master
| 2021-06-23T00:43:01.465306
| 2021-02-07T17:12:35
| 2021-02-07T17:12:35
| 190,785,454
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,007
|
r
|
2016 fraud score script_update.R
|
###Script for doing all chi square tests in one go
##Presidential elections
## Computes last-digit chi-square tests per region for three parties'
## vote counts (a digit-based election-fraud heuristic), builds a 0-3
## fraud score per region, and appends party vote shares.
# NOTE(review): rm(list=ls()) wipes the whole workspace on source; avoid in shared scripts.
rm(list=ls())
russia2016 <- read.csv("C:/Users/Cole/Documents/Research topics literature/All Russian election data/2016 Russia election data full ids.csv")
####Set up ones-digits if necessary##
# Extract the ones digit of each precinct-level vote count.
urdigit<-(russia2016$united.russia)%%10
kprfdigit<-(russia2016$kprf)%%10
ldprdigit<-(russia2016$ldpr)%%10
russia2016<-cbind(russia2016, urdigit, kprfdigit, ldprdigit)
####
# One row per region: region id + three chi-square p-values.
chivalues<-matrix(NA, nrow=92, ncol=4)
# Null hypothesis: each of the ten ones-digits occurs with probability 0.1.
p<-as.vector(rbind(.1, .1, .1, .1, .1, .1, .1, .1, .1, .1))
i<-1
# NOTE(review): only regions 83-92 are computed here, leaving rows 1-82 NA --
# confirm whether this was a deliberate partial re-run or should be 1:92.
for (i in 83:92){
group<-subset(russia2016, regionid==i)
if (nrow(group) > 0){
table1<-table(group$urdigit)
table2<-table(group$kprfdigit)
table3<-table(group$ldprdigit)
# chisq.test needs all ten digit categories present; regions with fewer
# observed digits fail (see note below about region 82).
chi1<-chisq.test(x=table1, p=p)
chi2<-chisq.test(x=table2, p=p)
chi3<-chisq.test(x=table3, p=p)
chivalues[i, ]<-rbind(i, chi1$p.value, chi2$p.value, chi3$p.value) #NOTE(review): an earlier note suspected chi1 was repeated; the call correctly uses chi1/chi2/chi3
}
else{next}
}
##Region 82 fails for KPRF and LDRP--not enough digits
##Total fraudscores
# Score = number of parties (0-3) whose digit distribution rejects uniformity at p < .05.
fraudscore2016<-matrix(NA, nrow=92, ncol=5)
fraudscore2016[,1]<-chivalues[,1]
fraudscore2016[,2]<-ifelse(chivalues[,2] < .05, 1, 0)
fraudscore2016[,3]<-ifelse(chivalues[,3] < .05, 1, 0)
fraudscore2016[,4]<-ifelse(chivalues[,4] < .05, 1, 0)
i<-1
for(i in 1:92){
fraudscore2016[i,5]<-sum(fraudscore2016[i,2:4], na.rm=FALSE)
}
###Getting party vote-shares
# Per region: share of total (valid + invalid) votes for each of the three parties.
voteshares <- matrix(NA, nrow=92, ncol=4)
i<-1
for (i in 1:92){
group<-subset(russia2016, regionid==i)
if (nrow(group) > 0){
total.votes <- sum(group$valid) + sum(group$invalid)
ur.share <- sum(group$united.russia) / total.votes
kprf.share <- sum(group$kprf) / total.votes
ldpr.share <- sum(group$ldpr) / total.votes
voteshares[i, ]<-rbind(i, ur.share, kprf.share, ldpr.share)
}
else{next}
}
fraudscore2016 <- cbind(fraudscore2016, voteshares)
write.csv(fraudscore2016, "C:/Users/Cole/Documents/Research topics literature/All Russian election data/2016 fraud scores_update.csv")
|
8eddf82f7dc84288567d091041e3901bb0ab5d62
|
fe254ef6be0bd316d41b6796ef28f1c9e1d5551e
|
/R/ternaryDiagLines.R
|
dbb0caad88e816fa2a863641f200a002b6e0f088
|
[] |
no_license
|
matthias-da/robCompositions
|
89b26d1242b5370d78ceb5b99f3792f0b406289f
|
a8da6576a50b5bac4446310d7b0e7c109307ddd8
|
refs/heads/master
| 2023-09-02T15:49:40.315508
| 2023-08-23T12:54:36
| 2023-08-23T12:54:36
| 14,552,562
| 8
| 6
| null | 2019-12-12T15:20:57
| 2013-11-20T09:44:25
|
C++
|
UTF-8
|
R
| false
| false
| 225
|
r
|
ternaryDiagLines.R
|
ternaryDiagLines <- function(x, ...) {
  # Draw connecting lines for 3-part compositional data on an existing
  # ternary diagram.  Each row of x is closed (rescaled to sum to 1) and
  # mapped to 2-D coordinates of the equilateral-triangle plot.
  #
  # Args:
  #   x:   matrix or data frame of 3-part compositions, one row per point.
  #   ...: further graphical parameters passed on to lines().
  row_totals <- rowSums(x)
  if (any(row_totals <= 0)) {
    stop("rowSums of the input data x must be positive.")
  }
  comp <- x / row_totals
  # Height of the unit equilateral triangle.
  apex_height <- sqrt(3) / 2
  horiz <- comp[, 2] + comp[, 3] / 2
  vert <- comp[, 3] * apex_height
  lines(horiz, vert, ...)
}
|
ca84c859dfdb2f805eac4c6813e2fdcf0c1f47a4
|
86e18632193c38dacb08b4226d3938f09c04c2e8
|
/Pararrel export script.R
|
6035b43ef4cae23d9f7c990a6bb67741661a252f
|
[] |
no_license
|
chreehan/Dissertation-Code
|
542e9cf7f934cc535c8b95e380b5d9ff5393d8e6
|
84c21fe67fa24ade1dc5451c84112877ce2da34b
|
refs/heads/master
| 2021-05-29T16:02:49.796887
| 2015-10-12T05:15:01
| 2015-10-12T05:15:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,474
|
r
|
Pararrel export script.R
|
## Estimate rolling-window VECMs on CNH/CNY exchange-rate series (Johansen
## procedure via urca) and export the cointegration coefficients and
## error-correction t-statistics for every window to a master table.
load("workspaceImage.RData")
# NOTE(review): installing packages inside an analysis script re-downloads
# them on every run; install once interactively and keep only library() calls.
install.packages("tsDyn")
install.packages("dplyr")
install.packages("data.table")
install.packages("urca")
library(tsDyn)
library(dplyr)
library(data.table)
library(urca)
# Preallocated result table: one row per (series i, window g) combination,
# addressed by EntryCode = (i-1)*1000 + g below.
mastertable1 = data.table(CNHCoefm = 1:2000000, CNYCoefm = 1:2000000, CnhTstat = 1:2000000,
RanNum = 0, DateCode = 0, EntryCode = 0, CnyTstat = 1:2000000,
Date = "none")
# NOTE(review): 754:500 counts DOWN from 754 to 500 -- confirm this reversed
# range is intended (it looks like a resumed batch; 500:754 may be meant).
for (i in 754:500) {
# Row offset for series i; windows g = 1..911 fill rows p+1 .. p+911.
p = (i * 1000 - 1000)
for (g in 1:911) {
filecnh = unlist(listCNH[[i]])
filecny = unlist(listCNY[[i]])
# 100-observation rolling window starting at g (NAs dropped first).
VECMtable = na.omit(data.table(CNH = filecnh, CNY = filecny, date = datelist))[g:(g+99)]
name1 = VECMtable$date[1]
name2 = VECMtable$date[length(VECMtable$date)]
name = paste(name1, name2, sep = ' till ')
VECMtable[,date:=NULL]
# Johansen cointegration (max-eigenvalue test, lag K=5) + restricted VECM.
x = cajorls(ca.jo(VECMtable, type="eigen", K=5, spec="longrun"))
x1 = summary(x$rlm)
mastertable1$CNHCoefm[(p + g)] = x$rlm$coefficients[1,1] #CNH coef
mastertable1$CNYCoefm[(p + g)] = x$rlm$coefficients[1,2] #CNY coef
mastertable1$CnhTstat[(p + g)] = x1$`Response CNH.d`$coefficients[1,3] #ecterm t stat for CNH
mastertable1$CnyTstat[(p + g)] = x1$`Response CNY.d`$coefficients[1,3] #ec term t stat for CNY
mastertable1$Date[(p + g)] = name
mastertable1$RanNum[(p + g)] = i
mastertable1$DateCode[(p + g)] = g
mastertable1$EntryCode[(p + g)] = (p + g)
# Progress indicator: last filled row index.
print((p + g))
}}
write.csv(mastertable1, "mastertable.csv")
|
6b320988aa215d3fdb0089a13fefa62f3e306ab0
|
67df5e7f56b5458ea7b003dbb927bd467129da0e
|
/Stare_rzeczy/Wykres_spend_RD/skrypt_spend_RD.r
|
0b04524202cf2b4ca98d8addbb9c2f1abf487a35
|
[] |
no_license
|
arctickey/TWD_01
|
248591e38f4bd37caf727f93cd989b0771390bcb
|
caaa103e6dd05cf7d6c5dc29ca0511ae4de3bac8
|
refs/heads/master
| 2020-08-21T15:27:23.142596
| 2020-03-31T07:07:45
| 2020-03-31T07:07:45
| 240,883,627
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,766
|
r
|
skrypt_spend_RD.r
|
## Plot mean PISA 2015 scores per country against national R&D spending
## (% of GDP), with continent colouring and selected country labels.
library(dplyr)
library(haven)
library(intsvy)
library(countrycode)
# Fix: ggplot2 and ggrepel are used below (ggplot, geom_label_repel) but were
# never attached, so the script only worked if they happened to be loaded.
library(ggplot2)
library(ggrepel)
# Load the data frame with the PISA results.
# NOTE(review): `dane` is consumed by pisa2015.mean.pv() below, but this load
# is commented out -- it must be run (or `dane` supplied) beforehand.
#dane <- haven::read_sas("~/Programowanie/TWD_01/cy6_ms_cmb_stu_qqq.sas7bdat")
# Load country-level R&D spending data (source: OECD).
rd_data <- read.csv("Wykres_spend_RD/Spend_on_RD.csv")
# Average the indicator per country over the several survey years available.
rd_data <- group_by(rd_data, LOCATION) %>%
  summarise(., avg_rd = mean(Value, na.rm = TRUE))
# Load country labels; columns 2, 5, 36 of codelist are used below as the
# iso3c code, English country name and continent (confirmed by the join/plot).
codes <- countrycode::codelist %>% select(., c(2, 5, 36))
# Compute mean plausible-value scores per country (intsvy package).
result <- pisa2015.mean.pv(c("MATH", "SCIE", "READ"), by = "CNT", data = dane)
result <- select(result, CNT, Mean)
# Join with the spending indicator and attach full country names.
final <- inner_join(result, rd_data, by = c("CNT" = "LOCATION")) %>% left_join(., y = codes, by = c("CNT" = "iso3c"))
# Scatter of score vs R&D spend with a smoother; labels for outliers.
ggplot(final, aes(x = avg_rd, y=Mean, colour=continent, label = country.name.en))+
  geom_point(size=4)+
  stat_smooth(method = "auto",inherit.aes = FALSE,aes(x=avg_rd,y=Mean))+
  ggtitle("Średni wynik względem wydatków na Reaserch & Development")+
  labs(y = "Średni wynik w kraju", x="% PKB wydawany na R&D",color="Kontynent")+
  theme(axis.title.x = element_text(size = 14), axis.title.y = element_text(size = 14))+
  geom_label_repel( data = final[(final$avg_rd > 4),], color = 'black') +
  geom_label_repel( data = final[(final$Mean>520),], color = 'black')+
  geom_label_repel( data = final[(final$Mean<430),], color = 'black')+
  geom_label_repel( data = final[(final$avg_rd > 2)&(final$avg_rd < 3)&(final$Mean<490),], color = 'black')
#ggsave("Wykres_spend_RD/all_student_spend_RD_auto.png",width = 18, height = 9)
|
58fa027f0e2c7086ee0361e5d3ee696c3baa55d9
|
304a22604d0db86b3b218a4b8c0263f1294911b7
|
/R/Parse_Tables.R
|
be582446668b7395df54e9776da5b6ad7c0081f3
|
[] |
no_license
|
hansthompson/pdfHarvester
|
3ec9e0dcfaa57a97e85c45eca91bf2f7df31f4bd
|
3522125974a08f6c6296cfa56487115fd7e0d278
|
refs/heads/master
| 2021-01-13T02:18:01.192974
| 2014-01-25T17:18:49
| 2014-01-25T17:18:49
| 16,054,577
| 11
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,971
|
r
|
Parse_Tables.R
|
#' Interactively crop table regions out of PDF page images
#'
#' For each sample folder under \code{project}, displays every low-quality
#' page PNG and lets the user click pairs of corner points (via
#' \code{locator}) around each table; the clicked regions are then cropped
#' from the corresponding high-quality PNGs and written to TableImages/.
#' Pages with zero clicks, or an odd number of clicks, are skipped.
#'
#' @param project Path to the project directory containing one sub-folder
#'   per document, each with LowQuality/, HighQuality/ and TableImages/
#'   sub-directories of page PNGs.
#' @export
Parse_Tables <- function(project) {
  require(png)
  folders <- list.dirs(project, recursive = FALSE)
  for (z in seq(folders)) {
    filefolder <- folders[z]
    filevector <- list.files(paste(filefolder, "/LowQuality", sep = ""), pattern = ".png", full.names = T)
    # Fix: list.files() returns character(0) when no PNGs exist; the original
    # `if(is.na(filevector)) {next()}` errored on a zero-length condition and
    # warned/errored when more than one file matched.
    if (length(filevector) == 0) next
    nfiles <- length(filevector)
    tablecoordinates <- list()
    ## create a concatenation for the pages with tables in the for loop below.
    pagenumber <- c()
    # Pass 1: show each low-quality page and collect clicked corner points.
    for (i in seq(nfiles)) {
      jpegpagesub <- readPNG(paste(filevector[i]))
      dimmygraph <- dim(jpegpagesub)
      dev.new(width = dimmygraph[2], height = dimmygraph[1])
      par(mar = c(0, 0, 0, 0))
      x <- c(0, dimmygraph[2])
      y <- c(0, dimmygraph[1])
      plot(x, y, type = "n", xlab = "", ylab = "", xaxt = 'n', yaxt = 'n')
      rasterImage(jpegpagesub, x[1], y[1], x[2], y[2], interpolate = FALSE)
      # Blocks until the user finishes clicking (interactive session only).
      coord <- locator(type = 'p', pch = 3, col = 'red', lwd = 1.2, cex = 1.2)
      tablecoordinates[[i]] <- data.frame(x = coord$x, y = coord$y)
      dev.off()
    }
    # Keep only pages with a positive, even number of clicks (corner pairs).
    noTables <- which(unlist(lapply(tablecoordinates, length)) == 0)
    properPages <- which(unlist(lapply(tablecoordinates, function(q) length(q$x) %% 2 == 0)))
    # Fix: plain if/else instead of abusing ifelse() for its side effects.
    if (length(noTables) != 0) {
      tablePages <- properPages[-noTables]
    } else {
      tablePages <- properPages
    }
    tablecoordinates <- tablecoordinates[tablePages]
    names(tablecoordinates) <- paste(tablePages)
    pages <- as.numeric(names(tablecoordinates))
    # Pass 2: crop each clicked region from the high-quality page images.
    for (i in seq_along(tablecoordinates)) {
      tabledim <- data.frame(x = tablecoordinates[[i]]$x, y = tablecoordinates[[i]]$y)
      write.csv(tablecoordinates, "tablecoordinates.csv")
      for (j in 0:((length(tablecoordinates[[i]]$x) / 2) - 1)) {
        start <- j*2 + 1
        stop <- j*2 + 2
        imag <- readPNG(gsub("LowQuality", "HighQuality", filevector[pages[i]]))
        # Scale clicked coordinates from low- to high-quality resolution.
        # NOTE(review): tabledim is re-scaled on every j iteration -- if a page
        # has more than one table and dimChange != 1, coordinates compound;
        # confirm intended behavior before relying on multi-table pages.
        dimChange <- dim(imag)/dim(readPNG(filevector[1]))
        tabledim$x <- tabledim$x * dimChange[1]
        tabledim$y <- tabledim$y * dimChange[2]
        ytot <- dim(imag)[1]
        xtot <- dim(imag)[2]
        # NOTE(review): the output filename uses filevector[i] while the image
        # data uses filevector[pages[i]] -- confirm whether both should index
        # by pages[i].
        png(gsub("LowQuality", "TableImages", gsub(".png", paste("_", j+1, ".png", sep = ""), filevector[i])),
            width = tabledim[stop, 1] - tabledim[start, 1],
            height = (ytot - tabledim[stop, 2]) - (ytot - tabledim[start, 2]), units = "px")
        par(oma = c(0, 0, 0, 0))
        par(mar = c(0, 0, 0, 0))
        plot(x = c(tabledim[start, 1], tabledim[stop, 1]),
             y = c(tabledim[stop, 2], tabledim[start, 2]),
             type = "n", xlab = "", ylab = "", xaxt = 'n', yaxt = 'n')
        # Raster y-axis is flipped relative to click coordinates, hence ytot - y.
        rasterImage(image = imag[(ytot - tabledim[start, 2]):(ytot - tabledim[stop, 2]), tabledim[start, 1]:tabledim[stop, 1], ],
                    xleft = tabledim[start, 1],
                    ybottom = tabledim[stop, 2],
                    xright = tabledim[stop, 1],
                    ytop = tabledim[start, 2],
                    interpolate = FALSE)
        dev.off()
      }
    }
  }
}
|
bdf35f74fdc0bb4f28e4829c39a4628c6bd2ee11
|
6300606517c0dcaae4dce093a8366eea953deb37
|
/2015/solutionsR/day04.R
|
a8ff2b394ba9042efb23fb065389706711e965d9
|
[] |
no_license
|
akulumbeg/adventofcode
|
e5b5f8e509d240279ce0b4daf7325a48a4cbf9fc
|
71d2a329beb4dd42d7e9dd6f544aa0c8fbc343cd
|
refs/heads/master
| 2022-07-28T16:59:30.147837
| 2022-07-11T14:46:15
| 2022-07-11T14:46:15
| 220,464,721
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,404
|
r
|
day04.R
|
## Advent of Code 2015, day 4: find the smallest integer suffix whose MD5
## hash of (secret key + suffix) starts with five (part 1) / six (part 2)
## leading zeroes.
# Day 4 - Part 1 ----------------------------------------------------------
input <- readLines("2015/data/day04.txt", warn = F) # load the data
input <- paste0(input, 1:250000) # 250000 is a wild guess
# I have to break my rule of not using external packages
# because I cannot code an md5 generator from scratch
# NOTE(review): install.packages() inside a script re-installs on every run;
# normally guarded with requireNamespace() or run once interactively.
install.packages("digest")
library(digest)
outcome <- sapply(input, function(x) digest(x, algo = "md5", serialize = F))
# perform the function on every element of the large char vector
# serialize = F assures consistency with online md5 generators
# The answer is the first (smallest) index among the matches printed here.
which(grepl("^00000", outcome, perl = T, useBytes = T)) # which element starts with 5 0s?
# Day 4 - Part 2 ----------------------------------------------------------
input <- readLines("2015/data/day04.txt", warn = F) # load the data
input <- paste0(input, 1:5000000) # 5000000 is a also a guess
# I have to break my rule of not using external packages
# because I cannot code an md5 generator from scratch
install.packages("digest")
library(digest)
outcome <- sapply(input, function(x) digest(x, algo = "md5", serialize = F))
# perform the function on every element of the large char vector
# serialize = F assures consistency with online md5 generators
which(grepl("^000000", outcome, perl = T, useBytes = T)) # which element starts with 6 0s?
# might take a bit at first
|
f63c7b0de41c66e28c2c67f3324e13c90f28157f
|
9ad4b4acb8bd2b54fd7b82526df75c595bc614f7
|
/Cleaning/SeuratNorm.R
|
b9421b94bde559adb772d1622a4424ef4ae58671
|
[] |
no_license
|
sylvia-science/Ghobrial_EloRD
|
f27d2ff20bb5bbb90aa6c3a1d789c625540fbc42
|
041da78479433ab73335b09ed69bfdf6982e7acc
|
refs/heads/master
| 2023-03-31T14:46:27.999296
| 2021-04-02T15:09:49
| 2021-04-02T15:09:49
| 301,811,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,053
|
r
|
SeuratNorm.R
|
## Per-sample Seurat normalization QC pipeline: for each bone-marrow sample,
## load a pre-filtered Seurat object, apply MT%/nFeature filters, run the
## standard LogNormalize -> PCA -> clustering -> UMAP workflow, and write
## diagnostic plots (normalized expression vs library size per expression
## bin, housekeeping-gene variance, UMAPs, feature plots).
library(Seurat)
library(dplyr)
library(ggplot2)
library(cowplot)
library(Matrix)
library(resample )
library(readxl)
# Project-local helpers: loadRData(), label_cells(), FeaturePlotFix(), ...
source('/home/sujwary/Desktop/scRNA/Code/Functions.R')
source('/home/sujwary/Desktop/scRNA/Code/Plot_func.R')
# Per-sample QC thresholds (MT%, nFeature min/max) keyed by sample name.
filename_sampleParam <- paste0('/home/sujwary/Desktop/scRNA/Data/sample','_parameters.xlsx')
sampleParam <- read_excel(filename_sampleParam)
# Manually curated cluster identity labels per sample.
filename = paste0('/home/sujwary/Desktop/scRNA/Param/','Cluster_ID_testNorm.xlsx')
cluster_id_param = read_excel(filename)
# NOTE(review): sample_list is assigned three times in a row -- only the last
# assignment takes effect; the earlier two appear to be leftovers.
sample_list = c('GL1497BM', 'GL1160BM', 'GL2923BM', 'GL3404BM', 'NBM6CD138N', 'NBM12CD138N', 'GL2185BM', 'GL3417BM', 'GL2653BM')
sample_list = c('GL1497BM', 'GL2923BM', 'GL3404BM', 'NBM6CD138N', 'NBM12CD138N', 'GL2185BM', 'GL3417BM', 'GL2653BM')
sample_list = c('GL1290BM', 'GL3404BM', 'NBM6CD138N', 'NBM12CD138N', 'GL2185BM', 'GL3417BM', 'GL2653BM')
# NOTE(review): folder_input is also reassigned immediately; 'Soup_MT_C100' wins.
folder_input ='Soup_MT_nFeature'
folder_input ='Soup_MT_C100'
# Leftover from interactive debugging; overwritten by the loop below.
i = 6
# Soup + MT
for (i in 1:length(sample_list) ){
#for (i in 4){
  sample_name = sample_list[i]
  print(sample_name)
  # Look up this sample's QC thresholds.
  percent_mt = sampleParam$percent_mt_min[sampleParam['Sample'] == sample_name]
  RNA_features_min = sampleParam$RNA_features_min[sampleParam['Sample'] == sample_name]
  RNA_features_max = sampleParam$RNA_features_max[sampleParam['Sample'] == sample_name]
  # Cluster labels differ depending on which upstream filtering run is used.
  if ( folder_input == 'Soup_MT_nFeature'){
    cluster_IDs = cluster_id_param$Cluster_Ids_nFeature_Seurat[cluster_id_param$Sample == sample_name]
  }else{
    cluster_IDs = cluster_id_param$Cluster_Ids_Seurat[cluster_id_param$Sample == sample_name]
  }
  print(cluster_IDs)
  folder = paste0('/home/sujwary/Desktop/scRNA/Output/',folder_input,'/',sample_name,'/')
  path = paste0(folder,'/',sample_name,'.Robj')
  data_i_filtered_run = loadRData(path)
  # Drop cells above the mitochondrial-fraction threshold.
  data_i_filtered_run = data_i_filtered_run[, data_i_filtered_run$percent.mt < percent_mt]
  #data_i_filtered_run = data_i_filtered_run[, data_i_filtered_run$nFeature_RNA > RNA_features_min]
  #data_i_filtered_run = data_i_filtered_run[, data_i_filtered_run$nFeature_RNA < RNA_features_max]
  # nFeature filtering only applies to the 'Soup_MT_nFeature' input variant.
  if ( folder_input == 'Soup_MT_nFeature'){
    data_i_filtered_run = data_i_filtered_run[, data_i_filtered_run$nFeature_RNA > RNA_features_min]
    data_i_filtered_run = data_i_filtered_run[, data_i_filtered_run$nFeature_RNA < RNA_features_max]
  }
  # Standard Seurat workflow: normalize, find HVGs, scale, PCA, cluster, UMAP.
  data_i_filtered_run = NormalizeData(data_i_filtered_run, normalization.method = "LogNormalize", scale.factor = 10000)
  data_i_filtered_run = FindVariableFeatures(data_i_filtered_run, selection.method = "vst", nfeatures = 2000)
  data_i_filtered_run = ScaleData(data_i_filtered_run)
  data_i_filtered_run = RunPCA(data_i_filtered_run,npcs = 30)
  data_i_filtered_run = FindNeighbors(data_i_filtered_run, dims = 1:30)
  data_i_filtered_run = FindClusters(data_i_filtered_run, resolution = 1.2)
  data_i_filtered_run = RunUMAP(data_i_filtered_run, dims = 1:30)
  data_i_filtered_run = label_cells(data_i_filtered_run, cluster_IDs)
  # Commented-out alternative: rebuild the object from the raw 10x matrix.
  # filename = paste("/home/sujwary/Desktop/scRNA/Data/",sample_name,"_raw_feature_bc_matrix.h5",sep = "")
  # data_i_raw = Read10X_h5(filename, use.names = TRUE, unique.features = TRUE)
  # data_i_raw = CreateSeuratObject(counts = data_i_raw, project = "BM", min.cells = 3, min.features = 1)
  #
  # colSum_list = colSums(data_i_raw ) # Needs to be from Matrix library
  # keep = colSum_list >= 100
  # data_i_run = data_i_raw[,keep]
  #
  # data_i_run[["percent.mt"]] <- PercentageFeatureSet(data_i_run, pattern = "^MT-")
  # data_i_run = data_i_run[, data_i_run$percent.mt < percent_mt]
  # data_i_run = data_i_run[, data_i_run$nFeature_RNA > RNA_features_min]
  # data_i_run = data_i_run[, data_i_run$nFeature_RNA < RNA_features_max]
  #data_i_filtered_run = NormalizeData(data_i_run, normalization.method = "LogNormalize", scale.factor = 10000)
  # Save the processed object under the TestNormalization output tree.
  folder = paste0('/home/sujwary/Desktop/scRNA/Output/TestNormalization/',folder_input,'/Seurat/',sample_name,'/')
  dir.create(folder,recursive = T)
  path = paste0(folder,'/',sample_name,'.Robj')
  save(data_i_filtered_run,file= path)
  # NOTE(review): label_cells() is applied a second time here -- confirm
  # whether relabeling after save is intentional.
  data_i_filtered_run = label_cells(data_i_filtered_run, cluster_IDs)
  # Larger markers for small samples.
  if(ncol(data_i_filtered_run)< 200){
    pt.size = 3
  }else{
    pt.size = 1
  }
  print('Plot')
  #################
  # QC: bin genes into sextiles of mean normalized expression, then plot each
  # bin's per-cell mean normalized expression against raw library size.
  rowMeans_list = rowMeans(data_i_filtered_run@assays[["RNA"]]@data)
  bin_list = as.numeric(cut_number(rowMeans_list,6))
  colSums_list = colSums(data_i_filtered_run@assays[["RNA"]]@counts)
  plot_list = vector('list', 6)
  plot_list_10000 = vector('list', 6)
  for (bin in 1:6){
    #bin = bin + 1
    print(bin)
    y_list = data_i_filtered_run@assays[["RNA"]]@data[bin_list == bin,]
    df = as.data.frame(matrix(ncol = 2, nrow = length(colSums_list)))
    colnames(df) = c('colsum','exp')
    df$colsum = (colSums_list) # Unormalized per cell
    df$exp = colMeans(y_list) # Norm Mean per cell
    # NOTE(review): bin_list indexes genes, but here it subsets cells via
    # Idents(data[bin_list == bin,]) -- confirm this is the intended axis.
    df$ID = Idents(data_i_filtered_run[bin_list == bin,])
    plot = ggplot(df, aes(colsum, exp)) +
      geom_point(aes(colour = df$ID), size = 2)+
      stat_smooth(aes(x=colsum,y=exp), method="loess", se=F, color="tomato2") +
      theme(text = element_text(size=20))
    #print(plot)
    plot_list[[bin]]= plot
    # Same plot with the x axis clipped to 10000 counts.
    plot = ggplot(df, aes(colsum, exp)) +
      geom_point(aes(colour = df$ID), size = 2)+
      stat_smooth(aes(x=colsum,y=exp), method="loess", se=F, color="tomato2") +
      theme(text = element_text(size=20))+xlim(c(0,10000))
    #print(plot)
    plot_list_10000[[bin]]= plot
  }
  # Write the 6-panel normalization diagnostics.
  pathName = paste0(folder,sample_name,'_Seurat_NormVsGeneSum','.png')
  png(file=pathName,width=1500, height=1000,res = 100)
  plot = plot_grid(
    plot_list[[1]], plot_list[[2]],plot_list[[3]],plot_list[[4]],plot_list[[5]],plot_list[[6]],
    labels = "AUTO", ncol = 2)
  print(plot)
  dev.off()
  pathName = paste0(folder,sample_name,'_Seurat_NormVsGeneSum_10000','.png')
  png(file=pathName,width=1500, height=1000,res = 100)
  plot = plot_grid(
    plot_list_10000[[1]], plot_list_10000[[2]],plot_list_10000[[3]],plot_list_10000[[4]],plot_list_10000[[5]],plot_list_10000[[6]],
    labels = "AUTO", ncol = 2)
  print(plot)
  dev.off()
  # plot housekeeping gene variance across all cells on the y axis and total UMI count per cell on the x axis
  gene_list = read.csv('/home/sujwary/Desktop/scRNA/Data/HousekeepingGenes.csv')
  gene_list = as.character(gene_list[gene_list$Gene %in% rownames(data_i_filtered_run),1])
  data_subset = data_i_filtered_run@assays[["RNA"]]@data[gene_list,]
  var_list = colVars(as.matrix(data_subset))
  # NOTE(review): mean_list is computed but never used below.
  mean_list = colMeans(data_subset)
  UMI_per_cell = colSums(data_i_filtered_run)
  df = data.frame(matrix(ncol = 2, nrow = length(var_list)))
  colnames(df) = c("UMI_per_cell",'variance')
  df$UMI_per_cell = UMI_per_cell
  df$var_list = var_list
  pathName = paste0(folder,sample_name,'_Seurat_varVsCount','.png')
  png(file=pathName,width=1000, height=1000)
  plot = ggplot(df, aes(UMI_per_cell, var_list)) +
    geom_point(size = 4)
  print(plot)
  dev.off()
  #next
  # UMAPs: higher-resolution unlabeled clustering, then labeled + QC overlays.
  data_i_filtered_noLabel = FindClusters(data_i_filtered_run,resolution = 1.4)
  pathName = paste0(folder,sample_name,'_Seurat_Umap','_noLabel','.png')
  png(file=pathName,width=1000, height=1000)
  print( DimPlot(data_i_filtered_noLabel,pt.size = pt.size, reduction = "umap",label = T))
  dev.off()
  pathName = paste0(folder,sample_name,'_Seurat_Umap','_Label','.png')
  png(file=pathName,width=1000, height=1000)
  print( DimPlot(data_i_filtered_run,pt.size = pt.size, reduction = "umap",label = T))
  dev.off()
  pathName = paste0(folder,sample_name,'_Seurat_Umap','_percent.mt','.png')
  png(file=pathName,width=1000, height=1000)
  print(FeaturePlot(data_i_filtered_run,pt.size = pt.size, reduction = "umap", features = 'percent.mt'))
  dev.off()
  pathName = paste0(folder,sample_name,'_Seurat_Umap','_nFeature_RNA','.png')
  png(file=pathName,width=1000, height=1000)
  print(FeaturePlot(data_i_filtered_run,pt.size = pt.size, reduction = "umap", features = 'nFeature_RNA'))
  dev.off()
  # NOTE(review): this unconditional `next` makes everything below (marker
  # finding and the per-gene feature plots) dead code for every iteration.
  next
  findMarkers = F
  if (findMarkers){
    PCA_dim = 30
    resolution_val = 0.8
    num_markers = 10
    # Differential expression per cluster; heatmap of top markers.
    markers = FindAllMarkers(data_i_filtered_run, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
    markers %>% group_by(cluster) %>% top_n(n = num_markers, wt = avg_logFC)
    # Plotting the top 10 markers for each cluster.
    top10 = markers %>% group_by(cluster) %>% top_n(n = num_markers, wt = avg_logFC)
    all_markers = markers %>% group_by(cluster)
    pathName <- paste0(folder,paste0('HeatMap', '_PCA',PCA_dim,'_res',resolution_val,'.png'))
    png(file=pathName,width=500, height=500)
    print(DoHeatmap(data_i_filtered_run, features = top10$gene))
    dev.off()
    write.csv(all_markers, file = paste0(folder,'Features','.csv'),row.names=FALSE)
  }
  #next
  # Per-gene UMAP feature plots for a panel of canonical marker genes.
  folder_feature = paste0(folder,'Featureplots/' )
  dir.create(folder_feature,recursive = T)
  gene_list = c('CD3D', 'CD3G', 'CD3E', 'CD8A', 'CD8B', 'IL7R', 'SELL', 'CD14', 'FCGR3A', 'NKG7', 'MS4A1', 'IGKC', 'IGHM', 'CD19', 'MZB1', 'CD34', 'CDK6',
                'FCER1A','FUT4', 'ELANE', 'MPO', 'HBA2', 'HBB', 'LYZ', 'TNFRSF17',' PRTN3', 'NAMPT')
  if(ncol(data_i_filtered_run)< 200){
    pt.size = 6
  }else{
    pt.size = 3
  }
  for (j in 1:length(gene_list)){
    gene = gene_list[j]
    print(gene)
    #browser()
    plot = FeaturePlotFix(data_i_filtered_run, feature = gene, folder =folder,
                          str = '',split = F, markerSize = pt.size,gene_TF = TRUE,title = '',saveTF = FALSE)
    plot = plot + theme(
      axis.title.x = element_text(color="black", size=24 ),
      axis.title.y = element_text(color="black", size=24),
      axis.text= element_text(color="black", size=24),
      legend.text=element_text(size=24),
      legend.title=element_text(size=24),
      text = element_text(size = 20)
    )
    file_str = ''
    pathName = paste0(folder_feature,gene,'','.png')
    png(filename = pathName,width=2000, height=2000)
    print(plot)
    dev.off()
    remove(plot)
  }
}
|
9901ea9fb2dd1e4e7cf196fd92d88389fc4ab35f
|
9b013a34b3f89d3e09ad16766e574a05ad30cd28
|
/run_analysis.R
|
00f33f478125925ded8aa6526e83230f6b1775ce
|
[] |
no_license
|
ErikGiezen/Getting-cleanig-data
|
d3e121e594da006a07fcdf8b158fb994b118d04a
|
5f34442bc727a0ef9dae7a1de3a96d17d76c7318
|
refs/heads/master
| 2021-01-17T06:24:32.234836
| 2016-07-24T20:53:11
| 2016-07-24T20:53:11
| 63,984,286
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,948
|
r
|
run_analysis.R
|
## Build a tidy summary of the UCI HAR data set: merge the train/test
## partitions, keep only mean()/std() measurements, attach descriptive
## activity names, and write the per-subject/per-activity averages.

## 1. Merge the training and the test sets into one data set.
# Feature names and the activity-code lookup table.
feature_names  <- read.table("features.txt", header = FALSE)
activity_codes <- read.table("activity_labels.txt", header = FALSE)
# Training partition: subjects, measurements, activity ids.
subj_train <- read.table("subject_train.txt", header = FALSE)
meas_train <- read.table("X_train.txt", header = FALSE)
act_train  <- read.table("y_train.txt", header = FALSE)
# Test partition.
subj_test <- read.table("subject_test.txt", header = FALSE)
meas_test <- read.table("X_test.txt", header = FALSE)
act_test  <- read.table("y_test.txt", header = FALSE)
# Name the columns: measurement variables from features.txt, plus the
# subject and activity-id columns and the lookup table's columns.
names(meas_train) <- feature_names[, 2]
names(meas_test)  <- feature_names[, 2]
names(subj_train) <- "subject"
names(subj_test)  <- "subject"
names(act_train)  <- "activity_id"
names(act_test)   <- "activity_id"
names(activity_codes) <- c("id", "activity")
# Stack the two partitions (columns: measurements, activity_id, subject).
combined <- rbind(
  cbind(meas_train, act_train, subj_train),
  cbind(meas_test, act_test, subj_test)
)

## 2. Extract only mean() / std() measurements plus subject and activity.
keep <- grep("(mean|std)\\(\\)|subject|activity", names(combined))
mean_std <- combined[, keep]

## 3. Use descriptive activity names, then drop the numeric id column.
mean_std <- merge(mean_std, activity_codes, by.x = "activity_id", by.y = "id")
mean_std <- subset(mean_std, select = -activity_id)

## 4. Variable names were already applied from features.txt above.

## 5. Average every variable for each activity and each subject.
library(reshape2)
long_form <- melt(mean_std, id = c("subject", "activity"))
eind <- dcast(long_form, subject + activity ~ variable, mean)
write.table(eind, file = "eind.txt", row.name = FALSE)
|
71d7540b0c0eacda476f755b746854b1732f2d2a
|
33499d17a57e1c11fd2929599beef530fe9397f9
|
/run_analysis.R
|
c6b4fb57e00049da48f45c4c155c7638cefe9759
|
[] |
no_license
|
klatz/GettingCleaningData
|
877cea425bfb89bfe358d7a99ef4a50b781725ab
|
e7ece098c714723ce0b6fa985c0db4bd3223bd67
|
refs/heads/master
| 2021-01-01T19:16:20.882248
| 2014-06-26T19:23:54
| 2014-06-26T19:23:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,198
|
r
|
run_analysis.R
|
## Tidy-data script for the UCI HAR data set: merges train/test, labels
## columns, joins activity names, extracts mean/std measurements and
## writes per-person/per-activity averages.
#YOU NEED plyr & reshape2 package
library(plyr)
library(reshape2)
#STEP 1
#read training dataset
#subject:who y:activity
sub_train <- read.csv("UCI HAR Dataset/train/subject_train.txt",header=FALSE)
# NOTE(review): sep="" makes read.csv behave like whitespace-delimited
# read.table -- needed for the space-separated X files.
X_train <- read.csv("UCI HAR Dataset/train/X_train.txt",sep="",header=FALSE)
y_train <- read.csv("UCI HAR Dataset/train/y_train.txt",header=FALSE)
#read test dataset
sub_test <- read.csv("UCI HAR Dataset/test/subject_test.txt",header=FALSE)
X_test <- read.csv("UCI HAR Dataset/test/X_test.txt",sep="",header=FALSE)
y_test <- read.csv("UCI HAR Dataset/test/y_test.txt",header=FALSE)
#merge training dataset
train_marge <- cbind(X_train,sub_train,y_train)
#merge test dataset
test_marge <- cbind(X_test, sub_test, y_test)
#marge training+test dataset
marge_dataset <- rbind(train_marge,test_marge)
#read features
features <- read.csv("UCI HAR Dataset/features.txt",sep="",header=FALSE)
# Measurement names followed by the two appended id columns.
feature_names <- c(as.vector(features[,2]),"person","activity")
#STEP 4
#set column names
colnames(marge_dataset) <- feature_names
#STEP 3
#read activities list
activities <- read.csv("UCI HAR Dataset/activity_labels.txt"
,sep="",header=FALSE)
colnames(activities) <- c("activity","activity_name")
# Attach descriptive activity names (adds an activity_name column).
marge_dataset <- join(marge_dataset,activities,by="activity")
#STEP 2
#choose mean/standard deviation names
mean_standard_names <- grepl('mean\\(\\)|std\\(\\)',names(marge_dataset))
#add X and y col
# Keep the person and activity_name columns by position (last columns are
# person, activity, activity_name after the join above).
mean_standard_names[ncol(marge_dataset)-2]<-TRUE #person
mean_standard_names[ncol(marge_dataset)]<-TRUE #activities
#extract mean/standard dataset
mean_standard_dataset <- marge_dataset[,mean_standard_names]
#WRITE CSV&TXT extract data
write.csv(mean_standard_dataset,"tidy_dataset_1.csv")
write.table(mean_standard_dataset,"tidy_dataset_1.txt",quote=FALSE)
#STEP 5
# Average every kept variable per (person, activity_name) pair.
aggregate_dataset <- aggregate(.~ person + activity_name ,data=mean_standard_dataset,FUN=mean)
#WRITE CSV&TXT extract data
write.csv(aggregate_dataset,"tidy_dataset_2.csv")
write.table(aggregate_dataset,"tidy_dataset_2.txt",quote=FALSE)
|
6f2344c90d8961e94c626e68560bb0c6b303e1ac
|
1a668e43e81a0722ae887f3a34f1da7013952a45
|
/Phylogenetic_tree.R
|
0e40246faec9dc3208577bbde1c6948b99aa63c1
|
[] |
no_license
|
xingzhis/A-dual-eigen-analysis-on-canid-genomes
|
b03b4fa06c8933d4000954e07aa31eaa6f458d33
|
e4ae77ba6988885d2cba52badfa686f65e690d86
|
refs/heads/master
| 2022-07-03T04:38:35.040861
| 2020-05-11T09:02:14
| 2020-05-11T09:02:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,572
|
r
|
Phylogenetic_tree.R
|
library(ape)
snp.tree <- read.tree("snphylo.output.ml.tree")
#plot(snp.tree)
#library(tidytree)
# to map
library(phytools)
demographic.data <- read.csv('~/demographic.csv')
demographic.data.valid <- demographic.data[!(is.na(demographic.data$Latitude)|is.na(demographic.data$Longitude)),]
lat.long <- data.frame(
lat=demographic.data.valid$Latitude,
long=demographic.data.valid$Longitude
)
row.names(lat.long) <- demographic.data.valid$SampleID
invalid.id <- as.character(demographic.data[is.na(demographic.data$Latitude)|is.na(demographic.data$Longitude),]$SampleID)
obj<-phylo.to.map(drop.tip(snp.tree,invalid.id),lat.long,plot=FALSE)
plot(obj,type="phylogram",asp=1)
# python TreeCluster.py -i snphylo.output.ml.tree2 -t 0.4 > cluster_results
cluster.result <- read.table('cluster_results',header=T)
cluster.result <- cluster.result[cluster.result$SequenceName!='Lcu2_Pasto',]
# remove invalid item
cluster.map <- data.frame(
SampleID=cluster.result$SequenceName,
Latitude=array(0,length(cluster.result$SequenceName)),
Longitude=array(0,length(cluster.result$SequenceName)),
ClusterNumber=cluster.result$ClusterNumber
)
for (i in rownames(cluster.map)) {
demog.row <- demographic.data.valid[as.character(demographic.data.valid$SampleID)==as.character(cluster.map[i,]$SampleID),]
cluster.map[i,]$Latitude <- demog.row$Latitude
cluster.map[i,]$Longitude <- demog.row$Longitude
}
#plot on map
library(ggplot2)
library(ggmap)
library(sp)
library(maptools)
library(maps)
# World basemap; latitude clipped to -60..90 to drop empty Antarctic area.
mapworld<-borders("world",colour = "gray50",fill="white")
mp<-ggplot()+mapworld+ylim(-60,90)
# One point per sample, coloured by its TreeCluster assignment.
mp<-mp+geom_point(aes(x=as.numeric(cluster.map$Longitude),
y=as.numeric(cluster.map$Latitude),color=as.factor(cluster.map$ClusterNumber),size=5,alpha=0.2))
# mid<-mean(unique(cluster.map$ClusterNumber))
# mp<-mp+scale_color_gradient2(midpoint=mid, low="blue", mid="white",
# high="red", space ="Lab" )
# # Install
# install.packages("wesanderson")
# # Load
library(wesanderson)
# Discrete palette for the cluster colours.
mp<-mp+scale_color_manual(values=wes_palette(name="Cavalcanti1"))
library(treeio)
library(ggtree)
# Re-read the full tree and draw it in a circular layout with small tip labels.
snp.tree <- read.tree("snphylo.output.ml.tree")
snp.tree.plot <- ggtree(snp.tree, layout="circular") + geom_tiplab(aes(angle=angle), color='purple',hjust = -.2,size=1.5)+ xlim(0, 0.55)
setwd('~')
# Precomputed SVD of the SNP matrix (object `snp.svd`) plus the helper
# functions filter.loadings() and nonzero.pos() from DualEigen_utils.R.
load('snp.svd.Rdata')
source('DualEigen_utils.R')
num_layers <- 10
# NOTE: the right-assignment chains both cutoffs to 0.8 in one statement.
u.cutoff <- 0.8 -> v.cutoff
main.pos.x <- list()
main.pos.y <- list()
# For each SVD layer, record which positions of the left (u) and right (v)
# singular-vector loadings survive the cutoff filtering.
for (layer in seq(num_layers)) {
u <- filter.loadings(snp.svd$u[,layer],method='2',norm.cutoff=u.cutoff)
v <- filter.loadings(snp.svd$v[,layer],method='2',norm.cutoff=v.cutoff)
main.pos.x[[layer]] <- nonzero.pos(u)
main.pos.y[[layer]] <- nonzero.pos(v)
}
# svd.cluster <- array('black',c(127,10))
# for (layer in 1:10){
# for (j in seq_along(main.pos.x[[layer]])){
# svd.cluster[main.pos.x[[layer]][j],layer] <- 'red'
# }
# }
setwd('tree-dualeigen')
# For layers 2..10: colour tree tips red when the sample carries a surviving
# u-loading in that layer, then save one fan-layout tree per layer as a PDF.
for (layer in 2:10){
svd.cluster2 <- data.frame(
SampleID = demographic.data$SampleID,
color = array('grey',length(demographic.data$SampleID))
)
for (j in seq_along(main.pos.x[[layer]])){
svd.cluster2[main.pos.x[[layer]][j],]$color <- 'red'
}
# Rename this tip so it matches demographic.data$SampleID
# (presumably a truncated label -- TODO confirm against the data).
snp.tree$tip.label[snp.tree$tip.label=='Lcu2_Pasto']<- 'Lcu2_Pastora'
svd.color2 <- array()
# Reorder the colours to follow the tree's tip-label order.
for (i in seq_along(svd.cluster2$color)){
svd.color2[i] <- svd.cluster2[svd.cluster2$SampleID==snp.tree$tip.label[i],]$color
}
pdf(paste('tree-dualeigen-',layer,'.pdf',sep=''))
plot(snp.tree, tip.color=svd.color2, type="fan",cex=0.5,x.lim=0.2)
dev.off()
}
|
f32612fb2e7a586230f4ee2d84e22c9d26038418
|
107775eb06b9233f0abdf86cf7ff8f85ab069f1f
|
/test.R
|
d25a77c697d18660942bc94e4e9b1294b4c7e66f
|
[] |
no_license
|
jt1800/project1
|
b7386ea4311e636dfe5edf3d1bf1e510016a5008
|
41e3c6d1f70777ffd43cc3f8851d859c55788862
|
refs/heads/master
| 2021-05-16T06:02:15.730150
| 2017-09-12T21:47:45
| 2017-09-12T21:47:45
| 103,324,416
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 250
|
r
|
test.R
|
# Compute the Collatz ("hailstone") sequence starting from `x`.
#
# The loop body always runs at least once, so collatz(1) returns
# c(1, 4, 2, 1) rather than just 1 (same behaviour as before).
#
# Args:
#   x:    starting value, a positive integer-valued number (default 1)
#   plot: if TRUE, also plot the sequence against its index
#
# Returns: numeric vector of the visited values, ending at 1.
collatz <- function(x = 1, plot = FALSE) {
  nums <- c(x)
  repeat {
    if (x %% 2 != 0) {
      x <- 3 * x + 1   # odd step
    } else {
      x <- x %/% 2     # even step (integer division)
    }
    nums <- append(nums, x)
    if (x <= 1) break
  }
  # seq_along() instead of 1:length(nums) -- the safe sequence idiom
  if (plot) plot(seq_along(nums), nums)
  nums
}
|
047f9b0b8d87ccf8dfe4b3da3e43d4717ec47874
|
3fdb12a1fe34aca6b96aa9047df4593404a5fc52
|
/transmodel.pub.R
|
c7ed0cfcc2ed51950b17794ad1044bbf03fe7f8b
|
[] |
no_license
|
carnegie-dpb/bartonlab-modeling
|
06c90e10df8fc37973a02db41f2c882bc8ceedfd
|
7d875f16f675bf94fc04a360ae8f6855d4642619
|
refs/heads/master
| 2021-01-22T03:48:47.674881
| 2018-04-18T22:29:04
| 2018-04-18T22:29:04
| 81,460,423
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,519
|
r
|
transmodel.pub.R
|
##
## plot linear transcription model for a direct target in one condition, custom made for publication (no colors, etc.)
##
source("rhoc.R")
source("rhon.R")
source("rhop.R")
source("Rsquared.R")
## Plot the linear transcription model for a direct target in one condition.
## Draws the model transcript curve (dashed, left axis), the nuclear TF
## curve (solid, right axis), optional measured data points, and parameter
## annotations.  Helper functions rhoc(), rhon(), rhop(), Rsquared() and
## errorMetric() are sourced from sibling files.
##
## Arguments:
##   rhon0, rhoc0  initial nuclear / cytoplasmic GR-TF concentrations
##   nu, gamman    import rate and nuclear decay rate (1/h)
##   rhop0         initial transcript level; pass 0 to estimate from t=0 data
##   etap, gammap  transcription and transcript decay rates (1/h)
##   dataTimes, dataValues  optional measured time points and values
##   dataLabel     optional legend label for the data series
##   plotBars      if TRUE, plot per-time mean +/- SD instead of raw points
transmodel.pub = function(rhon0=1, rhoc0=20, nu=10, gamman=0.7, rhop0=1, etap=1, gammap=1, dataTimes, dataValues, dataLabel=NA, plotBars=FALSE) {
  ## set rhop0 = mean of the t=0 data points when the caller passes rhop0=0
  ## BUG FIX: the original used `&&&&`, which is not valid R syntax (parse
  ## error); the intended scalar short-circuit operator is `&&`.
  if (rhop0==0 && hasArg(dataValues) && hasArg(dataTimes)) rhop0 = mean(dataValues[dataTimes==0])
  ## calculation interval: 0 to 2 hours in 0.01 h steps
  t = (0:200)/100
  ## cytoplasmic GR-TF concentration
  rhoc_t = rhoc(rhoc0, nu, t)
  ## nuclear GR-TF concentration
  rhon_t = rhon(rhoc0, rhon0, nu, gamman, t)
  ## transcript concentration
  rhop_t = rhop(rhoc0, nu, gamman, rhop0, etap, gammap, t)
  ## axis limits: include the measured data range when data are supplied
  ymin = 0
  if (hasArg(dataValues)) {
    ymax = max(rhon_t,rhop_t,dataValues)
    plot(t, rhop_t, type="l", xlab="time (h)", ylab="model nuclear concentration (arb. units), measured expression (FPKM)", ylim=c(ymin,ymax), lty=2)
  } else {
    ymax = max(rhon_t,rhop_t)
    plot(t, rhop_t, type="l", xlab="time (h)", ylab="model nuclear concentration", ylim=c(ymin,ymax), lty=2)
  }
  ## compare with provided data
  if (hasArg(dataTimes) && hasArg(dataValues)) {
    if (plotBars) {
      ## plot per-time-point mean and +/- SD error bars
      for (ti in unique(dataTimes)) {
        y = mean(dataValues[dataTimes==ti])
        sd = sd(dataValues[dataTimes==ti])
        points(ti, y, pch=19, col="black")
        segments(ti, (y-sd), ti, (y+sd), col="black")
      }
    } else {
      ## plot each point
      points(dataTimes, dataValues, pch=19, col="black")
    }
    ## evaluate the model at the data times, then compute fit metrics
    fitValues = dataTimes
    for (i in seq_along(dataTimes)) {
      fitValues[i] = rhop(rhoc0, nu, gamman, rhop0, etap, gammap, dataTimes[i])
    }
    R2 = Rsquared(fitValues,dataValues)
    error = errorMetric(fitValues,dataValues)
    print(paste("error=",signif(error,6),"R2=",signif(R2,6)))
  }
  ## plot TF concentration on right axis, this axis used for annotation
  par(new=TRUE)
  plot(t, rhon_t, type="l", axes=FALSE, xlab=NA, ylab=NA, ylim=c(0,max(rhon_t)), lty=1)
  axis(side=4)
  par(new=FALSE)
  ## legend: include the data series only when a label was supplied
  if (hasArg(dataLabel) && !is.na(dataLabel)) {
    legend(par()$xaxp[2], 0.98*par()$usr[4], xjust=1, yjust=1, lty=c(1,2,0), pch=c(-1,-1,19),
           c(
             expression(paste(rho[n]," ","(right axis)")),
             expression(rho[p]),
             dataLabel
           )
    )
  } else {
    legend(par()$xaxp[1], par()$yaxp[1], xjust=0, yjust=0, lty=c(1,2),
           c(
             expression(rho[n]),
             expression(rho[p])
           )
    )
  }
  ## annotate the model parameters on the plot
  xtext = 0.3
  ytext = par()$usr[4]
  text(xtext, 0.45*ytext, bquote(rho[c0]==.(round(rhoc0,1))), pos=4, col="black")
  text(xtext, 0.40*ytext, bquote(rho[n0]==.(round(rhon0,1))), pos=4, col="black")
  text(xtext, 0.35*ytext, bquote(paste(nu==.(signif(nu,3))," ",h^-1)), pos=4, col="black")
  text(xtext, 0.30*ytext, bquote(paste(gamma[n]==.(signif(gamman,3))," ",h^-1)), pos=4, col="black")
  text(xtext, 0.25*ytext, bquote(rho[p0]==.(round(rhop0,1))), pos=4, col="black")
  text(xtext, 0.20*ytext, bquote(paste(eta[p]==.(signif(etap,3))," ",h^-1)), pos=4, col="black")
  text(xtext, 0.15*ytext, bquote(paste(gamma[p]==.(signif(gammap,3))," ",h^-1)), pos=4, col="black")
  if (hasArg(dataTimes) && hasArg(dataValues)) {
    text(xtext, 0.05*ytext, bquote(r^2==.(round(R2,2))), pos=4, col="black")
  }
}
|
5e9d02229d782767d259e11064dee731d97b7e58
|
bebe94f1d0b3a30f12ad309a6629737ae706fb19
|
/R/read_rds.R
|
f3f1e9f51a2429fa5c3a569210f846c8f9b01660
|
[] |
no_license
|
BHGC/bhgc.wx
|
d19956db738f841cda62ef3e176c7a3c54ab1709
|
a4b28cfabf4ec2107799dda868601c7d3cdbdfb3
|
refs/heads/master
| 2022-03-24T21:49:31.627577
| 2020-10-11T04:20:45
| 2020-10-11T04:20:45
| 156,008,704
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,265
|
r
|
read_rds.R
|
#' Robustly Reads an RDS File
#'
#' @param pathname RDS file to read.
#'
#' @param \ldots (optional) Additional arguments passed to [base::readRDS()].
#'
#' @return The \R object read.
#'
#' @details
#' Uses [base::readRDS] internally but gives a more informative error message
#' on failure.
#'
#' @importFrom utils file_test
#' @export
#' @keywords internal
read_rds <- function(pathname, ...) {
  ## Fail fast with a clear message when the file does not exist.
  if (!file_test("-f", pathname)) {
    stop(sprintf("No such file: %s", sQuote(pathname)))
  }
  ## On read failure, rewrite the condition message to include the file
  ## name and its on-disk size, then rethrow the original condition.
  annotate_and_rethrow <- function(ex) {
    ex$message <- sprintf(
      "readRDS() failed to read file %s (%.0f bytes). The reason was: %s",
      sQuote(pathname), file.size(pathname), conditionMessage(ex)
    )
    stop(ex)
  }
  tryCatch(readRDS(pathname, ...), error = annotate_and_rethrow)
}
#' Robustly Saves an Object to RDS File Atomically
#'
#' @param object The \R object to be saved.
#'
#' @param pathname RDS file to be written.
#'
#' @param \ldots (optional) Additional arguments passed to [base::saveRDS()].
#'
#' @return (invisible) The pathname of the RDS written.
#'
#' @details
#' Uses [base::saveRDS] internally but writes the object atomically by first
#' writing to a temporary file which is then renamed.  If writing the
#' temporary file fails, the partial file is removed so that a later call
#' is not blocked by a stale \code{*.tmp} file.
#'
#' @importFrom utils file_test
#' @export
#' @keywords internal
save_rds <- function(object, pathname, ...) {
  pathname_tmp <- sprintf("%s.tmp", pathname)
  ## Refuse to clobber an existing temporary file (possibly a concurrent writer).
  if (file_test("-f", pathname_tmp)) {
    stop(sprintf("Cannot save RDS file because a temporary save file already exists: %s", sQuote(pathname_tmp)))
  }
  tryCatch({
    saveRDS(object, file = pathname_tmp, ...)
  }, error = function(ex) {
    msg <- conditionMessage(ex)
    msg <- sprintf("saveRDS() failed to save to temporary file %s (%.0f bytes). The reason was: %s",
                   sQuote(pathname_tmp), file.size(pathname_tmp), msg)
    ex$message <- msg
    ## BUG FIX: remove the partial temporary file. Previously it was left
    ## behind on failure, so every subsequent save_rds() to this path died
    ## with "temporary save file already exists".
    unlink(pathname_tmp)
    stop(ex)
  })
  stopifnot(file_test("-f", pathname_tmp))
  file.rename(from = pathname_tmp, to = pathname)
  if (file_test("-f", pathname_tmp) || !file_test("-f", pathname)) {
    ## FIX: consistent "%.0f" format specifiers (original mixed in "%0.f").
    msg <- sprintf("save_rds() failed to rename temporary save file %s (%.0f bytes) to %s (%.0f bytes)", sQuote(pathname_tmp), file.size(pathname_tmp), sQuote(pathname), file.size(pathname))
    stop(msg)
  }
  invisible(pathname)
}
|
8bb1a0a15f3e3925f568c2162474922f5bc62d72
|
2921619274fa59a739d5722e5e4fa75151126d6b
|
/R/data_katastralni_uzemi.R
|
32eb8acf3ede85b0511188d0bb4cf4f6b719ef5a
|
[
"MIT"
] |
permissive
|
JanCaha/CzechData
|
494f8ce3a6587abc06f63f0e1afd664dcd189916
|
ebe77c0ad444ba7e2b6eb4f9ce290e8852d9564c
|
refs/heads/master
| 2022-12-21T08:38:28.376988
| 2021-12-14T17:03:44
| 2021-12-14T17:03:44
| 166,657,541
| 8
| 2
|
MIT
| 2022-12-20T19:07:16
| 2019-01-20T12:17:14
|
R
|
UTF-8
|
R
| false
| false
| 868
|
r
|
data_katastralni_uzemi.R
|
#' data.frame of all cadastral territories in Czech Republic
#'
#' A dataset containing the names and other attributes of all 13,078
#' cadastral territories in the Czech Republic. The codes (every column with the string kod in
#' its name) are treated as character strings even though some of them are numbers. These codes,
#' however, serve only as IDs.
#' Columns with the suffix _kod identify various levels of self-government units in the Czech Republic.
#'
#' @format A data frame with 13078 rows and 9 variables:
#' \describe{
#' \item{kod}{id of the cadastral territory}
#' \item{nazev}{name of the cadastral territory}
#' \item{obec_kod}{}
#' \item{pou_kod}{}
#' \item{orp_kod}{}
#' \item{okres_kod}{}
#' \item{lau1_kod}{}
#' \item{vusc_kod}{}
#' \item{prares_kod}{}
#' }
#' @source \url{http://services.cuzk.cz/shp/stat/epsg-5514/1.zip}
"katastralni_uzemi"
|
fcb3833a9701a0d81fd27bd2738152b65bb7bf20
|
b08b7e3160ae9947b6046123acad8f59152375c3
|
/Programming Language Detection/Experiment-2/Dataset/Train/R/knapsack-problem-bounded-2.r
|
29e29a86c09d41be6f8c610116b563023dccd07f
|
[] |
no_license
|
dlaststark/machine-learning-projects
|
efb0a28c664419275e87eb612c89054164fe1eb0
|
eaa0c96d4d1c15934d63035b837636a6d11736e3
|
refs/heads/master
| 2022-12-06T08:36:09.867677
| 2022-11-20T13:17:25
| 2022-11-20T13:17:25
| 246,379,103
| 9
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 709
|
r
|
knapsack-problem-bounded-2.r
|
# Bounded knapsack solved via genetic optimisation (rgenoud).
# Expects a data frame `task_table` with columns value, weight, pieces and
# items to be defined before this script runs.
library(rgenoud)

# Fitness: the total value when the load fits in the 400 dag weight limit;
# otherwise a negative penalty proportional to the excess weight.
fitness= function(x= rep(1, nrow(task_table))){
  total_value= sum(task_table$value * x)
  total_weight= sum(task_table$weight * x)
  ifelse(total_weight <= 400, total_value, 400-total_weight)
}

# Search domain: each item's count ranges from 0 up to its available pieces.
allowed= matrix(c(rep(0, nrow(task_table)), task_table$pieces), ncol = 2)

set.seed(42)
evolution= genoud(fn= fitness,
                  nvars= nrow(allowed),
                  max= TRUE,
                  pop.size= 10000,
                  data.type.int= TRUE,
                  Domains= allowed)

cat("Value: ", evolution$value, "\n")
cat("Weight:", sum(task_table$weight * evolution$par), "dag", "\n")

# BUG FIX: `solution` was used below without ever being defined; it is the
# optimal per-item count vector found by genoud.
solution <- evolution$par
# NOTE(review): `%>%` and filter() require dplyr, which this script never
# loads -- attach dplyr before running this final step.
data.frame(item= task_table$items, pieces= as.integer(solution)) %>%
  filter(solution> 0)
|
eabdf6a56a6d600f2f538d688403852b71fb27f8
|
3f3a0717863ea89ed1c1ebf692f13aaf54a2acc9
|
/man/estimatesTable.Rd
|
6d2096c202f9812b94f0b54c87f055c9836f986f
|
[] |
no_license
|
cardiomoon/semMediation
|
55cf39159f43589b6d58e05e2a788bc5de387a30
|
ffe9338ae0a64efd604bf5e11439c12e43967681
|
refs/heads/master
| 2018-12-29T09:24:48.533628
| 2018-12-23T14:22:02
| 2018-12-23T14:22:02
| 77,782,004
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,013
|
rd
|
estimatesTable.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CorPlot.R
\name{estimatesTable}
\alias{estimatesTable}
\title{convert parameterEstimates to data.frame}
\usage{
estimatesTable(fit, latent = TRUE, regression = TRUE,
mediation = FALSE, covar = FALSE, ci = FALSE,
standardized = TRUE, digits = 2)
}
\arguments{
\item{fit}{An object of class lavaan. Result of sem function of package lavaan}
\item{latent}{whether the latent variables be included in result}
\item{regression}{whether the regressions be included in result}
\item{mediation}{whether the mediation effects be included in result}
\item{covar}{whether the covariances be included in result}
\item{ci}{If TRUE, confidence intervals are added to the output}
\item{standardized}{Logical. If TRUE, standardized estimates are added to the output}
\item{digits}{integer indicating the number of decimal places (round) or significant digits (signif) to be used.}
}
\description{
convert parameterEstimates to data.frame
}
|
7748a5c8e94efaea05a49bf54fde90f789524de8
|
59d501a829468e393db33cc38a192c1ed154f8ef
|
/man/qqnormsim.Rd
|
a716c84ccd02d88821ca604683cded755fcdece7
|
[] |
no_license
|
aaronbaggett/labs4316
|
9687d80b2db2a73a80478bd343b75111c8821510
|
e467139cd2d14c0b11561db4a2146e7d969bbbce
|
refs/heads/master
| 2020-04-21T08:39:50.833663
| 2019-09-17T18:27:27
| 2019-09-17T18:27:27
| 169,426,048
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 647
|
rd
|
qqnormsim.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qqnormsim.R
\name{qqnormsim}
\alias{qqnormsim}
\title{Generate simulated QQ plots}
\usage{
qqnormsim(sample, data)
}
\arguments{
\item{sample}{the variable to be plotted.}
\item{data}{data frame to use.}
}
\value{
A 3 x 3 grid of qqplots.
}
\description{
Create a 3 x 3 grid of quantile-quantile plots, the first of which corresponds
to the input data. The other eight plots arise from simulating random normal
data with the same mean, standard deviation, and length as the data. For use
in comparing known-normal qqplots to an observed qqplot to assess normality.
}
|
e78e6b54f7cbaa9100953c1e344f3152d1ca1fcb
|
cd4d27b44a869bd1751b9e8dd34f126f48ec57e7
|
/tm_examples.R
|
6e4e361731abaecce8992f0004ffb1fdfeabd0ff
|
[] |
no_license
|
shuckle16/tm_examples
|
9dafcf7ef6876eebc9fc348de76c11b96725e601
|
4c2f0c9d6a5d6ab81d91c4c0e55f54858ac80e7d
|
refs/heads/master
| 2021-01-15T15:44:16.685273
| 2016-10-05T01:42:04
| 2016-10-05T01:42:04
| 49,775,559
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,621
|
r
|
tm_examples.R
|
#install.packages('tm')
library(tm)
sci.electronics <- Corpus(DirSource("sci.electronics")) # A corpus with 981 text documents
talk.religion.misc <- Corpus(DirSource("talk.religion.misc")) # A corpus with 628 text documents
############################
#### Text Data Cleaning ####
############################
# combine documents
# NOTE(review): this shadows base::c() with a corpus object; function calls
# to c() still resolve to the base function, but a different name is safer.
c <- c(sci.electronics,talk.religion.misc)
# convert to lower case
res <- tm_map(c,tolower)
inspect(res[1:10])
# remove punctuation
res <- tm_map(res, removePunctuation)
inspect(res[1:10])
# remove numbers
res <- tm_map(res, removeNumbers)
inspect(res[1:10])
# stemming
res <- tm_map(res,stemDocument)
inspect(res[1:10])
# remove stop words
res <- tm_map(res, removeWords, stopwords("english"))
inspect(res[1:10])
# remove additional stop words
res <- tm_map(res, removeWords, c("one","can"))
inspect(res[1:10])
# remove extra white spaces
res <- tm_map(res,stripWhitespace)
inspect(res[1:10])
# document-term matrix
res <- tm_map(res, PlainTextDocument) # compatibility issue
dtm <- DocumentTermMatrix(res)
inspect(dtm)
##################################
#### Frequency Based Analysis ####
##################################
# Terms that occur at least 200 times across the combined corpus.
w<-findFreqTerms(dtm,200)
dtm1 <- dtm[,w]
# Terms correlated with "war" at >= 0.2.
findAssocs(dtm,"war",0.2)
# List unique words sorted by decreasing frequency
term.freq <- apply(dtm, 2, sum)
barplot(sort(term.freq,decreasing=TRUE))
barplot(sort(term.freq,decreasing=TRUE)[1:30])
# remove sparse terms
# Keep only terms whose total corpus frequency exceeds 10.
term_wt <- apply(dtm,2,sum)
dtm <- dtm[,term_wt>10]
##################################
#### Word Cloud Visualization ####
##################################
#install.packages("wordcloud")
library(wordcloud)
# Word cloud uses the frequencies computed before sparse-term removal.
term.table <- data.frame(term=names(term.freq),frequency=term.freq)
term.table <- term.table[order(term.freq,decreasing=TRUE),]
head(term.table) # you may optionally save the word frequency table in a file
wordcloud(term.table$term, term.table$frequency, min.freq=50)
#### Compare the word cloud of "sci.electronics" and "talk.religion.misc" corpuses
#### after the same data cleaning preprocessing above
###################################
#### Weighting Terms by TF-IDF ####
###################################
# Re-weight the document-term matrix by TF-IDF instead of raw term frequency.
dtm.tfxidf <- weightTfIdf(dtm)
inspect(dtm.tfxidf[1:10, 1:10])
#### Question: Compare the word cloud of "sci.electronics" by TF and by TF-IDF
#### Question: find out about the weight of the word "subject" before and after tfidf conversion
#### How does it change? Why?
#############################################
#### Data Preparation for Classification ####
#############################################
# Transform the corpus back into a dataframe
df <- as.data.frame(as.matrix(dtm))
# This is the vector of class labels corresponding to each document
class.labels <- c(rep("E",length(sci.electronics)),rep("R",length(talk.religion.misc)))
df <- cbind(class.labels,df)
# Use a random subset of 75% documents as training
# NOTE(review): no set.seed() before rbinom(), so the split is not reproducible.
train.ind <- rbinom(nrow(df), size=1, prob=0.75)
training.data <- df[train.ind==1,]
testing.data <- df[train.ind==0,]
################################
#### naive Bayes classifier ####
################################
#install.packages("e1071")
library(e1071)
# train the model
mybayes <- naiveBayes(class.labels ~ ., data=training.data)
# compare the predicted and the actual: confusion matrix on the held-out set
testing <- predict(mybayes, testing.data)
table(testing, testing.data$class.labels, dnn=list('predicted','actual'))
# class distribution (prior)
mybayes$apriori
# conditional distributions for each attribute
mybayes$tables
# Dimension reduction question?
# What if I am looking for the most representative words for different classes?
|
c389ac7d485befe3d9069d97561c7f34595d0b07
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/MESS/man/soccer.Rd
|
2b87e0091421f0b31033da43b826d30de3129ba4
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,119
|
rd
|
soccer.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MESS-package.R
\docType{data}
\name{soccer}
\alias{soccer}
\title{Danish national soccer players}
\format{
A data frame with 805 observations on the following 5 variables.
\describe{ \item{name}{a factor with names of the players}
\item{DoB}{a Date. The date-of-birth of the player}
\item{position}{a factor with levels \code{Forward} \code{Defender}
\code{Midfielder} \code{Goalkeeper}} \item{matches}{a numeric
vector. The number of A matches played by the player} \item{goals}{a
numeric vector. The number of goals scored by the player in A matches} }
}
\source{
Data collected from the player database of DBU on March 21st, 2014.
See \url{http://www.dbu.dk} for more information.
}
\description{
Players on the Danish national soccer team. The dataset consists of all
players who have been picked to play on the men's senior A-team, their
position, date-of-birth, goals and matches.
}
\examples{
data(soccer)
birthmonth <- as.numeric(format(soccer$DoB, "\%m"))
birthyear <- as.numeric(format(soccer$DoB, "\%Y"))
}
\keyword{datasets}
|
df9ed9d0547502a66e4e83ea2ed3c95b95d77348
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/NMOF/examples/TA.info.Rd.R
|
faa57bae7cc0ecf0b41bfe2b375b7635c1c98ded
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,663
|
r
|
TA.info.Rd.R
|
library(NMOF)
### Name: TA.info
### Title: Threshold-Accepting Information
### Aliases: TA.info
### ** Examples
### MINIMAL EXAMPLE for TAopt
## objective function evaluates to a constant
fun <- function(x)
0
## neighbourhood function does not even change the solution,
## but it reports information
## (TA.info() is queried from inside the neighbourhood call and exposes
## the optimiser's current threshold, step and iteration)
nb <- function(x) {
tmp <- TA.info()
cat("current threshold ", tmp$threshold,
"| current step ", tmp$step,
"| current iteration ", tmp$iteration, "\n")
x
}
## run TA
## nS steps per threshold, nT thresholds, nD random steps for
## threshold computation; progress output suppressed.
algo <- list(nS = 5,
nT = 2,
nD = 3,
x0 = rep(0, 5),
neighbour = nb,
printBar = FALSE,
printDetail = FALSE)
ignore <- TAopt(fun, algo)
## printed output:
## current threshold NA | current step 1 | current iteration NA
## current threshold NA | current step 2 | current iteration NA
## current threshold NA | current step 3 | current iteration NA
## current threshold 1 | current step 1 | current iteration 1
## current threshold 1 | current step 2 | current iteration 2
## current threshold 1 | current step 3 | current iteration 3
## current threshold 1 | current step 4 | current iteration 4
## current threshold 1 | current step 5 | current iteration 5
## current threshold 2 | current step 1 | current iteration 6
## current threshold 2 | current step 2 | current iteration 7
## current threshold 2 | current step 3 | current iteration 8
## current threshold 2 | current step 4 | current iteration 9
## current threshold 2 | current step 5 | current iteration 10
|
0b5c3c47cd3474a36bef8e47a7d2261f4e576b4a
|
9216e41e93c0224b2a44a1cc5227986031abcdaa
|
/global.R
|
364d170ca9ec415b75e169d078360dd241ca8e03
|
[] |
no_license
|
bridgecrew-perf7/Shiny_deploy
|
90e768b6a19405a1afd5b2284aea955b0c606c7d
|
abeb1858a754489874986db097b68aad348cd579
|
refs/heads/main
| 2023-06-06T07:32:54.454133
| 2021-06-11T06:08:32
| 2021-06-11T06:08:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 437
|
r
|
global.R
|
# global.R for a Shiny app: loads all package dependencies and the
# pre-built data objects shared by ui/server.
# Plot libraries
# --------------------
library(ggplot2)
library(ggthemes)
library(plotly)
# Data manipulation libraries
# --------------------
library(tidyverse)
library(dplyr)
library(reshape2)
# Shiny Libraries
# ---------------
library(shiny)
library(shinyglide)
library(shinyjs)
library(shinydashboard)
library(bs4Dash)
# Model Libraries
# ---------------
library(randomForest)
library(limma)
# Data
# ----
# Loads the app's pre-built objects into the global environment.
load("./data.RData")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.